content stringlengths 5 1.05M |
|---|
import os.path
import hashlib
from random import SystemRandom
from string import ascii_letters, digits
from datetime import datetime
class Error(Exception):
    """Base class for errors raised by this password generator."""
    pass


class SizeError(Error):
    """Raised when the requested password length is shorter than 8 characters."""

    def __init__(self, message):
        # Bug fix: forward the message to Exception so that str(exc) and
        # exc.args carry it — previously print(erro) printed an empty string
        # because args was never populated.
        super().__init__(message)
        self.message = message
def salva_senha(senha):  # Persist the password (hash) to a user-chosen directory
    """Append *senha* (optionally as a SHA-1 hex digest) to a text file,
    tagged with the site it belongs to when one is provided.
    """
    def _sim_ou_nao(pergunta):
        # Re-prompt until the answer starts with S or N; the original
        # `input(...).upper()[0]` raised IndexError on an empty line.
        while True:
            resposta = input(pergunta).strip().upper()[:1]
            if resposta in ('S', 'N'):
                return resposta

    criptografia_resposta = _sim_ou_nao('Deseja criptografar a senha em hash? [S/N] ')
    nome_arquivo = input('Digite o nome do arquivo a ser gerado: ').strip()
    site = input('Essa senha é para o cadastro de qual site? (Pressione enter caso não queira compartilhar): ').strip()
    print("""Escolha um local para salvar o arquivo:
1- Área de Trabalho
2- Downloads
3- Documentos
4- Digitar manualmente""")
    while True:
        try:
            escolha = int(input('Sua escolha: '))
        except ValueError:
            # Bug fix: a non-numeric choice used to crash with ValueError.
            print('Por favor, digite um número.')
            continue
        if 0 < escolha < 5:
            break
    # Map the fixed menu choices to home sub-folders.
    pastas = {1: 'Área de Trabalho', 2: 'Downloads', 3: 'Documentos'}
    while True:
        if escolha == 4:
            diretorio = input('Digite o diretório desejado para o arquivo: ').strip()
        else:
            diretorio = f'/home/{os.getlogin()}/{pastas[escolha]}'
        try:
            # `with` guarantees the handle is closed (the original leaked it),
            # and a trailing '\n' is always written so appended passwords do
            # not concatenate onto one line (the original omitted it when no
            # site was given).
            with open(os.path.join(diretorio, f'{nome_arquivo}.txt'), 'a') as arquivo:
                if criptografia_resposta == 'S':
                    # NOTE: SHA-1 is not suitable for password storage (fast,
                    # broken for collisions); kept for output compatibility.
                    conteudo = hashlib.sha1(senha.encode('utf-8')).hexdigest()
                else:
                    conteudo = senha
                arquivo.write(f'{conteudo} - {site}\n' if site else f'{conteudo}\n')
        except FileNotFoundError:
            print('Diretório inválido')
            # Bug fix: a missing fixed folder (choices 1-3) used to retry the
            # same path forever; fall back to manual entry instead.
            escolha = 4
        else:
            break
def main():
    """Interactive strong-password generator loop.

    Prompts for a length (>= 8), generates a password from the configured
    character pool with a cryptographically secure RNG, optionally saves it
    via salva_senha(), and repeats until the user declines.
    """
    def _sim_ou_nao(pergunta):
        # Re-prompt until the answer starts with S or N; the original
        # `input(...).upper()[0]` raised IndexError on an empty line, and
        # `resposta not in 'SN'` accepted the empty string via substring test.
        while True:
            resposta = input(pergunta).strip().upper()[:1]
            if resposta in ('S', 'N'):
                return resposta

    # Character pool used for the generated passwords.
    chars = ascii_letters + digits + 'çÇ!@#$%&*()-_++§"´`[]{}ªº~^,.<>;:/?°\\|'
    rnd = SystemRandom()  # os.urandom-backed, suitable for secrets
    print('-' * 60)
    print('Gerador de Senha Forte Aleatória by Neto')
    print('-' * 60)
    while True:
        try:
            # Validate that the input is a number and at least 8 characters.
            tamanho = int(input('Digite a quantidade de dígitos de sua senha (Mínimo de 8 caracteres): '))
            if tamanho < 8:
                raise SizeError('Tamanho menor que 8 caracteres.')
        except ValueError:
            print('Por favor, digite um número.')
        except SizeError as erro:
            print(erro)
        else:
            inicio = datetime.now()
            print('Senha gerada: ', end='')
            # Random password drawn from the registered character pool.
            senha = ''.join(rnd.choice(chars) for _ in range(tamanho))
            print(senha)
            print(f'Senha gerada em: {datetime.now() - inicio}')
            if _sim_ou_nao('Deseja gravar essa senha em um arquivo? [S/N] ') == 'S':
                salva_senha(senha)
            if _sim_ou_nao('Quer gerar mais uma senha? [S/N] ') == 'N':
                print('Encerrando...')
                break
# Run the interactive generator only when executed as a script.
if __name__ == '__main__':
    main()
|
from threading import Thread
import time
""" 多线程中使用全局变量 """
# 注意线程与进程不同的是,多线程间是可以共享全局变量的
g_num = 100
def work1():
    """Increment the shared global g_num three times, then report its value."""
    global g_num
    for i in range(3):
        g_num += 1
    print('--- in work1, g_num is %d ---' % g_num)
def work2():
    """Print the current value of the shared global g_num."""
    global g_num
    print('--- in work2, g_num is %d ---' % g_num)
print('--- 线程创建之前,g_num is %d ---' % g_num)
# work1 mutates the global first; the 1-second sleep ensures it has finished
# before work2 reads the shared value.
t1 = Thread(target=work1)
t1.start()
time.sleep(1)
t2 = Thread(target=work2)
t2.start()
|
print("dev_demo")
def test_main():
    """Demo function for branch experiments.

    Bug fix: the body contained unresolved git merge-conflict markers
    (<<<<<<< HEAD / ======= / >>>>>>> dev), which are a syntax error.
    Resolved in favor of the dev-branch body, matching the module-level
    print("dev_demo") above.
    """
    print("dev 分支")
|
from ..mig.python import ReactBase
from ..mig.python.ReactConst import ReactConst
MAX_PAYLOAD = ReactConst.MAX_REACT_PAYLOAD
class Msg(ReactBase.Msg):
    """A React message: a ReactBase.Msg whose payload may be split across
    several parts when it exceeds MAX_PAYLOAD (see encode()/complete()/merge()).
    """
    # Name-mangling note: inside this class body "__HLEN" is really the
    # module-level global "_Msg__HLEN"; the `global` statement makes the
    # assignment below create it at module scope, and the identical mangling
    # inside the method bodies lets them read it back.
    global __HLEN
    # Header length in bytes; the payload starts immediately after it.
    __HLEN = ReactBase.Msg().offset_payload_start_byte()

    def __init__(self, data=chr(0)*__HLEN,
            addr=None, gid=None, base_offset=0, data_length=None):
        # `data` must contain at least a full header.
        assert len(data) >= __HLEN
        ReactBase.Msg.__init__(self, data=data, addr=addr, gid=gid,
            base_offset=base_offset, data_length=data_length)

    def max_payload(self):
        # Maximum payload bytes a single ReactMsg part can carry.
        return MAX_PAYLOAD
    max_payload = classmethod(max_payload)  # pre-decorator classmethod idiom

    def set_payload(self, payload):
        # Replace everything after the header with `payload`.
        assert len(self.data) >= __HLEN
        self.data = self.data[0:__HLEN] + payload

    def get_payload(self):
        # Bytes following the header.
        return self.data[__HLEN:]

    def encode(self, msg, track_id=0):
        """
        Returns msg encoded as list of ReactMsg.
        A list is returned because a single message may exceed the payload
        of a single ReactMsg.
        """
        # NOTE: bound as a classmethod below, so `self` is really the class.
        #print "msg: ",msg
        type = msg.get_amType()
        data = msg.dataGet()
        total = len(data)
        if total < 1:
            raise ValueError, "message to encode must have a positive length"
        encoded = []
        for i in xrange(0, total, MAX_PAYLOAD):
            remaining = total - i
            data_part = data[i:i+MAX_PAYLOAD]
            r = Msg()
            r.set_payload(data_part)
            # only the first part gets the type
            r.set_type(type if i == 0 else 0)
            r.set_track_id(track_id)
            # bytes still to come *including* this part; complete() checks it
            r.set_remaining(remaining)
            encoded.append(r)
        return encoded
    encode = classmethod(encode)

    def complete(self):
        """
        Returns True if the payload can be extracted.
        """
        # True once the held data accounts for every remaining byte.
        return self.get_remaining() + __HLEN == len(self.dataGet())

    def extract(self):
        """
        Returns a tuple of (type, track_id, data) of the sub-message.
        Raises an exception if the message is not complete. Use the
        complete() predicate to test.
        """
        if not self.complete():
            raise ValueError, "not complete"
        return (self.get_type(), self.get_track_id(), self.get_payload())

    def merge(self, part):
        """
        Concatenate part, a React.Msg, to self.
        This is used for merging split messages.
        """
        # Parts after the first carry type 0 (see encode()).
        if part.get_type() != 0:
            raise ValueError, "not a React message part"
        if part.get_track_id() != self.get_track_id():
            raise ValueError, "tracking ID change"
        part_len = len(part.get_payload())
        # Bytes still missing beyond what we already hold.
        remaining = self.get_remaining() - len(self.data) + __HLEN
        if part_len > remaining:
            raise ValueError, "merged message exceeds length"
        self.data += part.get_payload()
        return self
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import TemplateView
class HomePageView(TemplateView):
    """Static landing page."""
    template_name = "core/index.html"


class AboutView(TemplateView):
    """Static about page."""
    template_name = "core/about.html"


class BlogView(TemplateView):
    """Blog listing page."""
    template_name = "core/blog.html"


class ContactView(TemplateView):
    """Contact page."""
    template_name = "core/contact.html"


class BlogDetailView(TemplateView):
    """Single blog-post page."""
    template_name = "core/blog-detail.html"


class EventView(TemplateView):
    """Events page."""
    template_name = "core/events.html"


class FollowerView(TemplateView):
    """Followers page."""
    template_name = "core/follower.html"


class InstructorView(TemplateView):
    """Instructors page."""
    template_name = "core/instructor.html"


class Dashboard(LoginRequiredMixin, TemplateView):
    """User dashboard; anonymous users are redirected to the login page."""
    template_name = "circus/index.html"


class Explore(TemplateView):
    """Explore page."""
    template_name = "core/explore.html"
|
import json
from django.contrib import messages
from django.db.models import Q
from django.utils.crypto import get_random_string
from ..models import MediaCard, Settings, TorrentClient, TorrentTracker
from ..plugin_manager import dw_torrent_aio, dpt, get_m_cards_to_urls
def message_or_print(request, commands, text, type_message=messages.SUCCESS):
    """Report *text* either to stdout (management-command mode, *commands*
    truthy) or as a Django messages-framework flash on *request*."""
    if not commands:
        messages.add_message(request, type_message, text)
        return
    print(text)
def sort_dw_tasks_m_cards(m_cards, user, id_m_card):
    """Partition the cards to download according to plugin capabilities.

    Returns:
        stop_list_url -- ids of cards that cannot be downloaded at all: no
            cookies stored and the plugin allows neither anonymous torrent
            download nor magnet links.
        urls -- {torrent_url: cookies}; cookies is {} when anonymous
            download is possible.
        magnet_urls -- magnet links downloadable directly, without proxy or
            cookies.
    """
    stop_list_url = []
    urls = {}
    magnet_urls = []
    cookies = get_cookies(user)
    # Bug fix: id_m_card defaults to None upstream (download_torrents), and
    # None.isdigit() raised AttributeError. A missing/non-numeric id now
    # means "process every card with fresh data".
    if id_m_card is not None and str(id_m_card).isdigit():
        filter_m_cards = Q(id=id_m_card)
    else:
        filter_m_cards = Q(is_new_data=True)
    for m_card in m_cards.filter(filter_m_cards):
        m_card_cookies = cookies.get(m_card.plugin_name)
        if m_card_cookies:
            urls[m_card.torrent_url] = m_card_cookies
            continue
        # No cookies stored for this plugin: prefer anonymous torrent-file
        # download, then magnet links; otherwise the card cannot be fetched.
        if dpt[m_card.plugin_name].params('torrent_dw'):
            urls[m_card.torrent_url] = {}
        elif dpt[m_card.plugin_name].params('magnet_dw'):
            magnet_urls.append(m_card.magnet_url)
        else:
            stop_list_url.append(m_card.id)
    return stop_list_url, urls, magnet_urls
# @timeout(10)
def download_torrents(id_m_card=None, user=None):
    """Send all pending download tasks for *user* to their torrent client.

    Returns (m_cards, stop_list_url) on success, or (False, False) when no
    client is configured or the client call fails.
    """
    m_cards, settings = get_m_card_set(user=user, get_settings=True)
    if not settings.t_client:
        # No torrent client configured for this user.
        return False, False
    stop_list_url, urls, magnet_urls = sort_dw_tasks_m_cards(m_cards, user, id_m_card)
    client = TorrentClient.objects.get(name=settings.t_client, user=user)
    try:
        dw_torrent_aio(
            magnet_urls=magnet_urls,
            tasks=urls,
            plugin_client=str(settings.t_client),
            host=f'{client.host}:{client.port}',
            login=client.login,
            password=client.password,
        )
    except Exception:
        # Bug fix: was a bare `except:` (also swallowed SystemExit /
        # KeyboardInterrupt). Client plugins may raise anything (network,
        # auth, protocol); callers only need the failure signal.
        return False, False
    return m_cards, stop_list_url
def get_cookies(user):
    """Return {tracker_name: parsed_session_cookies} for *user*'s trackers."""
    return {
        tracker.name: json.loads(tracker.session)
        for tracker in TorrentTracker.objects.filter(user=user)
    }
def get_m_card_set(request=None, get_settings=False, user=None):
    """Fetch the user's media cards, newest first; shared cards are included
    when the user opted in. Optionally also returns the Settings row."""
    if not user:
        user = request.user
    settings = Settings.objects.get(user__username=user)
    query = Q(author__username=user)
    if settings.use_shared_cards:
        query = query | Q(is_view=True)
    m_card = MediaCard.objects.order_by('-date_upd').filter(query)
    if get_settings:
        return m_card, settings
    return m_card
def uncheck_new_data_m_card(m_cards, request, commands, stop_list=None):
    """Clear the is_new_data flag on the user's own cards (skipping ids in
    *stop_list*); cards owned by other authors are reported, not modified."""
    if stop_list is None:
        stop_list = []
    for m_card in m_cards:
        if m_card.author != request.user:
            message_or_print(
                request,
                commands,
                f'{m_card.short_name} - остается в списке т.к создана другим автором: {m_card.author}',
                messages.SUCCESS)
            continue
        if m_card.id not in stop_list:
            m_card.is_new_data = False
            m_card.save()
def create_new_uid(request):
    """Regenerate the user's share uid as "<settings_id>:<random-64-chars>"."""
    user_set = Settings.objects.get(user=request.user)
    token = get_random_string(length=64)
    user_set.uuid = f'{user_set.id}:{token}'
    user_set.save()
def dw_update_m_cards(request=None, m_cards=None, user=None):
    """Refresh card metadata from the trackers and flag cards whose upstream
    date_upd advanced. Returns the (possibly updated) card set."""
    if not user:
        user = request.user
    if not m_cards:
        m_cards = get_m_card_set(user=user)
    torrents = [m_card.url for m_card in m_cards]
    cookies = get_cookies(user)
    upd_m_cards = get_m_cards_to_urls(torrents, cookies)
    for m_card in m_cards:
        # Robustness: a tracker/plugin failure may leave this url out of the
        # response; skip the card instead of raising KeyError.
        fresh = upd_m_cards.get(m_card.url)
        if fresh and fresh['date_upd'] > m_card.date_upd:
            m_card.date_upd = fresh['date_upd']
            m_card.full_name = fresh['full_name']
            m_card.magnet_url = fresh['magnet_url']
            m_card.size = fresh['size']
            m_card.is_new_data = True
            m_card.save()
    return m_cards
def get_user_by_uid(uid):
    """Resolve a share uid of the form "<settings_id>:<token>" to its User.

    Returns None (implicitly) for malformed uids or when no unique match
    exists.
    """
    parts = uid.split(':')
    if len(parts) != 2:
        return None
    users = Settings.objects.filter(uuid=uid, user__id=parts[0])
    if users.count() == 1:
        return users.first().user
|
from google.cloud import texttospeech
#from google.cloud import exceptions
print("Hello")
###############################################################################
# Developer: Abdullah Najjar
# Description: Read 3 books from text files provided from an open source project,
# then, allows Google API to read through the text using natural language processing.
# Books: The stranger by Albert Camus,
# Bugs: 0, Needs testing to find out which parts are missing, add 2 books and sources
# Date: 10 October, 2019
###############################################################################
def readBook():
    """Load the book text files and synthesize speech with Google Cloud TTS.

    The MP3 bytes returned by the API are written to the file "Annie_output".
    NOTE(review): all three input paths point to the same book and the loaded
    text is never used — the request synthesizes the literal "welcome";
    presumably the book text should feed SynthesisInput. Confirm and wire in
    the two remaining books.
    """
    # Input file #1: The Stranger (with guarantees the handles are closed).
    with open("TheStranger-AlbertCamus.txt", "r") as input_file_first:
        book_first = input_file_first.read()
    # Input file #2 (TODO: replace with the second book)
    with open("TheStranger-AlbertCamus.txt", "r") as input_file_sec:
        book_sec = input_file_sec.read()
    # Input file #3 (TODO: replace with the third book)
    with open("TheStranger-AlbertCamus.txt", "r") as input_file_third:
        book_third = input_file_third.read()
    # Instance of a client
    client = texttospeech.TextToSpeechClient()
    # Set the text input to be synthesized.
    # Bug fix: the variable was named `synthesis_in` but later used as
    # `synthesis_input`, raising NameError at runtime.
    synthesis_input = texttospeech.types.SynthesisInput(text="welcome")
    # Voice request
    voice = texttospeech.types.VoiceSelectionParams(
        language_code='en-US',
        ssml_gender=texttospeech.enums.SsmlVoiceGender.FEMALE)
    # Type of audio file to be returned
    audio_config = texttospeech.types.AudioConfig(
        audio_encoding=texttospeech.enums.AudioEncoding.MP3)
    # Response
    response = client.synthesize_speech(synthesis_input, voice, audio_config)
    # Response content written to the output file
    with open('Annie_output', 'wb') as out:
        out.write(response.audio_content)
    print('Audio output in file "Annie_output.mp3"')
|
#!/usr/bin/python3
# Advent of Code 2020 day 3: count trees ('#') hit while sledding down a grid.
with open('input.txt') as f:
    # NOTE: this shadows the builtin input(); check_slope reads this global.
    input = f.read().splitlines()
def check_slope(x_inc, y_inc, grid=None):
    """Count '#' cells hit while descending *grid*, moving *x_inc* right and
    *y_inc* down per step and wrapping around horizontally.

    *grid* defaults to the module-level `input` (the parsed puzzle file),
    keeping the original call sites working; passing it explicitly makes the
    function testable and reusable.
    """
    if grid is None:
        grid = input  # module-level list of rows (shadows the builtin)
    x = 0
    y = 0
    trees = 0
    width = len(grid[0])  # hoisted: rows are assumed equally wide
    while y < len(grid):
        if grid[y][x] == '#':
            trees += 1
        x = (x + x_inc) % width
        y += y_inc
    return trees
# Multiply together the tree counts of the five required slopes.
m = check_slope(1, 2)
for right in (1, 3, 5, 7):
    m *= check_slope(right, 1)
print(m)
|
import pandas as pd
import numpy as np
from sklearn import model_selection,preprocessing
import RN_Perceptron as rn

# Load the Drug5 data set.
datos = pd.read_csv('../Datos/Drug5.csv')
#-- map ordinal attributes to numeric codes ---
mapeo = {'Sex': {'F':1, 'M':0},
    'BP':{'HIGH':2, 'NORMAL':1, 'LOW':0},
    'Cholesterol':{'NORMAL':0, 'HIGH':1}}
datos.replace(mapeo, inplace=True)
entradas = np.array(datos.iloc[:,:-1]) #-- every column except the last one
# # converting nominal attributes to numeric
# entradas = np.array(pd.get_dummies(datos.iloc[:,:-1]))
## attribute names
#titulos = list(entradas.columns.values)
#--- BINARY TARGET: 1 if the drug is "drugY"; 0 otherwise ---
salidas = np.array(datos['Drug']=="drugY") * 1
nomClase = ['Otra', 'drugY']  # class labels (other / drugY)
#--- TRAINING AND TEST SETS ---
X_train, X_test, T_train, T_test = model_selection.train_test_split(
    entradas, salidas, test_size=0.30, random_state=42)
normalizarEntrada = 1 # 1 to normalize the inputs; 0 to leave them as-is
if normalizarEntrada:
    # Scale feature values into [0, 1]; the scaler is fit on the training
    # split only and re-applied to the test split.
    min_max_scaler = preprocessing.MinMaxScaler()
    X_train = min_max_scaler.fit_transform(X_train)
    X_test = min_max_scaler.transform(X_test)
alfa = 0.01       # learning rate
MAX_ITE = 650     # iteration cap for training
[W, b, ite] = rn.entrena_Perceptron(X_train, T_train,alfa, MAX_ITE)
yTrain = rn.aplica_Perceptron(X_train, W, b)
# Compute the perceptron's responses on the test split.
yTest = rn.aplica_Perceptron(X_test,W,b)
aciertosTrain = 100 * np.sum(yTrain==T_train)/len(T_train)
aciertosTest = 100 * np.sum(yTest==T_test)/len(T_test)
print("iteraciones utilizadas = ",ite)
print("%% aciertos datos entrenamiento %.2f:" % aciertosTrain)
print("%% aciertos datos de testeo %.2f:" % aciertosTest)
|
""" Run part of speech tagger on a document collection and for every distinct word,
output the distribution of the part of speech tags """
import spacy
import plac
import logging
import numpy as np
import os
# Upper bound on document length accepted by spaCy.
MAX_LENGTH = 100000
nlp = spacy.load('en_core_web_sm')
# NOTE(review): spaCy's default max_length is 1,000,000 — this *lowers* the
# limit to 100k characters; confirm that is intended.
nlp.max_length = MAX_LENGTH
logging.basicConfig (format="%(asctime)s : %(levelname)s : %(message)s", level=logging.INFO)
@plac.annotations(
    srcdir=("the source directory", "positional"),
    srcfile=("the source file", "positional"),
    rate=("the sampling rate", "option", None, float)
)
def main (srcdir, srcfile, rate=1.0):
    """POS-tag a (uniformly sampled) fraction of the lines in srcdir/srcfile
    and write, for every distinct lower-cased token, its tag-frequency
    distribution to the sibling .pos file (token<TAB>tag:count...)."""
    src = os.path.join (srcdir, srcfile)
    tgt = os.path.join (srcdir, os.path.splitext (srcfile)[0] + ".pos")
    V = dict ()  # token -> {tag: count}
    logging.info ("{0} Begin processing".format (src))
    nAccepted = 0
    nlines = 0
    # First pass: count the lines so we can sample uniformly without
    # materializing the whole file.
    with open (src) as fin:
        for line in fin:
            nlines += 1
    logging.info ("{0} contains {1} lines".format (src, nlines))
    linenums = set (list (np.random.choice (nlines, int(rate * nlines), replace=False)))
    logging.info ("No. of samples to be used: {0}(sampling rate: {1})".format (len(linenums), rate))
    # Second pass: tag only the sampled lines.
    with open (src) as fin:
        for i, line in enumerate (fin):
            if i in linenums:
                text = line.strip()
                doc = nlp (text, disable=["parser", "ner"])
                for token in doc:
                    t = token.text.lower()
                    tag = token.tag_
                    if t not in V:
                        V[t] = dict ()
                    if tag not in V[t]:
                        V[t][tag] = 0
                    V[t][tag] += 1
                nAccepted += 1
            if (i+1) % 1000 == 0:
                logging.info ("{0} lines read; {1} accepted so far".format (i+1, nAccepted))
    # Bug fix: use nlines rather than the loop variable `i`, which is
    # undefined (NameError) when the input file is empty.
    logging.info ("{0} Total lines accepted: {1}/{2}".format (src, nAccepted, nlines))
    with open (tgt, "w") as fout:
        for w in V:
            fout.write ("{0}".format (w))
            for tag in V[w]:
                fout.write ("\t")
                fout.write ("{0}:{1}".format (tag, V[w][tag]))
            fout.write ("\n")
# plac builds the command-line interface from main's annotations.
if __name__ == "__main__":
    plac.call (main)
|
import time, random
import numpy as np
from absl import app, flags, logging
from absl.flags import FLAGS
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
from yolov3_tf2.models import (
YoloV3, YoloV3Tiny
)
from yolov3_tf2.dataset import transform_images
from yolov3_tf2.utils import draw_outputs, convert_boxes
from deep_sort import preprocessing
from deep_sort import nn_matching
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet
from PIL import Image
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
from matplotlib import cm
import mysql.connector as mariadb
import sys
# import tkinter libraries for GUI display
from tkinter import *
import tkinter as tk
#imports below for Yovolov, deep sort, etc.
# define flags for weights, classes, etc,
# Command-line flags, consumed in main() via absl FLAGS.
flags.DEFINE_string('classes', './data/labels/coco.names', 'path to classes file')
flags.DEFINE_string('weights', './weights/yolov3.tf',
                    'path to weights file')
flags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_string('video', './data/video/test.mp4',
                    'path to video file or number for webcam)') #footage being analysed
flags.DEFINE_string('output', None, 'path to output video')
flags.DEFINE_string('output_format', 'XVID', 'codec used in VideoWriter when saving video to file')
flags.DEFINE_integer('num_classes', 80, 'number of classes in the model')
def market_section(i):
    """Return the human-readable market section name for area code *i*.

    Codes 1-8 correspond to the sections laid out in the floor matrix ``M``;
    any other value yields the literal string "Invalid section number".
    """
    names = ('shoes', 'pots', 'kitchen equipments', 'paintings',
             'clothes', 'souvenir', 'bottles', 'bags')
    sections = dict(enumerate(names, start=1))
    return sections.get(i, "Invalid section number")
# main
def main(_argv):
    """Track people in a video with YOLOv3 + Deep SORT, accumulate per-grid-cell
    dwell times, persist them to MariaDB, and plot engagement figures.

    Fixes vs. the previous revision: `FLAGS.ouput` typo (AttributeError at
    shutdown), unconditional `list_file.close()` when no --output was given,
    duplicated `plt.axes` call, bare `except`s, and `conn` being unbound in
    the DB `finally` blocks when connect() itself fails.
    """
    # Definition of the Deep SORT parameters
    max_cosine_distance = 0.5
    nn_budget = None
    nms_max_overlap = 1.0
    # initialize deep sort -- see the deep_sort repository for details
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)
    # A nearest neighbor distance metric that, for each target, returns the
    # closest distance to any sample observed so far.
    metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
    # multi target tracker
    tracker = Tracker(metric)
    # enable memory growth on the first visible GPU, if any
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)
    # select which YoloV3 variant to use
    if FLAGS.tiny:
        yolo = YoloV3Tiny(classes=FLAGS.num_classes)
    else:
        yolo = YoloV3(classes=FLAGS.num_classes)
    # load pre-trained weights (from public repos)
    yolo.load_weights(FLAGS.weights)
    logging.info('weights loaded')
    # class names, one per line of the classes file
    class_names = [c.strip() for c in open(FLAGS.classes).readlines()]
    logging.info('classes loaded')
    # open a webcam (numeric flag) or a video file
    try:
        vid = cv2.VideoCapture(int(FLAGS.video))
    except ValueError:  # bug fix: was a bare except; only int() can fail here
        vid = cv2.VideoCapture(FLAGS.video)
    out = None
    list_file = None  # only created when an output video is requested
    if FLAGS.output:
        # by default VideoCapture returns float instead of int
        width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = int(vid.get(cv2.CAP_PROP_FPS))
        codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)
        out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))
        list_file = open('detection.txt', 'w')
        frame_index = -1
    # read one frame up-front to learn the frame geometry
    _, img = vid.read()
    h, w, c = img.shape
    h_numStep = 12  # number of boxes in a column
    w_numStep = 20  # number of boxes in a row
    # matrix M of area category codes; market_section() maps codes to names
    M = [[ 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 5, 5, 5, 5, 5],
         [ 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 5, 5, 5, 5, 5],
         [ 1, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 8, 8],
         [ 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 8, 8, 8, 8],
         [ 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8],
         [ 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 7, 7],
         [ 2, 2, 2, 2, 2, 2, 2, 2, 4, 6, 6, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7],
         [ 2, 2, 2, 2, 2, 2, 2, 6, 6, 6, 6, 6, 8, 8, 8, 8, 8, 7, 7, 7, 7],
         [ 2, 2, 2, 2, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 8, 8, 7, 7, 7, 7, 7],
         [ 2, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 8, 7, 7, 7, 7, 7],
         [ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7],
         [ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7]]
    # total time customers stayed in box[i][j]
    total_time_engage = [[0 for i in range(w_numStep+1)] for j in range(h_numStep+1)]
    # time customer k has been stationary in box[i][j]
    stationary_time = [[[0 for i in range(w_numStep+1)] for j in range(h_numStep+1)] for k in range(100000)]
    # positions of the single customer singled out for plotting
    x_single_tracking = []
    y_single_tracking = []
    single_trackingID = 34  # that customer's tracking ID
    # last observed grid position per customer
    max_trackID = 0
    x_trackID = [-1] * 1000000
    y_trackID = [-1] * 1000000
    # file receiving the total_time_engage dump each frame
    engage_path = 'total_time_engage.txt'
    fps = 0.0
    count = 0
    while True:
        _, img = vid.read()
        if img is None:
            logging.warning("Empty Frame")
            time.sleep(0.1)
            count += 1
            # tolerate a couple of empty frames before giving up
            if count < 3:
                continue
            else:
                break
        # BGR -> RGB for the model
        img_in = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # add a leading batch axis
        img_in = tf.expand_dims(img_in, 0)
        # resize to the model input size (FLAGS.size, e.g. 416x416)
        img_in = transform_images(img_in, FLAGS.size)
        t1 = time.time()
        time_finish_last_tracking = t1
        boxes, scores, classes, nums = yolo.predict(img_in)
        classes = classes[0]
        names = []
        for i in range(len(classes)):
            names.append(class_names[int(classes[i])])
        names = np.array(names)
        converted_boxes = convert_boxes(img, boxes[0])
        features = encoder(img, converted_boxes)
        # wrap each detection for Deep SORT
        detections = [Detection(bbox, score, class_name, feature) for bbox, score, class_name, feature in zip(converted_boxes, scores[0], names, features)]
        # initialize color map (one color per track id, modulo 20)
        cmap = plt.get_cmap('tab20b')
        colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]
        # run non-maxima suppression
        boxs = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        classes = np.array([d.class_name for d in detections])
        indices = preprocessing.non_max_suppression(boxs, classes, nms_max_overlap, scores)
        detections = [detections[i] for i in indices]
        # advance the tracker with this frame's detections
        tracker.predict()
        tracker.update(detections)
        # draw the horizontal grid lines
        y_step = int(h/h_numStep)
        y_start = 0
        while True:
            y_end = y_start + y_step
            cv2.rectangle(img, (0, y_start), (int(w), y_end), (0,0,0), 1)
            y_start = y_end
            if y_start >= int(h):
                break
        # draw the vertical grid lines
        x_step = int(w/w_numStep)
        x_start = 0
        while True:
            x_end = x_start + x_step
            cv2.rectangle(img, (x_start, 0), (x_end, int(h)), (0,0,0), 1)
            x_start = x_end
            if x_start >= int(w):
                break
        # wall-clock time attributed to the current positions
        time_step = time.time() - time_finish_last_tracking
        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            bbox = track.to_tlbr()  # corrected/predicted bounding box
            class_name = track.get_class()
            color = colors[int(track.track_id) % len(colors)]
            color = [i * 255 for i in color]
            # center of the bounding box
            x_cent = int(bbox[0] + (bbox[2]-bbox[0])/2)
            y_cent = int(bbox[1] + (bbox[3]-bbox[1])/2)
            # draw the detection, its label background and "class-id" text
            cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 2)
            cv2.rectangle(img, (int(bbox[0]), int(bbox[1]-30)), (int(bbox[0])+(len(class_name)+len(str(track.track_id)))*17, int(bbox[1])), color, -1)
            cv2.putText(img, class_name + "-" + str(track.track_id),(int(bbox[0]), int(bbox[1]-10)),0, 0.75, (255,255,255),2)
            # update the stationary_time and total_time_engage accumulators
            if class_name == "person":
                x_pos = int(x_cent/x_step)
                y_pos = int(y_cent/y_step)
                if track.track_id > max_trackID:
                    max_trackID = track.track_id
                x_trackID[track.track_id] = y_pos
                y_trackID[track.track_id] = x_pos
                stationary_time[track.track_id][y_pos][x_pos] += time_step
                total_time_engage[y_pos][x_pos] += time_step
                # record the path of the single tracked customer
                if track.track_id == single_trackingID:
                    x_single_tracking.append(x_pos)
                    y_single_tracking.append(y_pos)
        # report the current cell and market section per known customer
        for track_index in range(max_trackID + 1):
            if x_trackID[track_index] != -1:
                print ("customerID " + str(track_index) + ": [" + str(x_trackID[track_index]) + "," + str(y_trackID[track_index]) + "] in "
                       + market_section(M[x_trackID[track_index]][y_trackID[track_index]]))
        # dump the engagement matrix (overwritten every frame)
        with open(engage_path, 'w') as filetostore:
            for i in range(h_numStep):
                for j in range(w_numStep):
                    filetostore.write("{:.2f}".format(total_time_engage[i][j]) + " ")
                filetostore.write("\n")
        ### UNCOMMENT BELOW IF YOU WANT CONSTANTLY CHANGING YOLO DETECTIONS TO BE SHOWN ON SCREEN
        #for det in detections:
        #    bbox = det.to_tlbr()
        #    cv2.rectangle(img,(int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(255,0,0), 2)
        time_finish_last_tracking = time.time()
        # smoothed FPS overlay
        fps = ( fps + (1./(time.time()-t1)) ) / 2
        cv2.putText(img, "FPS: {:.2f}".format(fps), (0, 30),
                    cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
        cv2.imshow('output', img)
        if FLAGS.output:
            out.write(img)
            frame_index = frame_index + 1
            list_file.write(str(frame_index)+' ')
            if len(converted_boxes) != 0:
                for i in range(0,len(converted_boxes)):
                    list_file.write(str(converted_boxes[i][0]) + ' '+str(converted_boxes[i][1]) + ' '+str(converted_boxes[i][2]) + ' '+str(converted_boxes[i][3]) + ' ')
            list_file.write('\n')
        # press q to quit
        if cv2.waitKey(1) == ord('q'):
            break
    # ---- post-processing: push tracking data into the database ----
    track_time = [0] * 10000000
    track_customerID = [0] * 10000000
    track_area = ["" for x in range(10000000)]
    x_single = [0] * 10000000
    y_single = [0] * 10000000
    # flatten the (customer, cell) dwell times into parallel arrays
    track_index = -1
    for k in range(1000):
        for h in range(h_numStep):
            for w in range(w_numStep):
                if stationary_time[k][h][w] != 0:
                    track_index += 1
                    track_time[track_index] = stationary_time[k][h][w]
                    track_customerID[track_index] = k
                    track_area[track_index] = str(h) + ', ' + str(w)
    # collapse consecutive duplicate positions of the single customer
    x_tmp = -1
    y_tmp = -1
    single_track_index = -1
    for k in range(len(x_single_tracking)):
        if x_single_tracking[k] != x_tmp and y_single_tracking[k] != y_tmp:
            single_track_index += 1
            x_single[single_track_index] = x_single_tracking[k]
            y_single[single_track_index] = y_single_tracking[k]
            x_tmp = x_single[single_track_index]
            y_tmp = y_single[single_track_index]
    single_tracking_areas = ""
    for k in range(single_track_index):
        single_tracking_areas += '[' + str(x_single[k]) + ',' + str(y_single[k]) + '] , '
    # insert the per-cell dwell records into primary_table
    for k in range(track_index+1):
        conn = None  # bug fix: finally referenced conn even when connect() failed
        try:
            conn = mariadb.connect( user="root",
                                    password="root",
                                    host="localhost",
                                    database="trackingDB")
            cur = conn.cursor()
            mySql_insert_query = """INSERT INTO primary_table(trackID, customerID, area)
            VALUES (%s, %s, %s) """
            recordTuple = (k, track_customerID[k], track_area[k])
            cur.execute(mySql_insert_query, recordTuple)
            conn.commit()
        except mariadb.Error as error:
            print ("Failed to insert record into the primary_table {}".format(error))
        finally:
            if conn is not None and conn.is_connected():
                cur.close()
                conn.close()
    # insert the engagement times into the "engaged" table
    for k in range(track_index+1):
        conn = None
        try:
            conn = mariadb.connect( user="root",
                                    password="root",
                                    host="localhost",
                                    database="trackingDB")
            cur = conn.cursor()
            mySql_insert_query = """INSERT INTO engaged(trackID, engagement_time)
            VALUES (%s, %s) """
            recordTuple = (k, track_time[k])
            cur.execute(mySql_insert_query, recordTuple)
            conn.commit()
        except mariadb.Error as error:
            print ("Failed to insert record into the engaged table {}".format(error))
        finally:
            if conn is not None and conn.is_connected():
                cur.close()
                conn.close()
    # insert the single customer's path into the "total_areas" table
    conn = None
    try:
        conn = mariadb.connect( user="root",
                                password="root",
                                host="localhost",
                                database="trackingDB")
        cur = conn.cursor()
        mySql_insert_query = """INSERT INTO total_areas(customerID, all_areas_visited)
        VALUES (%s, %s) """
        recordTuple = (single_trackingID, single_tracking_areas)
        cur.execute(mySql_insert_query, recordTuple)
        conn.commit()
    except mariadb.Error as error:
        print ("Failed to insert record into the total_areas table {}".format(error))
    finally:
        if conn is not None and conn.is_connected():
            cur.close()
            conn.close()
    # plot the engagement-time surface
    fig = plt.figure(1)
    fig.suptitle('Engagement time on different areas', fontsize=20)
    ax = plt.axes(projection='3d')  # bug fix: this axes was created twice
    # Data for the three-dimensional surface
    x = np.arange(w_numStep-1, -1, -1)
    y = np.linspace(0, h_numStep-1, h_numStep)
    X, Y = np.meshgrid(x, y)
    Z = np.array([[total_time_engage[i][j] for j in range(w_numStep)] for i in range(h_numStep)])
    ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='viridis', edgecolor='none')
    ax.set_xlabel('width')
    ax.set_ylabel('height')
    ax.set_zlabel('time')
    ax.view_init(35, 80)
    # hide the tick labels on the current axes
    frame = plt.gca()
    frame.axes.get_xaxis().set_ticks([])
    frame.axes.get_yaxis().set_ticks([])
    # plot the single customer's walking pattern
    fig2 = plt.figure(2)
    fig2_title = 'Walking pattern of a single customer( trackingID = ' + str(single_trackingID) + ')'
    fig2.suptitle(fig2_title, fontsize=15)
    plt.plot(x_single_tracking,y_single_tracking, 'ro')
    plt.axis([0,w_numStep,h_numStep,0])
    frame.axes.get_xaxis().set_ticks([])
    frame.axes.get_yaxis().set_ticks([])
    fig.savefig('engage_level.jpg')
    fig2.savefig('single_tracking.jpg')
    plt.show()
    vid.release()
    if FLAGS.output:  # bug fix: was FLAGS.ouput (AttributeError)
        out.release()
        # bug fix: list_file only exists when --output was given; the old
        # unconditional close() raised NameError otherwise.
        list_file.close()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    try:
        app.run(main)
    except SystemExit:
        # absl's app.run raises SystemExit on completion; swallow it so the
        # script can be embedded/invoked from other tooling without dying.
        pass
|
# Python 2 regression-test harness: for each tests/*.py script, feed the
# matching inputs/*.in file on stdin and args/*.arg on the command line, then
# compare captured stdout against outputs/*.out (regenerating the expected
# output when it is missing or regeneration is forced).
import os, sys, glob
testdir = sys.argv[1]                      # root directory of the test suite
forcegen = len(sys.argv) > 2               # any extra argument forces regeneration
testpatt = os.path.join(testdir, 'tests', '*.py')
testfiles = glob.glob(testpatt)
testfiles.sort()
for test in testfiles:
    # derive the companion input/output/args paths from the test file name
    infile = os.path.basename(test).replace('.py', '.in')
    inpath = os.path.join(testdir, 'inputs', infile)
    outfile = os.path.basename(test).replace('.py', '.out')
    outpath = os.path.join(testdir, 'outputs', outfile)
    argfile = os.path.basename(test).replace('.py', '.arg')
    argpath = os.path.join(testdir, 'args', argfile)
    pypath = sys.executable
    arglist = open(argpath).readline()
    # NOTE: os.popen2 is Python 2-only (removed in Python 3; use subprocess)
    (stdin, stdout) = os.popen2('%s %s %s' % (pypath, test, arglist))
    stdin.write(open(inpath).read())
    stdin.close()
    output = stdout.read()
    if not os.path.exists(outpath) or forcegen:
        # no golden output yet (or forced): record the current output
        print 'generating:', outpath
        open(outpath, 'w').write(output)
    else:
        oldout = open(outpath).read()
        if oldout == output:
            print 'passed:', test
        else:
            print 'FAILED:', test
|
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# @File : run.py
# @Created : 2020/10/29 3:30 下午
# @Software : PyCharm
#
# @Author : Liu.Qi
# @Contact : liuqi_0725@aliyun.com
#
# @Desc : 启动类
# -------------------------------------------------------------------------------
import argparse
import os
import sys
import signal
from chaussette.server import make_server
from werkzeug.serving import run_with_reloader
from tokenmanager.app import create_app
def _quit(signal,frame):
print("Bye!")
sys.exit(0)
def main(args=None):
    """Entry point for the Vanas Token Manager web service.

    :param args: command-line arguments, i.e. everything after the program
                 name.  Defaults to sys.argv[1:], resolved at call time.
    """
    if args is None:
        # Fix: the original default `args=sys.argv[1:]` was evaluated once at
        # import time, so later changes to sys.argv were silently ignored.
        args = sys.argv[1:]
    parser = argparse.ArgumentParser(description="Vanas Token Manager")
    # Optional pre-opened socket file descriptor (served via chaussette).
    parser.add_argument("--fd", type=int, default=None)
    # parser.add_argument("--config-file", type=str,default=None, help="Config file path")
    args = parser.parse_args(args)
    # Create the Flask app for the configured environment.
    app = create_app(os.getenv('VANAS_TOKEN_ENV') or 'default')
    app.logger.info("vanas-tokenmanager create app.")
    # Defaults used when the app config does not override them.
    host = app.config.get('host', '0.0.0.0')
    port = app.config.get('port', 5000)
    debug = app.config.get('DEBUG', True)
    # Exit cleanly on Ctrl-C / SIGTERM.
    signal.signal(signal.SIGINT, _quit)
    signal.signal(signal.SIGTERM, _quit)
    # app.logger.info("args.fd >> {}".format(args.fd))
    # app.logger.info("args.config_file >> {}".format(args.config_file))

    def runner():
        if args.fd is not None:
            # Serve on the inherited file descriptor via chaussette.
            httpd = make_server(app, host='fd://%d' % args.fd)
            httpd.serve_forever()
        else:
            app.run(debug=debug, host=host, port=port)

    app.logger.info("Web Runner.")
    runner()

if __name__ == "__main__":
    main()
import http
from flask import Blueprint
# Blueprint for the application root; registered with the app elsewhere.
root_blueprint = Blueprint("root", __name__, url_prefix="/")
@root_blueprint.route("/", methods=["GET"])
@root_blueprint.route("/index", methods=["GET"])
def index():
    """Landing endpoint: return a plain greeting with HTTP 200 OK."""
    return "Hello, World!", http.HTTPStatus.OK
|
# Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
import unittest
# Pull every name from the package so unittest discovery below can see the
# test cases defined there.
from . import *
if __name__ == "__main__":
    unittest.main()
|
from .API import API |
# Demonstration of dynamic typing: rebinding x from int to str is legal,
# but mixing str and int with '+' fails at runtime (intentional example).
x = 1
x = 'Hello Joe'
print(x + '!') # OK
print(x + 2) # During runtime, we get `TypeError: must be str, not int`.
|
from py_cron_expression.py_cron import CronJobExpression
__all__ = ["CronJobExpression"]
|
import re
from functools import partial
from typing import Any, Dict, Optional
from inspect import signature
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message, \
validate_extract_webhook_http_header, UnexpectedWebhookEventType
from zerver.lib.webhooks.git import CONTENT_MESSAGE_TEMPLATE, \
TOPIC_WITH_BRANCH_TEMPLATE, TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE, \
get_commits_comment_action_message, get_issue_event_message, \
get_pull_request_event_message, get_push_commits_event_message, \
get_push_tag_event_message, get_setup_webhook_message
from zerver.models import UserProfile
from zerver.lib.webhooks.common import \
get_http_headers_from_filename
# Derive the HTTP headers the test suite should send from a fixture's name.
fixture_to_headers = get_http_headers_from_filename("HTTP_X_GITHUB_EVENT")
class UnknownEventType(Exception):
    """Raised for unrecognized webhook events.

    NOTE(review): appears unused in this file — get_event raises
    UnexpectedWebhookEventType instead; confirm before removing.
    """
    pass
def get_opened_or_update_pull_request_body(payload: Dict[str, Any],
                                           include_title: Optional[bool]=False) -> str:
    """Build the message for an opened/updated/reopened/edited PR event."""
    pr = payload['pull_request']
    # GitHub reports code pushes to an open PR as 'synchronize'; say 'updated'.
    action = 'updated' if payload['action'] == 'synchronize' else payload['action']
    assignee_info = pr.get('assignee')
    assignee = assignee_info['login'] if assignee_info else None
    return get_pull_request_event_message(
        get_sender_name(payload),
        action,
        pr['html_url'],
        target_branch=pr['head']['ref'],
        base_branch=pr['base']['ref'],
        message=pr['body'],
        assignee=assignee,
        number=pr['number'],
        title=pr['title'] if include_title else None
    )
def get_assigned_or_unassigned_pull_request_body(payload: Dict[str, Any],
                                                 include_title: Optional[bool]=False) -> str:
    """Message for PR (un)assignment; names the assignee when present."""
    pr = payload['pull_request']
    assignee_info = pr.get('assignee')
    login = assignee_info.get('login') if assignee_info is not None else None
    message = get_pull_request_event_message(
        get_sender_name(payload),
        payload['action'],
        pr['html_url'],
        number=pr['number'],
        title=pr['title'] if include_title else None
    )
    if login is None:
        return message
    # Replace the trailing period with "to <assignee>.".
    return "{} to {}.".format(message[:-1], login)
def get_closed_pull_request_body(payload: Dict[str, Any],
                                 include_title: Optional[bool]=False) -> str:
    """Message for a closed PR, distinguishing merged from unmerged."""
    pr = payload['pull_request']
    if pr['merged']:
        action = 'merged'
    else:
        action = 'closed without merge'
    return get_pull_request_event_message(
        get_sender_name(payload),
        action,
        pr['html_url'],
        number=pr['number'],
        title=pr['title'] if include_title else None
    )
def get_membership_body(payload: Dict[str, Any]) -> str:
    """Message for a team-membership change (member added to / removed from a team)."""
    member = payload['member']
    action = payload['action']
    preposition = 'from' if action == 'removed' else 'to'
    return u"{sender} {action} [{username}]({html_url}) {preposition} the {team_name} team.".format(
        sender=get_sender_name(payload),
        action=action,
        username=member['login'],
        html_url=member['html_url'],
        preposition=preposition,
        team_name=payload['team']['name']
    )
def get_member_body(payload: Dict[str, Any]) -> str:
    """Message for a collaborator change on a repository."""
    member = payload['member']
    return u"{} {} [{}]({}) to [{}]({}).".format(
        get_sender_name(payload),
        payload['action'],
        member['login'],
        member['html_url'],
        get_repository_name(payload),
        payload['repository']['html_url']
    )
def get_issue_body(payload: Dict[str, Any],
                   include_title: Optional[bool]=False) -> str:
    """Message for issue events (opened, closed, ...)."""
    issue = payload['issue']
    assignee = issue['assignee']
    assignee_login = assignee['login'] if assignee else None
    return get_issue_event_message(
        get_sender_name(payload),
        payload['action'],
        issue['html_url'],
        issue['number'],
        issue['body'],
        assignee=assignee_login,
        title=issue['title'] if include_title else None
    )
def get_issue_comment_body(payload: Dict[str, Any],
                           include_title: Optional[bool]=False) -> str:
    """Message for a comment created/edited/deleted on an issue."""
    comment = payload['comment']
    issue = payload['issue']
    raw_action = payload['action']
    # Newly created comments render as a bare link; other actions name the verb.
    if raw_action == 'created':
        verb = '[commented]'
    else:
        verb = '{} a [comment]'.format(raw_action)
    action = verb + '({}) on'.format(comment['html_url'])
    return get_issue_event_message(
        get_sender_name(payload),
        action,
        issue['html_url'],
        issue['number'],
        comment['body'],
        title=issue['title'] if include_title else None
    )
def get_fork_body(payload: Dict[str, Any]) -> str:
    """Message for a repository fork event."""
    forkee = payload['forkee']
    return u"{} forked [{}]({}).".format(
        get_sender_name(payload), forkee['name'], forkee['html_url'])
def get_deployment_body(payload: Dict[str, Any]) -> str:
    """Message announcing a new deployment."""
    sender = get_sender_name(payload)
    return u'{} created new deployment.'.format(sender)
def get_change_deployment_status_body(payload: Dict[str, Any]) -> str:
    """Message for a deployment status change."""
    state = payload['deployment_status']['state']
    return u'Deployment changed status to {}.'.format(state)
def get_create_or_delete_body(payload: Dict[str, Any], action: str) -> str:
    """Message for branch/tag create or delete events.

    *action* ('created'/'deleted') is bound via functools.partial in the
    dispatch table.
    """
    pieces = (get_sender_name(payload), action, payload['ref_type'], payload['ref'])
    return u'{} {} {} {}.'.format(*pieces).rstrip()
def get_commit_comment_body(payload: Dict[str, Any]) -> str:
    """Message for a comment left on a commit."""
    comment = payload['comment']
    comment_url = comment['html_url']
    # The commit URL is the comment URL minus its '#...' fragment.
    commit_url, _, _ = comment_url.partition('#')
    return get_commits_comment_action_message(
        get_sender_name(payload),
        u'[commented]({})'.format(comment_url),
        commit_url,
        comment.get('commit_id'),
        comment['body'],
    )
def get_push_tags_body(payload: Dict[str, Any]) -> str:
    """Message for a tag being pushed or removed."""
    if payload.get('created'):
        action = 'pushed'
    else:
        action = 'removed'
    return get_push_tag_event_message(
        get_sender_name(payload),
        get_tag_name_from_ref(payload['ref']),
        action=action
    )
def get_push_commits_body(payload: Dict[str, Any]) -> str:
    """Message summarizing commits pushed to a branch."""
    commits_data = []
    for commit in payload['commits']:
        author = commit.get('author')
        commits_data.append({
            'name': author.get('username') or author.get('name'),
            'sha': commit['id'],
            'url': commit['url'],
            'message': commit['message'],
        })
    return get_push_commits_event_message(
        get_sender_name(payload),
        payload['compare'],
        get_branch_name_from_ref(payload['ref']),
        commits_data,
        deleted=payload['deleted']
    )
def get_public_body(payload: Dict[str, Any]) -> str:
    """Message for a repository being made public."""
    repo_url = payload['repository']['html_url']
    return u"{} made [the repository]({}) public.".format(
        get_sender_name(payload), repo_url)
def get_wiki_pages_body(payload: Dict[str, Any]) -> str:
    """Message listing wiki (gollum) page changes, one bullet per page."""
    template = u"* {action} [{title}]({url})\n"
    bullets = u''.join(
        template.format(action=page['action'], title=page['title'],
                        url=page['html_url'])
        for page in payload['pages']
    )
    return u"{}:\n{}".format(get_sender_name(payload), bullets.rstrip())
def get_watch_body(payload: Dict[str, Any]) -> str:
    """Message for a watch (star) event on the repository."""
    repo_url = payload['repository']['html_url']
    return u"{} starred [the repository]({}).".format(
        get_sender_name(payload), repo_url)
def get_repository_body(payload: Dict[str, Any]) -> str:
    """Message for repository-level actions."""
    action = payload.get('action')
    repo_url = payload['repository']['html_url']
    return u"{} {} [the repository]({}).".format(
        get_sender_name(payload), action, repo_url)
def get_add_team_body(payload: Dict[str, Any]) -> str:
    """Message for a repository being added to a team."""
    repo_url = payload['repository']['html_url']
    team = payload['team']['name']
    return u"[The repository]({}) was added to team {}.".format(repo_url, team)
def get_release_body(payload: Dict[str, Any]) -> str:
    """Message for a published release."""
    release_url = payload['release']['html_url']
    return u"{} published [the release]({}).".format(
        get_sender_name(payload), release_url)
def get_page_build_body(payload: Dict[str, Any]) -> str:
    """Message for a GitHub Pages build event.

    Maps the build status to a readable phrase; for an 'errored' build the
    error message is substituted into the phrase's placeholder.
    """
    build = payload['build']
    status = build['status']
    actions = {
        'null': 'has yet to be built',
        'building': 'is being built',
        'errored': 'has failed{}',
        'built': 'has finished building',
    }
    action = actions.get(status, 'is {}'.format(status))
    # Bug fix: str.format returns a NEW string (strings are immutable); the
    # original discarded the result, leaving a literal '{}' in the 'errored'
    # message.  Assign it back.  For phrases without a placeholder the
    # argument is simply ignored.
    action = action.format(
        CONTENT_MESSAGE_TEMPLATE.format(message=build['error']['message'])
    )
    # NOTE(review): 'trigerred' typo preserved — it is a user-facing string.
    return u"Github Pages build, trigerred by {}, {}.".format(
        payload['build']['pusher']['login'],
        action
    )
def get_status_body(payload: Dict[str, Any]) -> str:
    """Message for a commit status update; links the target URL when given."""
    state = payload['state']
    target = payload['target_url']
    status = '[{}]({})'.format(state, target) if target else state
    return u"[{}]({}) changed its status to {}.".format(
        payload['sha'][:7],  # abbreviated commit hash (original left a TODO here)
        payload['commit']['html_url'],
        status
    )
def get_pull_request_review_body(payload: Dict[str, Any],
                                 include_title: Optional[bool]=False) -> str:
    """Message for a submitted PR review."""
    pr = payload['pull_request']
    title = "for #{} {}".format(pr['number'], pr['title'])
    return get_pull_request_event_message(
        get_sender_name(payload),
        'submitted',
        payload['review']['html_url'],
        type='PR Review',
        title=title if include_title else None
    )
def get_pull_request_review_comment_body(payload: Dict[str, Any],
                                         include_title: Optional[bool]=False) -> str:
    """Message for a comment on a PR review thread."""
    action = payload['action']
    # Only newly created comments carry the comment body in the message.
    message = payload['comment']['body'] if action == 'created' else None
    pr = payload['pull_request']
    title = "on #{} {}".format(pr['number'], pr['title'])
    return get_pull_request_event_message(
        get_sender_name(payload),
        action,
        payload['comment']['html_url'],
        message=message,
        type='PR Review Comment',
        title=title if include_title else None
    )
def get_pull_request_review_requested_body(payload: Dict[str, Any],
                                           include_title: Optional[bool]=False) -> str:
    """Message naming the requested reviewer(s) for a PR."""
    requested = (payload['pull_request']['requested_reviewers'] or
                 [payload['requested_reviewer']])
    pr = payload['pull_request']
    link = "[{login}]({html_url})"
    if len(requested) == 1:
        reviewers = link.format(**requested[0])
    else:
        # Render as "a, b, and c" (comma before the final 'and').
        leading = [link.format(**r) + ", " for r in requested[:-1]]
        reviewers = "".join(leading) + "and " + link.format(**requested[-1])
    message = "**{sender}** requested {reviewers} for a review on [PR #{pr_number}]({pr_url})."
    message_with_title = ("**{sender}** requested {reviewers} for a review on "
                          "[PR #{pr_number} {title}]({pr_url}).")
    body = message_with_title if include_title else message
    return body.format(
        sender=get_sender_name(payload),
        reviewers=reviewers,
        pr_number=pr['number'],
        pr_url=pr['html_url'],
        title=pr['title'] if include_title else None
    )
def get_check_run_body(payload: Dict[str, Any]) -> str:
    """Message summarizing a (completed) check run."""
    check_run = payload['check_run']
    head_sha = check_run['head_sha']
    commit_url = "{}/commit/{}".format(payload['repository']['html_url'], head_sha)
    template = "Check [{name}]({html_url}) {status} ({conclusion}). ([{short_hash}]({commit_url}))"
    return template.format(
        name=check_run['name'],
        html_url=check_run['html_url'],
        status=check_run['status'],
        conclusion=check_run['conclusion'],
        short_hash=head_sha[:7],
        commit_url=commit_url,
    )
def get_star_body(payload: Dict[str, Any]) -> str:
    """Message for a repository being starred or unstarred."""
    if payload['action'] == 'created':
        action = 'starred'
    else:
        action = 'unstarred'
    return "{user} {action} the repository.".format(
        user=payload['sender']['login'], action=action)
def get_ping_body(payload: Dict[str, Any]) -> str:
    """Confirmation message for GitHub's webhook-setup 'ping' event."""
    return get_setup_webhook_message('GitHub', get_sender_name(payload))
def get_repository_name(payload: Dict[str, Any]) -> str:
    """Name of the repository the event belongs to."""
    return payload['repository']['name']
def get_organization_name(payload: Dict[str, Any]) -> str:
    """Login of the organization the event belongs to."""
    return payload['organization']['login']
def get_sender_name(payload: Dict[str, Any]) -> str:
    """Login of the user who triggered the event."""
    return payload['sender']['login']
def get_branch_name_from_ref(ref_string: str) -> str:
    """Strip a leading 'refs/heads/' from a git ref."""
    return re.sub(r'^refs/heads/', '', ref_string)
def get_tag_name_from_ref(ref_string: str) -> str:
    """Strip a leading 'refs/tags/' from a git ref."""
    return re.sub(r'^refs/tags/', '', ref_string)
def is_commit_push_event(payload: Dict[str, Any]) -> bool:
    """True when a push event targets a branch (refs/heads/...), not a tag."""
    return bool(re.match(r'^refs/heads/', payload['ref']))
def get_subject_based_on_type(payload: Dict[str, Any], event: str) -> str:
    """Pick the Zulip topic (subject) for the given internal event name.

    Falls through to the plain repository name when no specialized topic
    applies — including 'ping' events that do carry a repository.
    """
    if 'pull_request' in event:
        return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
            repo=get_repository_name(payload),
            type='PR',
            id=payload['pull_request']['number'],
            title=payload['pull_request']['title']
        )
    elif event.startswith('issue'):
        return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
            repo=get_repository_name(payload),
            type='Issue',
            id=payload['issue']['number'],
            title=payload['issue']['title']
        )
    elif event.startswith('deployment'):
        return u"{} / Deployment on {}".format(
            get_repository_name(payload),
            payload['deployment']['environment']
        )
    elif event == 'membership':
        return u"{} organization".format(payload['organization']['login'])
    elif event == 'push_commits':
        return TOPIC_WITH_BRANCH_TEMPLATE.format(
            repo=get_repository_name(payload),
            branch=get_branch_name_from_ref(payload['ref'])
        )
    elif event == 'gollum':
        return TOPIC_WITH_BRANCH_TEMPLATE.format(
            repo=get_repository_name(payload),
            branch='Wiki Pages'
        )
    elif event == 'ping':
        # Organization-level pings have no repository; use the org name.
        if payload.get('repository') is None:
            return get_organization_name(payload)
    elif event == 'check_run':
        return u"{} / checks".format(get_repository_name(payload))
    return get_repository_name(payload)
# Dispatch table: internal event name (as returned by get_event) -> renderer.
# 'create'/'delete' share one renderer with the action bound via partial.
EVENT_FUNCTION_MAPPER = {
    'team_add': get_add_team_body,
    'commit_comment': get_commit_comment_body,
    'closed_pull_request': get_closed_pull_request_body,
    'create': partial(get_create_or_delete_body, action='created'),
    'check_run': get_check_run_body,
    'delete': partial(get_create_or_delete_body, action='deleted'),
    'deployment': get_deployment_body,
    'deployment_status': get_change_deployment_status_body,
    'fork': get_fork_body,
    'gollum': get_wiki_pages_body,
    'issue_comment': get_issue_comment_body,
    'issues': get_issue_body,
    'member': get_member_body,
    'membership': get_membership_body,
    'opened_or_update_pull_request': get_opened_or_update_pull_request_body,
    'assigned_or_unassigned_pull_request': get_assigned_or_unassigned_pull_request_body,
    'page_build': get_page_build_body,
    'ping': get_ping_body,
    'public': get_public_body,
    'pull_request_review': get_pull_request_review_body,
    'pull_request_review_comment': get_pull_request_review_comment_body,
    'pull_request_review_requested': get_pull_request_review_requested_body,
    'push_commits': get_push_commits_body,
    'push_tags': get_push_tags_body,
    'release': get_release_body,
    'repository': get_repository_body,
    'star': get_star_body,
    'status': get_status_body,
    'watch': get_watch_body,
}
# Events GitHub may send that are acknowledged but deliberately not forwarded.
IGNORED_EVENTS = [
    'repository_vulnerability_alert',
    'project_card',
    'check_suite',
    'organization',
    'milestone',
    'meta',
]
@api_key_only_webhook_view('GitHub', notify_bot_owner_on_invalid_json=True)
@has_request_variables
def api_github_webhook(
        request: HttpRequest, user_profile: UserProfile,
        payload: Dict[str, Any]=REQ(argument_type='body'),
        branches: str=REQ(default=None),
        user_specified_topic: Optional[str]=REQ("topic", default=None)) -> HttpResponse:
    """GitHub webhook endpoint: dispatch the event to its body renderer.

    Events that get_event maps to None (ignored/filtered) are still
    acknowledged with a success response so GitHub does not retry.
    """
    event = get_event(request, payload, branches)
    if event is not None:
        subject = get_subject_based_on_type(payload, event)
        body_function = get_body_function_based_on_type(event)
        # Only pass include_title to renderers whose signature accepts it.
        if 'include_title' in signature(body_function).parameters:
            body = body_function(
                payload,
                include_title=user_specified_topic is not None
            )
        else:
            body = body_function(payload)
        check_send_webhook_message(request, user_profile, subject, body)
    return json_success()
def get_event(request: HttpRequest, payload: Dict[str, Any], branches: Optional[str]) -> Optional[str]:
    """Map an incoming webhook request to an internal event name.

    Returns None for deliberately ignored events (and pushes filtered out by
    *branches*); raises UnexpectedWebhookEventType for anything unrecognized.

    :param branches: optional branch filter for push events (annotation fixed
        from plain ``str`` — callers pass None when no filter is set).
    """
    event = validate_extract_webhook_http_header(request, 'X_GITHUB_EVENT', 'GitHub')
    if event == 'pull_request':
        action = payload['action']
        if action in ('opened', 'synchronize', 'reopened', 'edited'):
            return 'opened_or_update_pull_request'
        if action in ('assigned', 'unassigned'):
            return 'assigned_or_unassigned_pull_request'
        if action == 'closed':
            return 'closed_pull_request'
        if action == 'review_requested':
            return '{}_{}'.format(event, action)
        # Unsupported pull_request events
        if action in ('labeled', 'unlabeled', 'review_request_removed'):
            return None
    if event == 'push':
        if is_commit_push_event(payload):
            if branches is not None:
                branch = get_branch_name_from_ref(payload['ref'])
                # NOTE(review): substring match — branch 'foo' also matches a
                # filter containing 'foobar'; confirm the intended semantics.
                if branches.find(branch) == -1:
                    return None
            return "push_commits"
        else:
            return "push_tags"
    elif event == 'check_run':
        # Only completed check runs are forwarded.
        if payload['check_run']['status'] != 'completed':
            return None
        return event
    elif event in EVENT_FUNCTION_MAPPER or event == 'ping':
        # Direct membership test; the original built a throwaway list copy
        # via list(EVENT_FUNCTION_MAPPER.keys()).
        return event
    elif event in IGNORED_EVENTS:
        return None
    raise UnexpectedWebhookEventType('GitHub', event)
def get_body_function_based_on_type(type: str) -> Any:
    """Look up the body renderer for *type*; None when unmapped.

    NOTE(review): the parameter shadows the builtin ``type``; renaming it
    could break keyword callers, so it is left as-is.
    """
    return EVENT_FUNCTION_MAPPER.get(type)
|
# Script Name : dir_test.py
# Author : Craig Richards
# Created : 29th November 2011
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Tests to see if the directory testdir exists, if not it will create the directory for you
from __future__ import print_function
from code1.color_print import Logger
import os
try:
    # Resolve the current user's home directory.
    home = os.path.expanduser('~')
    Logger.warn(home)
    if not os.path.exists(home):
        # NOTE(review): despite the header comment, nothing is created here —
        # the script only reports. The message also lacks a space before
        # 'not exists.' (runtime string left untouched).
        print(home + 'not exists.')
except Exception as e:
    # Broad catch: report any failure instead of crashing.
    print(e)
|
# -*- coding: utf-8 -*-
from django.shortcuts import render_to_response
from django.views.decorators.csrf import csrf_exempt
import subprocess
from domain.persistable import User
# The login form page.
@csrf_exempt
def login_form(request):
    """Render the login form (CSRF protection disabled)."""
    return render_to_response('login.html')
# Receive the submitted login data (Python 2 module: note the print statement).
@csrf_exempt
def login(request):
    """Pass the posted credentials to index.html; no authentication is done.

    NOTE(review): the credentials go straight into the template context and
    all users are printed to stdout — looks like debug scaffolding; confirm.
    """
    request.encoding='utf-8'
    username = request.POST['username'].encode("utf-8")
    password = request.POST['password'].encode("utf-8")
    c = {'username': username, 'password': password}
    all_entries = User.objects.all()
    print all_entries
    return render_to_response( "index.html", c)
import json
import os
import numpy as np
import scipy as sp
import pandas as pd
from pandas.tools.plotting import scatter_matrix
import seaborn as sns
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid.inset_locator import inset_axes
import ast
import cPickle as pickle
# For each of the 20 tasks, print min/max of the best value found in the dict
# at index 5 of every pickled result file (per the commented-out analysis
# below, index 5 presumably holds test accuracy — verify before relying on it).
for i in range(1,21):
    temp = []
    for filename in os.listdir('data/ris/task_{}/'.format(i)):
        d = pickle.load( open("data/ris/task_{}/{}".format(i,filename), "rb" ) )
        temp.append(max([v for k,v in d[5].items()]))
    print('{}\t{}'.format(min(temp),max(temp)))
# data = []
# par = ['']
# for i in range(1,21):
# best = 0
# for filename in os.listdir('data/ris/task_{}/'.format(i)):
# d = pickle.load( open("data/ris/task_{}/{}".format(i,filename), "rb" ) )
# a = max([v for k,v in d[5].items()])
# if a > best:
# best = a
# temp = d
# temp_par = filename.split('}')[0]+'}'
# data.append(temp)
# par.append(ast.literal_eval(temp_par))
# plt.rc('text', usetex=True)
# plt.rc('font', family='Times-Roman')
# sns.set_style(style='white')
# color = sns.color_palette("Set2", 10)
# fig = plt.figure(figsize=(10,10))
# i=1
# print('Task\tloss_train\tloss_val\tloss_test\tacc_train\tacc_val\tacc_test\tNB\tGL\tL2\tlr\tdr')
# for d in data:
# loss_train = [v for k,v in d[0].items()]
# loss_val = [v for k,v in d[2].items()]
# loss_test = [v for k,v in d[4].items()]
#
# acc_train = [v for k,v in d[1].items()]
# acc_val = [v for k,v in d[3].items()]
# acc_test = [v for k,v in d[5].items()]
#
# idx = np.where(acc_val == max(acc_val))[0][-1]
# print("%d\t%2f\t%2f\t%2f\t%2f\t%2f\t%2f\t%d\t%s\t%4f\t%4f\t%4f" % (i,loss_train[idx],loss_val[idx],
# loss_test[idx],acc_train[idx],
# acc_val[idx],acc_test[idx],
# int(par[i]['nb']),bool(par[i]['tr'][0]),
# float(par[i]['L2']),float(par[i]['lr']),float(par[i]['dr'])))
#
# ax = fig.add_subplot(5,4, i)
# plt.title("Task "+str(i))
# plt.plot(acc_train, label=str(i))
# plt.plot(acc_val)
# if( i in [1,5,9,13,17]):
# ax.set_ylabel("Accuracy")
# if( i in [17,18,19,20]):
# ax.set_xlabel("Epoch")
# if(acc_test[idx] >= 0.95):
# ax.patch.set_facecolor("green")
# ax.patch.set_alpha(0.5)
# else:
# ax.patch.set_facecolor("red")
# ax.patch.set_alpha(0.5)
# i+=1
#
# plt.tight_layout()
# # plt.savefig('data/acc.pdf', format='pdf', dpi=300)
#
# plt.show()
|
#!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
#import pdb; pdb.set_trace()
def wordMultiply(word):
    """Print *word* four times, one per line (Python 2 print statement).

    NOTE(review): the name suggests string multiplication (word * 4), but the
    function only prints — confirm the intended behavior.
    """
    for count in range(4):
        print word
if __name__ == "__main__":
    wordMultiply("happy")
|
from chips.api.api import Stimulus, Response, Input, Output, Chip
from fpga_tx.build_tools import build_vivado
from fpga_tx.download_tools import vivado
import fpga_tx.flash_tools
# Toolchain selection: build, download and flash all use the Vivado flow.
build_tool = build_vivado
download_tool = vivado
flash_tool = fpga_tx.flash_tools.vivado
# Target FPGA part, SPI-flash configuration string, and board name.
device = "XC7A15T-CPG236-1"
flash = "n25q32-3.3v-spi-x1_x2_x4"
board="Atlys"
def make_chip():
    """Declare the top-level 'user_design' chip and its external ports.

    Ports cover RS-232, GPS (count + serial), the transmitter (frequency,
    AM, control) and the LEDs. Creation order presumably fixes port
    ordering — confirm against the chips API before reordering.
    """
    chip = Chip("user_design")
    #create stimulus and response
    Input(chip, "input_rs232_rx")
    Output(chip, "output_rs232_tx")
    Input(chip, "input_gps_count")
    Input(chip, "input_gps_rx")
    Output(chip, "output_gps_tx")
    Output(chip, "output_tx_freq")
    Output(chip, "output_tx_am")
    Output(chip, "output_tx_ctl")
    Output(chip, "output_leds")
    return chip
|
import json

import falcon
class CompaniesResource(object):
    """Falcon resource exposing a hard-coded list of companies."""
    # In-memory fixture data; a real implementation would query a store.
    companies = [{"id": 1, "name": "Company One"}, {"id": 2, "name": "Company Two"}]
    def on_get(self, req, resp):
        """Handle GET /companies: serialize the company list as JSON.

        Fix: `json` was used without ever being imported, so every request
        died with a NameError; `import json` is now at the top of the module.
        (Docstring also corrected — it previously mentioned Flask.)
        """
        resp.body = json.dumps(self.companies)
# Wire the resource into a WSGI app at /companies.
api = falcon.API()
companies_endpoint = CompaniesResource()
api.add_route('/companies',companies_endpoint)
|
from tensorflow.keras.models import load_model
## Import custom functions and configuration
from loader import get_from_pickle, ConfigLoader
## Here import your model and functions for pre-processing
from model import compile_get_model
def main():
    """Evaluate a model described by config.json.

    The model is either unpickled, loaded via Keras's load_model, or compiled
    fresh; weights are then applied by the matching mechanism.
    """
    config = ConfigLoader('config.json')
    config.download_all_files()
    model_cfg = config.model
    weights_cfg = config.weights
    if not model_cfg.required:
        model = compile_get_model()
    elif model_cfg.pickle:
        model = get_from_pickle(model_cfg.location)
    else:
        model = load_model(model_cfg.location)
    # Loading weights separately is only needed when the model itself was
    # not restored with load_model.
    if weights_cfg.required:
        if weights_cfg.pickle:
            model.set_weights(get_from_pickle(weights_cfg.location))
        else:
            model.load_weights(weights_cfg.location)
    ### Here goes the evaluation code for your own model (use model to predict)
    ## Ex: model.predict(...)
|
import pathlib
import random
import discord
from discord.ext import commands
import yaml
from bot.bot import Bot
# Google Form where users can suggest new conversation topics.
FORM_URL = "https://forms.gle/sb2jNbvVcTorNPTX6"
# Load the list of conversation starters shipped with the bot's resources.
with pathlib.Path("bot/resources/fun/starters.yaml").open("r", encoding="utf8") as f:
    STARTERS = yaml.load(f, Loader=yaml.FullLoader)
class ConversationStarters(commands.Cog):
    """Cog providing a command that posts a random conversation starter."""
    def __init__(self, bot: Bot):
        self.bot = bot
    @commands.command(name="topic")
    async def topic(self, ctx: commands.Context):
        """Send a random topic as an embed, with a link for suggesting more."""
        random_topic = random.choice(STARTERS)
        topic_embed = discord.Embed(
            description=f"you want to suggest a new topic? [click here]({FORM_URL})"
        )
        topic_embed.title = random_topic
        await ctx.send(embed=topic_embed)
def setup(bot: Bot):
    """Load the ConversationStarters cog (discord.py extension entry point)."""
    bot.add_cog(ConversationStarters(bot))
|
# Author: Karl Gemayel
# Created: 6/29/20, 3:41 PM
import logging
import os
from textwrap import wrap
import seaborn
import argparse
import numpy as np
import pandas as pd
from typing import *
from functools import reduce
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
# noinspection All
import pathmagic
# noinspection PyUnresolvedReferences
import mg_log # runs init in mg_log and configures logger
# Custom imports
from mg_io.general import save_obj, load_obj
from mg_viz.general import square_subplots, set_size
from mg_general import Environment, add_env_args_to_parser
from mg_stats.shelf import update_dataframe_with_stats, tidy_genome_level, \
_helper_df_joint_reference
from mg_general.general import all_elements_equal, fix_names, next_name, os_join, get_value
from mg_viz.colormap import ColorMap as CM
# ------------------------------ #
# Parse CMD #
# ------------------------------ #
from mg_viz.shelf import number_formatter, update_tool_names_to_full
from mg_viz.stats_large import case_insensitive_match
# Command-line interface for the per-gene statistics visualizer.
parser = argparse.ArgumentParser("Visualize statistics collected per gene.")
parser.add_argument('--pf-data', required=True)
parser.add_argument('--ref-5p', required=False, nargs="+", help="Reference(s) on which to compare 5' predictions")
parser.add_argument('--ref-3p', required=False, nargs="+", help="Reference(s) on which to compare 3' predictions")
# Optional checkpoint files to avoid recomputing 5'/3' statistics.
parser.add_argument('--pf-checkpoint-5p')
parser.add_argument('--pf-checkpoint-3p')
parser.add_argument('--tools', nargs="+", help="If set, only compare these tools. Otherwise all tools are chosen")
# NOTE(review): the help text below reads "Useful only genome ID's..." — an
# 'if' appears to be missing, but help strings are runtime text, left as-is.
parser.add_argument('--parse-names', action='store_true', help="If set, try to shorten genome names. Useful only "
                                                               "genome ID's in the data are actually names")
add_env_args_to_parser(parser)
parsed_args = parser.parse_args()
# ------------------------------ #
#           Main Code            #
# ------------------------------ #
# Load environment variables
my_env = Environment.init_from_argparse(parsed_args)
# Setup logger
logging.basicConfig(level=parsed_args.loglevel)
logger = logging.getLogger("logger")  # type: logging.Logger
def get_stats_at_gcfid_level_with_reference(df, tools, reference):
    # type: (pd.DataFrame, List[str], str) -> pd.DataFrame
    """Aggregate per-gene 5'/3' match statistics to one row per genome.

    For each genome, computes match/error rates and counts for every tool
    against *reference*, plus precision/recall-style summaries; columns are
    keyed as 'Metric(tool,reference)'.

    NOTE(review): when a tool has zero 3' matches only the NaN block is
    written, yet the trailing loop still reads 'Number of Found(...)' for
    that tool — presumably a KeyError on such genomes; confirm. The IC5p/
    IC3p/Comp 'Match' ratios can also divide by zero when no partial genes
    were found.
    """
    list_entries = list()
    for gcfid, df_group in df.groupby("Genome", as_index=False):
        result = dict()
        for t in tools:
            # Column tags: 'tool,reference' for outputs, 'tool=reference' inputs.
            tag = ",".join([t, reference])
            tag_eq = "=".join([t, reference])
            if df_group[f"3p:Match({tag_eq})"].sum() == 0:
                # No 3' matches at all: mark this tool's metrics as NaN.
                result[f"Match({tag})"] = np.nan
                result[f"Number of Error({tag})"] = np.nan
                result[f"Error Rate({tag})"] = np.nan
                result[f"Number of Error({tag})"] = np.nan
                result[f"Number of Match({tag})"] = np.nan
                # result[f"Number of Predictions({t},{t})"] = np.nan
            else:
                # 5' matches as a percentage of genes whose 3' end matched.
                result[f"Match({tag})"] = 100 * df_group[f"5p:Match({tag_eq})"].sum() / float(
                    df_group[f"3p:Match({tag_eq})"].sum())
                result[f"Error Rate({tag})"] = 100 - result[f"Match({tag})"]
                result[f"Number of Error({tag})"] = df_group[f"3p:Match({tag_eq})"].sum() - df_group[
                    f"5p:Match({tag_eq})"].sum()
                result[f"Number of Found({tag})"] = df_group[f"3p:Match({tag_eq})"].sum()
                result[f"Number of Missed({tag})"] = df_group[f"5p-{reference}"].count() - df_group[
                    f"3p:Match({tag_eq})"].sum()
                result[f"Number of Match({tag})"] = df_group[f"5p:Match({tag_eq})"].sum()
                # result[f"Number of Predictions({t},{t})"] = df_group[f"5p-{t}"].count()
                # Incomplete-at-5' (IC5p), incomplete-at-3' (IC3p) and complete
                # (Comp) gene subsets, each with its own match percentage.
                result[f"Number of IC5p Match({tag})"] = (
                        df_group[f"5p:Match({tag_eq})"] & df_group[f"Partial5p-{reference}"]).sum()
                result[f"Number of IC5p Found({tag})"] = (
                        df_group[f"3p:Match({tag_eq})"] & df_group[f"Partial5p-{reference}"]).sum()
                result[f"IC5p Match({tag})"] = 100 * result[f"Number of IC5p Match({tag})"] / result[
                    f"Number of IC5p Found({tag})"]
                result[f"Number of IC3p Match({tag})"] = (
                        df_group[f"5p:Match({tag_eq})"] & df_group[f"Partial3p-{reference}"]).sum()
                result[f"Number of IC3p Found({tag})"] = (
                        df_group[f"3p:Match({tag_eq})"] & df_group[f"Partial3p-{reference}"]).sum()
                result[f"IC3p Match({tag})"] = 100 * result[f"Number of IC3p Match({tag})"] / result[
                    f"Number of IC3p Found({tag})"]
                result[f"Number of Comp Match({tag})"] = (
                        df_group[f"5p:Match({tag_eq})"] & ~(
                        df_group[f"Partial5p-{reference}"] | df_group[f"Partial3p-{reference}"])).sum()
                result[f"Number of Comp Found({tag})"] = (
                        df_group[f"3p:Match({tag_eq})"] & ~(
                        df_group[f"Partial5p-{reference}"] | df_group[f"Partial3p-{reference}"])).sum()
                result[f"Comp Match({tag})"] = 100 * result[f"Number of Comp Match({tag})"] / result[
                    f"Number of Comp Found({tag})"]
        # Per-tool prediction counts, runtimes, and reference-relative ratios.
        for t in tools + [reference]:
            result[f"Number of Predictions({t},{t})"] = df_group[f"5p-{t}"].count()
            result[f"Runtime({t},{t})"] = df_group[f"Runtime"].mean()
            if t != reference:
                result[f"Precision({t},{reference})"] = result[f"Number of Found({t},{reference})"] / result[
                    f"Number of Predictions({t},{t})"]
                result[f"Recall({t},{reference})"] = result[f"Number of Found({t},{reference})"] / df_group[
                    f"5p-{reference}"].count()
                result[f"WR({t},{reference})"] = (result[f"Number of Predictions({t},{t})"] - result[
                    f"Number of Found({t},{reference})"]) / result[f"Number of Predictions({t},{t})"]
                result[f"Sensitivity({t},{reference})"] = result[f"Number of Found({t},{reference})"] / df_group[
                    f"5p-{reference}"].count()
                result[f"Specificity({t},{reference})"] = result[f"Number of Found({t},{reference})"] / result[
                    f"Number of Predictions({t},{t})"]
        # result[f"Runtime({t, t})"] = df_group[f"Runtime"].mean()
        result["Genome"] = gcfid
        result["Genome GC"] = df_group.at[df_group.index[0], "Genome GC"]
        result["Chunk Size"] = df_group.at[df_group.index[0], "Chunk Size"]
        result["Number in Reference"] = result[f"Number of Predictions({reference},{reference})"]
        list_entries.append(result)
    return pd.DataFrame(list_entries)
# def get_stats_at_gcfid_level(df, tools, reference):
# # type: (pd.DataFrame, List[str], str) -> pd.DataFrame
#
# list_entries = list()
#
# ps = powerset(tools, min_len=2)
#
#
#
# for gcfid, df_group in df.groupby("Genome", as_index=False):
# result = dict()
#
# for comb in ps:
# tag = ",".join(comb)
# tag_eq = "=".join(comb)
#
# result[f"Match({tag})"] = 100 * df_group[f"5p:{tag_eq}"].sum()/ float(df_group[f"3p:{tag_eq}"].sum())
#
# result["Genome"] = gcfid
# result["Chunk Size"] = df_group.at[df_group.index[0], "Chunk Size"]
# list_entries.append(result)
#
# return pd.DataFrame(list_entries)
def viz_stats_at_gcfid_level(df, tools):
    # type: (pd.DataFrame, List[str]) -> None
    """Placeholder: per-genome visualization not implemented yet."""
    pass
def ridgeplot(df):
    # type: (pd.DataFrame) -> None
    """Overlapping per-genome density (ridge) plot of the 'GC Diff' column.

    Rows are labeled "<genome> (<GC>)" and ordered by the GC value embedded
    in the label.
    """
    # Create the data (the unused `names = sorted(set(df["Genome"]))` local
    # was removed).
    x = df["GC Diff"].values
    g = df.apply(lambda r: f"{r['Genome']} ({r['Genome GC']:.2f})", axis=1)
    df = pd.DataFrame(dict(x=x, g=g))
    hue_order = sorted(set(g), key=lambda x: float(x.split("(")[1].split(")")[0]))
    # Initialize the FacetGrid object
    pal = seaborn.cubehelix_palette(10, rot=-.25, light=.7)
    g = seaborn.FacetGrid(df, row="g", hue="g",
                          hue_order=hue_order,
                          row_order=hue_order,
                          aspect=15, height=.5, palette=pal)
    # Draw the densities in a few steps
    g.map(seaborn.kdeplot, "x", clip_on=False, shade=True, alpha=1, lw=1.5, bw=.2)
    g.map(seaborn.kdeplot, "x", clip_on=False, color="w", lw=2, bw=.2)
    g.map(plt.axhline, y=0, lw=2, clip_on=False)

    # Define and use a simple function to label the plot in axes coordinates
    def label(x, color, label):
        ax = plt.gca()
        ax.text(0, .2, label, fontweight="bold", color=color,
                ha="left", va="center", transform=ax.transAxes)

    g.map(label, "x")
    # Set the subplots to overlap
    g.fig.subplots_adjust(hspace=-.25)
    # Remove axes details that don't play well with overlap
    g.set_titles("")
    g.set(yticks=[])
    g.despine(bottom=True, left=True)
    plt.show()
def number_and_match(env, df_total, hue_order, col_number, col_perc, sup_title):
    """Plot per-genome prediction counts vs chunk size, with *col_perc* on a
    twin right axis (fixed to 40-100).

    NOTE(review): plt.suptitle is called AFTER plt.savefig, so the saved
    image will not include the title — confirm whether that is intended.
    """
    g = seaborn.FacetGrid(df_total, col="Genome", col_wrap=4, hue="Tool", sharey=False)
    g.map(plt.plot, "Chunk Size", col_number, linestyle="dashed")
    # g.map(plt.plot, "x", "y_fit")
    g.set_xlabels("Chunk Size")
    g.set_titles("{col_name}")
    g.set(ylim=(0, None))
    g.set(xlim=(0, 5100))
    g.set_ylabels("Number of predictions")
    g.add_legend()
    # Overlay the percentage metric on a secondary y-axis per facet.
    for ax, (_, subdata) in zip(g.axes, df_total.groupby('Genome')):
        ax2 = ax.twinx()
        subdata = subdata.sort_values("Chunk Size")
        for hue in hue_order:
            subdata_hue = subdata[subdata["Tool"] == hue]
            ax2.plot(subdata_hue["Chunk Size"], subdata_hue[col_perc], label=hue)
            ax2.set_ylim(40, 100)
        # subdata.plot(x='data_sondage', y='impossible', ax=ax2, legend=False, color='r')
    plt.tight_layout()
    plt.savefig(next_name(env["pd-work"]))
    plt.suptitle(sup_title)
    plt.show()
def viz_number_of_predictions_for_short(env, df):
    # type: (Environment, pd.DataFrame) -> None
    """Per-genome visualizations of tool behavior on short fragments.

    Produces four figures, each saved to env["pd-work"] and then shown:
      1. number found (dashed) and number of predictions vs fragment size;
      2. percentage of predictions found in the reference vs fragment size;
      3. precision vs recall (MGM2/MPRODIGAL/FGS/MGA only);
      4. WR (dashed) and precision vs fragment size (same tool subset).
    """
    # Drop the reference annotation; copy so the derived column below does
    # not trigger pandas' SettingWithCopy warning on a filtered view.
    df = df[df["Tool"] != "VERIFIED"].copy()
    df["Found%"] = 100 * df["Number of Found"] / df["Number of Predictions"]

    # Figure 1: found (dashed) vs predicted counts.
    g = seaborn.FacetGrid(df, col="Genome", col_wrap=4, hue="Tool", sharey=True)
    xlim = (0, 5100)
    g.map(plt.plot, "Chunk Size", "Number of Found", linestyle="dashed")
    g.map(plt.plot, "Chunk Size", "Number of Predictions")
    for ax in g.axes:
        ax.yaxis.set_major_formatter(FuncFormatter(number_formatter))
    g.set_xlabels("Fragment Size (nt)")
    g.set_titles("{col_name}", style="italic")
    g.set(ylim=(0, None))
    g.set(xlim=xlim)
    g.set_ylabels("Number of predictions")
    g.add_legend()
    plt.savefig(next_name(env["pd-work"]))
    plt.show()

    # Figure 2: percentage of predictions that are found in the reference.
    g = seaborn.FacetGrid(df, col="Genome", col_wrap=4, hue="Tool", sharey=False)
    g.map(plt.plot, "Chunk Size", "Found%")
    g.set_xlabels("Fragment Size (nt)")
    g.set_titles("{col_name}", style="italic")
    g.set(ylim=(0, None))
    g.set(xlim=(0, 5100))
    g.set_ylabels("Number of predictions")
    g.add_legend()
    plt.savefig(next_name(env["pd-work"]))
    plt.show()

    # Remaining figures are restricted to the metagenome tools of interest.
    df = df[df["Tool"].isin({"MGM2", "MPRODIGAL", "FGS", "MGA"})]

    # Figure 3: precision vs recall.
    g = seaborn.FacetGrid(df, col="Genome", col_wrap=4, hue="Tool", sharey=True)
    g.map(plt.plot, "Recall", "Precision")
    g.set_titles("{col_name}", style="italic")
    g.set(ylim=(0, 1))
    g.set(xlim=(0, 1))
    g.add_legend()
    plt.savefig(next_name(env["pd-work"]))
    plt.show()

    # Figure 4: WR (dashed) and precision vs fragment size.
    g = seaborn.FacetGrid(df, col="Genome", col_wrap=4, hue="Tool", sharey=True, palette=CM.get_map("tools"))
    g.map(plt.plot, "Chunk Size", "WR", linestyle="dashed")
    g.map(plt.plot, "Chunk Size", "Precision")
    g.set_titles("{col_name}", style="italic")
    g.set(ylim=(0, 1))
    g.set(xlim=(0, 5100))
    g.set_xlabels("Fragment Size (nt)")
    g.set_ylabels("Score")
    g.add_legend()
    plt.savefig(next_name(env["pd-work"]))
    plt.show()
def viz_plot_per_genome_y_error_x_chunk(env, df):
    # type: (Environment, pd.DataFrame) -> None
    """Melt the wide per-genome stats frame into one tidy frame per metric,
    merge them on (Genome, Chunk Size, Genome GC, Tool), and plot each
    metric per genome against chunk size, including partial-gene
    (IC5p/IC3p/Comp) breakdowns and an aggregate match-rate summary.
    """
    values_to_melt = ["Match", "Number of Error", "Number of Found", "Number of Match", "Number of Predictions",
                      "Number of IC5p Match", "Number of IC5p Found", "Number of IC3p Match", "Number of IC3p Found",
                      "Number of Comp Match", "Number of Comp Found", "Precision", "Recall", "WR", "Number of Missed",
                      "IC3p Match", "IC5p Match", "Comp Match"]
    df_total = list()
    for v in values_to_melt:
        # Wide columns are named "<metric>(<tool>,...)"; melt the columns of
        # the current metric and recover the tool name from the column name.
        df_curr = pd.melt(df, id_vars=["Genome", "Chunk Size", "Genome GC"],
                          value_vars=[x for x in df.columns if v == x.split("(")[0].strip()],
                          var_name="Combination", value_name=v)
        df_curr["Tool"] = df_curr["Combination"].apply(lambda x: x.split("(")[1].split(",")[0].upper())
        df_total.append(df_curr)
    df_total = reduce(lambda df1, df2: pd.merge(df1, df2, on=["Genome", "Chunk Size", "Genome GC", "Tool"],
                                                how="outer"), df_total)
    viz_number_of_predictions_for_short(env, df_total)

    hue_order = sorted(df_total["Tool"].unique())
    # Number found (dashed, left axis) with the match rate on a twin axis.
    g = seaborn.FacetGrid(df_total, col="Genome", col_wrap=4, hue="Tool", sharey=True, hue_order=hue_order)
    g.map(plt.plot, "Chunk Size", "Number of Found", linestyle="--")
    g.set_xlabels("Chunk Size")
    g.set_ylabels("Metric")
    g.set(ylim=(0, None))
    g.set(xlim=(None, None))
    g.add_legend()
    for ax, (_, subdata) in zip(g.axes, df_total.groupby('Genome')):
        ax2 = ax.twinx()
        subdata = subdata.sort_values("Chunk Size")
        for hue in hue_order:
            subdata_hue = subdata[subdata["Tool"] == hue]
            ax2.plot(subdata_hue["Chunk Size"], subdata_hue["Match"], label=hue)
        ax2.set_ylim(40, 100)
    plt.tight_layout()
    plt.savefig(next_name(env["pd-work"]))
    plt.show()

    # Total number of predictions per genome.
    g = seaborn.FacetGrid(df_total, col="Genome", col_wrap=4, hue="Tool", sharey=False)
    g.map(plt.plot, "Chunk Size", "Number of Predictions")
    g.set_xlabels("Chunk Size")
    g.set_titles("{col_name}")
    g.set(ylim=(0, None))
    g.set_ylabels("Number of predictions")
    g.add_legend()
    plt.savefig(next_name(env["pd-work"]))
    plt.show()

    # Genes incomplete at the 5' end: found counts plus match rate.
    g = seaborn.FacetGrid(df_total, col="Genome", col_wrap=4, hue="Tool", sharey=False)
    g.map(plt.plot, "Chunk Size", "Number of IC5p Found", linestyle="dashed")
    g.set_xlabels("Chunk Size")
    g.set_titles("{col_name}")
    g.set(ylim=(0, None))
    g.set_ylabels("Number of predictions")
    g.add_legend()
    for ax, (_, subdata) in zip(g.axes, df_total.groupby('Genome')):
        ax2 = ax.twinx()
        subdata = subdata.sort_values("Chunk Size")
        for hue in hue_order:
            subdata_hue = subdata[subdata["Tool"] == hue]
            ax2.plot(subdata_hue["Chunk Size"], subdata_hue["IC5p Match"], label=hue)
        ax2.set_ylim(40, 100)
    plt.tight_layout()
    plt.savefig(next_name(env["pd-work"]))
    plt.suptitle("IC5p")
    plt.show()

    number_and_match(env, df_total, hue_order, "Number of IC5p Match", "IC5p Match", "IC5p")
    number_and_match(env, df_total, hue_order, "Number of IC3p Match", "IC3p Match", "IC3p")
    number_and_match(env, df_total, hue_order, "Number of Comp Match", "Comp Match", "Comp")

    # Aggregate (summed over genomes) match rates per partial-gene category.
    df_comprehensive = df_total.groupby(["Chunk Size", "Tool"], as_index=False).sum()
    df_comprehensive = pd.melt(df_comprehensive, id_vars=["Chunk Size", "Tool"],
                               value_vars=[f"Number of {x} Match" for x in ["IC3p", "IC5p", "Comp"]] + [
                                   "Number of Match"],
                               var_name="Partial", value_name="Value")
    df_comprehensive_2 = pd.melt(df_total.groupby(["Chunk Size", "Tool"], as_index=False).sum(),
                                 id_vars=["Chunk Size", "Tool"],
                                 value_vars=[f"Number of {x} Found" for x in ["IC3p", "IC5p", "Comp"]] + [
                                     "Number of Found"],
                                 var_name="Partial", value_name="Value")
    # Match% = matched / found; relies on both melts producing the same row
    # order (same id_vars and parallel value_vars lists).
    df_comprehensive["Match"] = 100 * df_comprehensive["Value"] / df_comprehensive_2["Value"]
    g = seaborn.lmplot("Chunk Size", "Match", data=df_comprehensive, hue="Tool", col="Partial", lowess=True)
    g.set(xlim=(0, 5010), ylim=(0, 100))
    plt.show()
    print(df_comprehensive.to_csv())
def viz_plot_per_genome_5p(env, df_gcfid):
    # type: (Environment, pd.DataFrame) -> None
    """Placeholder for per-genome 5' (gene-start) analysis; not implemented."""
    pass
def viz_stats_genome_level(env, df_gcfid, tools, reference, **kwargs):
    # type: (Environment, pd.DataFrame, List[str], str, Dict[str, Any]) -> None
    """Run genome-level visualizations: the 3' analysis followed by the 5'
    analysis (the latter is currently an empty placeholder).

    Note: `tools`, `reference` and `kwargs` are accepted but not used here.
    """
    # 3' analysis
    viz_plot_per_genome_y_error_x_chunk(env, df_gcfid)
    # 5' analysis
    viz_plot_per_genome_5p(env, df_gcfid)
def viz_stats_3p_number_of_predictions_number_of_found(env, df_tidy, reference):
    # type: (Environment, pd.DataFrame, str) -> None
    """Per-genome plot of the number of predictions (dashed) and the number
    of found genes (solid) against fragment size; the figure is written to
    the work directory and then displayed."""
    grid = seaborn.FacetGrid(df_tidy, col="Genome", col_wrap=4, hue="Tool",
                             sharey=True, palette=CM.get_map("tools"))
    grid.map(plt.plot, "Chunk Size", "Number of Predictions", linestyle="dashed")
    grid.map(plt.plot, "Chunk Size", "Number of Found")
    grid.set_titles("{col_name}", style="italic")
    grid.set(xlim=(0, 5100))
    grid.set_xlabels("Fragment Size (nt)")
    grid.set_ylabels("Score")
    grid.add_legend()
    plt.savefig(next_name(env["pd-work"]))
    plt.show()
def viz_stats_3p_sensitivity_specificity(env, df_tidy, reference):
    # type: (Environment, pd.DataFrame, str) -> None
    """Draw three per-genome figures: sensitivity vs fragment size,
    specificity vs fragment size, and sensitivity vs specificity.
    Each figure is saved to the work directory and then shown."""

    def draw(xcol, ycol, xlabel, ylabel, xlim=None):
        # One FacetGrid per figure, colored by tool; save then display.
        grid = seaborn.FacetGrid(df_tidy, col="Genome", col_wrap=4, hue="Tool",
                                 sharey=True, palette=CM.get_map("tools"))
        grid.map(plt.plot, xcol, ycol)
        grid.set_titles("{col_name}", style="italic")
        if xlim is not None:
            grid.set(xlim=xlim)
        grid.set_xlabels(xlabel)
        grid.set_ylabels(ylabel)
        grid.add_legend()
        plt.savefig(next_name(env["pd-work"]))
        plt.show()

    draw("Chunk Size", "Sensitivity", "Fragment Size (nt)", "Sensitivity", xlim=(0, 5100))
    draw("Chunk Size", "Specificity", "Fragment Size (nt)", "Specificity", xlim=(0, 5100))
    draw("Specificity", "Sensitivity", "Specificity", "Sensitivity")
def viz_stats_3p_number_of_predictions_precision(env, df_tidy, reference):
    # type: (Environment, pd.DataFrame, str) -> None
    """Per-genome plot of the number of predictions (dashed, left axis) with
    precision overlaid on a twin right axis, followed by printed summary
    tables aggregated over all genomes.

    The reference annotation and MGM are excluded before plotting.
    """
    df_tidy = df_tidy[df_tidy["Tool"].apply(lambda x: x.lower()) != reference.lower()]
    df_tidy = df_tidy[df_tidy["Tool"].apply(lambda x: x.lower()) != "mgm"]
    hue_order = sorted(df_tidy["Tool"].unique())
    cw = 4  # facet columns per row
    g = seaborn.FacetGrid(df_tidy, col="Genome", col_wrap=cw, hue="Tool", hue_order=hue_order,
                          sharey=True, palette=CM.get_map("tools"))
    g.map(plt.plot, "Chunk Size", "Number of Predictions", linestyle="dashed")
    g.map(plt.plot, "Chunk Size", "Precision")
    g.set_titles("{col_name}", style="italic")
    g.set(xlim=(0, 5100))
    g.set_xlabels("Fragment Size (nt)")
    g.set_ylabels("Number of Predictions")
    g.add_legend()
    counter = 0
    for ax, (_, subdata) in zip(g.axes, df_tidy.groupby('Genome')):
        ax.yaxis.set_major_formatter(FuncFormatter(number_formatter))
        # Precision on a secondary axis; its label/ticks are shown only on
        # the right-most column of each facet row.
        ax2 = ax.twinx()
        subdata = subdata.sort_values("Chunk Size")
        for hue in hue_order:
            subdata_hue = subdata[subdata["Tool"] == hue]
            ax2.plot(subdata_hue["Chunk Size"], subdata_hue["Precision"], label=hue,
                     color=CM.get_map("tools")[hue.lower()])
        ax2.set_ylim(0, 1)
        counter += 1
        if counter % cw == 0:
            ax2.set_ylabel("Precision")
        else:
            ax2.set_yticks([])
    plt.tight_layout()
    plt.savefig(next_name(env["pd-work"]))
    plt.show()

    # Tabulate aggregate metrics (summed over genomes) per chunk size/tool.
    df1 = df_tidy.groupby(["Chunk Size", "Tool"], as_index=False).sum()
    df1["Precision"] = df1["Number of Found"] / df1["Number of Predictions"]
    df1["WR"] = (df1["Number of Predictions"] - df1["Number of Found"]) / df1["Number of Predictions"]
    df1["Sensitivity"] = df1["Number of Found"] / df1["Number in Reference"]
    df1["Specificity"] = df1["Number of Found"] / df1["Number of Predictions"]
    print(df1.pivot(index="Chunk Size", columns="Tool", values=["Precision", "Number of Found"]))
    print(df1.pivot(index="Chunk Size", columns="Tool", values=["Precision"]))
    print(df1.pivot(index="Chunk Size", columns="Tool",
                    values=["Precision", "Number of Missed", "Number of Predictions"]))
    print(df1.pivot(index="Chunk Size", columns="Tool",
                    values=["Sensitivity", "Specificity", "Number of Found", "Number in Reference",
                            "Number of Predictions"]).to_csv())

    # Same tabulation, averaged (per-genome mean) instead of summed.
    df1 = df_tidy.groupby(["Chunk Size", "Tool"], as_index=False).mean()
    print(df1.pivot(index="Chunk Size", columns="Tool",
                    values=["Sensitivity", "Specificity"]).to_csv())
def viz_stats_5p_number_of_errors_number_of_found(env, df_tidy, reference):
    # type: (Environment, pd.DataFrame, str) -> None
    """Per-genome plot of the number of found genes (dashed) and the number
    of 5' errors (solid) against fragment size; saved then shown."""
    g = seaborn.FacetGrid(df_tidy, col="Genome", col_wrap=4, hue="Tool", sharey=True, palette=CM.get_map("tools"))
    g.map(plt.plot, "Chunk Size", "Number of Found", linestyle="dashed")
    g.map(plt.plot, "Chunk Size", "Number of Error")
    g.set_titles("{col_name}", style="italic")
    g.set(xlim=(0, 5100))
    g.set_xlabels("Fragment Size (nt)")
    # Fixed label: it previously read "Sensitivity" (copy-paste), but the
    # plotted quantities are gene counts.
    g.set_ylabels("Number of genes")
    g.add_legend()
    plt.savefig(next_name(env["pd-work"]))
    plt.show()
def viz_stats_5p_error_rate(env, df_tidy, reference):
    # type: (Environment, pd.DataFrame, str) -> None
    """Per-genome plot of the 5' error rate versus fragment size; the figure
    is written to the work directory and then displayed."""
    grid = seaborn.FacetGrid(df_tidy, col="Genome", col_wrap=4, hue="Tool",
                             sharey=True, palette=CM.get_map("tools"))
    grid.map(plt.plot, "Chunk Size", "Error Rate")
    grid.set_titles("{col_name}", style="italic")
    grid.set(xlim=(0, 5100))
    grid.set_xlabels("Fragment Size (nt)")
    grid.set_ylabels("Error Rate")
    grid.add_legend()
    plt.savefig(next_name(env["pd-work"]))
    plt.show()
def viz_stats_5p_error_rate_partial(env, df_tidy, reference):
    # type: (Environment, pd.DataFrame, str) -> None
    """Analyze gene-start (5') error rates split by gene completeness.

    Conditions: "IC5p" = gene incomplete at its 5' end, "IC3p" = incomplete
    at its 3' end, "Comp" = complete gene. Produces:
      1. one per-genome error-rate figure per condition;
      2. a per-condition figure combining all genomes;
      3. a two-level FacetGrid (found counts + error rates) where IC3p is
         folded into "Comp" (their 5' ends are intact);
      4. two 2x2 summary variants of the same data (paper and thesis).
    """
    # Exclude the reference annotation; the frame was copied by the caller's
    # perspective via .copy() so the derived columns below are safe to add.
    df_tidy = df_tidy[df_tidy["Tool"].apply(lambda x: x.lower()) != "verified"].copy()
    for cond in ["IC5p", "IC3p", "Comp"]:
        # Error rate = fraction of found genes whose 5' end was wrong.
        df_tidy[f"Error Rate {cond}"] = (df_tidy[f"Number of {cond} Found"] - df_tidy[f"Number of {cond} Match"]) / \
                                        df_tidy[f"Number of {cond} Found"]

        g = seaborn.FacetGrid(df_tidy, col="Genome", col_wrap=4, hue="Tool", sharey=True, palette=CM.get_map("tools"))
        g.map(plt.plot, "Chunk Size", f"Error Rate {cond}")
        g.set_titles("{col_name}", style="italic")
        g.set(xlim=(0, 5100))
        g.set_xlabels("Fragment Size (nt)")  # fixed typo: was "Fragment Sizee (nt)"
        g.set_ylabels("Error Rate")
        g.add_legend()
        plt.suptitle({
            "IC5p": "Incomplete at 5' end",
            "IC3p": "Incomplete at 3' end",
            "Comp": "Complete genes"
        }[cond])
        plt.savefig(next_name(env["pd-work"]))
        plt.show()

    # Show 5' error rate by condition, combining all genomes per tool.
    df2 = df_tidy.groupby(["Chunk Size", "Tool"], as_index=False).sum()
    df2_tidy = pd.melt(
        df2, id_vars=["Chunk Size", "Tool"],
        value_vars=[f"Number of {cond} Found" for cond in ["IC5p", "IC3p", "Comp"]],
        var_name="Condition", value_name="Found"
    )
    # "Number of <cond> Found" -> "<cond>"
    df2_tidy["Condition"] = df2_tidy["Condition"].apply(lambda x: x.split()[2])
    df_tmp = pd.melt(
        df2, id_vars=["Chunk Size", "Tool"],
        value_vars=[f"Number of {cond} Match" for cond in ["IC5p", "IC3p", "Comp"]],
        var_name="Condition", value_name="Match"
    )
    df_tmp["Condition"] = df_tmp["Condition"].apply(lambda x: x.split()[2])
    df2_tidy = reduce(lambda df1, df2: pd.merge(df1, df2, on=["Chunk Size", "Condition", "Tool"],
                                                how="outer"), [df2_tidy, df_tmp])
    df2_tidy["Error Rate"] = (df2_tidy["Found"] - df2_tidy["Match"]) / df2_tidy["Found"]
    df2_tidy["Condition"].replace({
        "IC5p": "Incomplete at Gene Start",
        "IC3p": "Incomplete at Gene End",
        "Comp": "Complete genes"
    }, inplace=True)
    hue_order = sorted(df_tidy["Tool"].unique())
    g = seaborn.FacetGrid(df2_tidy, col="Condition", hue="Tool", sharey=True, palette=CM.get_map("tools"),
                          hue_order=hue_order)
    g.map(plt.plot, "Chunk Size", "Error Rate")
    g.set_titles("{col_name}", style="italic")
    g.set(xlim=(0, 5100))
    g.set_xlabels("Fragment Size (nt)")
    g.set_ylabels("Gene-Start Error Rate")
    g.add_legend()
    # Overlay raw found-counts (dashed) on twin axes, one per condition.
    for ax, (_, subdata) in zip(g.axes[0], df2_tidy.groupby('Condition')):
        ax2 = ax.twinx()
        subdata = subdata.sort_values("Chunk Size")
        for hue in hue_order:
            subdata_hue = subdata[subdata["Tool"] == hue]
            ax2.plot(subdata_hue["Chunk Size"], subdata_hue["Found"], label=hue, linestyle="dashed")
    plt.savefig(next_name(env["pd-work"]))
    plt.show()

    ###################### 2-level facetgrid ######################
    # Fold IC3p into Comp: genes incomplete only at the 3' end still have an
    # intact 5' end, so they count as "complete at gene start".
    df2 = df_tidy.groupby(["Chunk Size", "Tool"], as_index=False).sum()
    df2["Number of Comp Found"] += df2["Number of IC3p Found"]
    df2["Number of Comp Match"] += df2["Number of IC3p Match"]
    df2_tidy = pd.melt(
        df2, id_vars=["Chunk Size", "Tool"],
        value_vars=[f"Number of {cond} Found" for cond in ["IC5p", "Comp"]],
        var_name="Condition", value_name="Score"
    )
    df2_tidy["Condition"] = df2_tidy["Condition"].apply(lambda x: x.split()[2])
    df2_tidy["Metric"] = "Found"
    df_tmp = pd.melt(
        df2, id_vars=["Chunk Size", "Tool"],
        value_vars=[f"Number of {cond} Match" for cond in ["IC5p", "Comp"]],
        var_name="Condition", value_name="Match"
    )
    df_tmp["Condition"] = df_tmp["Condition"].apply(lambda x: x.split()[2])
    df_tmp = reduce(lambda df1, df2: pd.merge(df1, df2, on=["Chunk Size", "Condition", "Tool"],
                                              how="outer"), [df2_tidy, df_tmp])
    # Reuse "Score" to hold the error rate for the grid's second row.
    df_tmp["Score"] = (df_tmp["Score"] - df_tmp["Match"]) / df_tmp["Score"]
    df_tmp["Metric"] = "Error Rate"
    df2_tidy = pd.concat([df2_tidy, df_tmp])
    df2_tidy["Condition"].replace({
        "IC5p": "Incomplete at Gene Start",
        "Comp": "Complete at Gene Start"
    }, inplace=True)
    df2_tidy = df2_tidy[df2_tidy["Chunk Size"] <= 5000]
    hue_order = sorted(df2_tidy["Tool"].unique())
    g = seaborn.FacetGrid(
        df2_tidy, col="Condition", hue="Tool", sharey="row", palette=CM.get_map("tools"),
        row="Metric", hue_order=hue_order
    )
    g.map(plt.plot, "Chunk Size", "Score")
    g.set_titles("{col_name}", style="italic")
    g.set_xlabels("Fragment Size (nt)")
    # Row-specific y-labels, set only on the left-most column.
    for i, axes_row in enumerate(g.axes):
        for j, axes_col in enumerate(axes_row):
            if j == 0:
                if i == 0:
                    axes_col.set_ylabel("Number of Genes Found")
                else:
                    axes_col.set_ylabel("Gene-Start Error Rate")
    g.add_legend()
    plt.tight_layout(rect=[0, 0, 0.8, 1])
    plt.savefig(next_name(env["pd-work"]))
    plt.show()

    # 2x2 summary figure (paper variant).
    df2_tidy.loc[df2_tidy["Tool"] == "MPRODIGAL", "Tool"] = "MProdigal"
    hue_order = sorted(df2_tidy["Tool"].unique())
    figsize = set_size("thesis", subplots=(2, 2), legend=True, titles=True)
    fig, axes = plt.subplots(2, 2, sharex="all", sharey="row", figsize=figsize)
    for h in hue_order:
        ids = (df2_tidy["Metric"] == "Found") & (df2_tidy["Condition"] == "Incomplete at Gene Start") & (
                df2_tidy["Tool"] == h)
        axes[0][0].plot(
            df2_tidy.loc[ids, "Chunk Size"], df2_tidy.loc[ids, "Score"],
            label=h, color=CM.get_map("tools")[h.upper()]
        )
    for h in hue_order:
        ids = (df2_tidy["Metric"] == "Found") & (df2_tidy["Condition"] == "Complete at Gene Start") & (
                df2_tidy["Tool"] == h)
        axes[0][1].plot(
            df2_tidy.loc[ids, "Chunk Size"], df2_tidy.loc[ids, "Score"],
            label=h, color=CM.get_map("tools")[h.upper()]
        )
    for h in hue_order:
        ids = (df2_tidy["Metric"] == "Error Rate") & (df2_tidy["Condition"] == "Incomplete at Gene Start") & (
                df2_tidy["Tool"] == h)
        axes[1][0].plot(
            df2_tidy.loc[ids, "Chunk Size"], df2_tidy.loc[ids, "Score"],
            label=h, color=CM.get_map("tools")[h.upper()]
        )
    for h in hue_order:
        ids = (df2_tidy["Metric"] == "Error Rate") & (df2_tidy["Condition"] == "Complete at Gene Start") & (
                df2_tidy["Tool"] == h)
        axes[1][1].plot(
            df2_tidy.loc[ids, "Chunk Size"], df2_tidy.loc[ids, "Score"],
            label=h, color=CM.get_map("tools")[h.upper()]
        )
    axes[0][0].set_title("Incomplete at Gene Start", style="italic")
    axes[0][1].set_title("Complete at Gene Start", style="italic")
    axes[0][0].yaxis.set_major_formatter(FuncFormatter(number_formatter))
    axes[0][1].yaxis.set_major_formatter(FuncFormatter(number_formatter))
    axes[1][0].set_xlabel("Fragment Size (nt)")
    axes[1][1].set_xlabel("Fragment Size (nt)")
    axes[0][0].set_ylabel("Number of Genes Found")
    axes[1][0].set_ylabel("Gene 5' Error Rate")
    handles, labels = axes[0][0].get_legend_handles_labels()
    labels = update_tool_names_to_full(labels)
    fig.align_ylabels(axes[:, 0])
    leg = fig.legend(handles, labels, bbox_to_anchor=(1.05, 0.5), loc='center left', frameon=False)
    fig.tight_layout()
    fig.savefig(next_name(env["pd-work"]), bbox_extra_artists=(leg,), bbox_inches='tight')
    plt.show()

    # 2x2 summary figure (thesis variant); identical to the paper variant
    # except for the bottom-left y-label wording.
    figsize = set_size("thesis", subplots=(2, 2), legend=True, titles=True)
    fig, axes = plt.subplots(2, 2, sharex="all", sharey="row", figsize=figsize)
    for h in hue_order:
        ids = (df2_tidy["Metric"] == "Found") & (df2_tidy["Condition"] == "Incomplete at Gene Start") & (
                df2_tidy["Tool"] == h)
        axes[0][0].plot(
            df2_tidy.loc[ids, "Chunk Size"], df2_tidy.loc[ids, "Score"],
            label=h, color=CM.get_map("tools")[h.upper()]
        )
    for h in hue_order:
        ids = (df2_tidy["Metric"] == "Found") & (df2_tidy["Condition"] == "Complete at Gene Start") & (
                df2_tidy["Tool"] == h)
        axes[0][1].plot(
            df2_tidy.loc[ids, "Chunk Size"], df2_tidy.loc[ids, "Score"],
            label=h, color=CM.get_map("tools")[h.upper()]
        )
    for h in hue_order:
        ids = (df2_tidy["Metric"] == "Error Rate") & (df2_tidy["Condition"] == "Incomplete at Gene Start") & (
                df2_tidy["Tool"] == h)
        axes[1][0].plot(
            df2_tidy.loc[ids, "Chunk Size"], df2_tidy.loc[ids, "Score"],
            label=h, color=CM.get_map("tools")[h.upper()]
        )
    for h in hue_order:
        ids = (df2_tidy["Metric"] == "Error Rate") & (df2_tidy["Condition"] == "Complete at Gene Start") & (
                df2_tidy["Tool"] == h)
        axes[1][1].plot(
            df2_tidy.loc[ids, "Chunk Size"], df2_tidy.loc[ids, "Score"],
            label=h, color=CM.get_map("tools")[h.upper()]
        )
    axes[0][0].set_title("Incomplete at Gene Start", style="italic")
    axes[0][1].set_title("Complete at Gene Start", style="italic")
    axes[0][0].yaxis.set_major_formatter(FuncFormatter(number_formatter))
    axes[0][1].yaxis.set_major_formatter(FuncFormatter(number_formatter))
    axes[1][0].set_xlabel("Fragment Size (nt)")
    axes[1][1].set_xlabel("Fragment Size (nt)")
    axes[0][0].set_ylabel("Number of Genes Found")
    axes[1][0].set_ylabel("Gene Start Error Rate")
    handles, labels = axes[0][0].get_legend_handles_labels()
    labels = update_tool_names_to_full(labels)
    fig.align_ylabels(axes[:, 0])
    leg = fig.legend(handles, labels, bbox_to_anchor=(1.05, 0.5), loc='center left', frameon=False)
    fig.tight_layout()
    fig.savefig(next_name(env["pd-work"]), bbox_extra_artists=(leg,), bbox_inches='tight')
    plt.show()
def viz_stats_5p_partial(env, df_tidy, tool_order, reference):
    # type: (Environment, pd.DataFrame, List[str], str) -> None
    """2x2 summary of gene-start performance on fragments, by completeness.

    Top row: number of genes found (IC5p / Comp); bottom row: gene-start
    error rates; shared legend below the axes. Genes incomplete at the 3'
    end are folded into "Comp" since their 5' end is intact. The reference
    tool is skipped.
    """
    # Aggregate over genomes per (chunk size, tool).
    df2 = df_tidy.groupby(["Chunk Size", "Tool"], as_index=False).sum()
    df2["Number of Comp Found"] += df2["Number of IC3p Found"]
    df2["Number of Comp Match"] += df2["Number of IC3p Match"]
    df2["Error Rate Comp"] = (df2["Number of Comp Found"] - df2["Number of Comp Match"]) / df2["Number of Comp Found"]
    df2["Error Rate IC5p"] = (df2["Number of IC5p Found"] - df2["Number of IC5p Match"]) / df2["Number of IC5p Found"]
    figsize = set_size("thesis", subplots=(2, 2), legend="bottom")
    fig, axes = plt.subplots(2, 2, figsize=figsize, sharey="row")
    reg_kws = {"lowess": True, "scatter_kws": {"s": 0.1, "alpha": 0.3},
               "line_kws": {"linewidth": 1}}
    from collections import abc
    axes_unr = axes  # keep the 2x2 array shape for align_ylabels below
    if not isinstance(axes, abc.Iterable):
        axes = [axes]
    else:
        axes = axes.ravel()
    ax = None
    i = 0
    fontsize = "xx-small"
    # Top row: found counts per condition.
    for ax, col in zip(axes[0:2], ["Number of IC5p Found", "Number of Comp Found"]):
        for t in tool_order:
            if t.lower() == reference.lower():
                continue
            df_curr = df2[case_insensitive_match(df2, "Tool", t)]
            seaborn.regplot(
                df_curr["Chunk Size"], df_curr[col], label=t, color=CM.get_map("tools")[t.lower()],
                **reg_kws, ax=ax
            )
        if max(df2[col]) > 2000:
            ax.yaxis.set_major_formatter(FuncFormatter(number_formatter))
        col_text = "\n".join(wrap(col, 20, break_long_words=False))
        ax.set_ylabel(col_text, wrap=True, fontsize=fontsize)
        ax.tick_params(labelsize=fontsize, length=2)
        if i == 0:
            ax.set_ylabel("Number of Genes Found", fontsize=fontsize)
        else:
            ax.set_ylabel("")
        ax.set_xlabel("")
        i += 1
    # Bottom row: error rates per condition.
    for ax, col in zip(axes[2:], ["Error Rate IC5p", "Error Rate Comp"]):
        for t in tool_order:
            if t.lower() == reference.lower():
                continue
            # Fixed: the boolean mask was previously built from df_tidy (the
            # pre-aggregation frame) but applied to df2, selecting
            # misaligned rows.
            df_curr = df2[case_insensitive_match(df2, "Tool", t)]
            seaborn.regplot(
                df_curr["Chunk Size"], df_curr[col], label=t, color=CM.get_map("tools")[t.lower()],
                **reg_kws, ax=ax
            )
        if len(df_curr[col]) > 0 and max(df_curr[col]) > 2000:
            ax.yaxis.set_major_formatter(FuncFormatter(number_formatter))
        col_text = "\n".join(wrap(col, 20, break_long_words=False))
        ax.set_ylabel(col_text, wrap=True, fontsize=fontsize)
        ax.tick_params(labelsize=fontsize, length=2)
        # NOTE(review): i is >= 2 by the time this row runs, so this branch
        # never fires; kept as in the original.
        if i == 0:
            ax.set_ylabel("Gene-Start Error Rate", fontsize=fontsize)
        else:
            ax.set_ylabel("")
        ax.set_xlabel("Fragment Size (nt)")
        i += 1
    if ax is not None:
        fig.subplots_adjust(bottom=0.2)
        handles, labels = ax.get_legend_handles_labels()
        labels = update_tool_names_to_full(labels)
        leg = fig.legend(handles, labels, bbox_to_anchor=(0.5, 0.1), loc='upper center', ncol=len(tool_order),
                         bbox_transform=fig.transFigure, frameon=False,
                         fontsize=fontsize)
        # Make legend markers readable (regplot scatter points are tiny).
        for lh in leg.legendHandles:
            lh.set_alpha(1)
            lh.set_sizes([18] * (len(tool_order)))
        for i in range(2):
            fig.align_ylabels(axes_unr[:, i])
        fig.tight_layout(rect=[0, 0.1, 1, 1])
        fig.savefig(next_name(env["pd-work"]), bbox_extra_artists=(leg,))  # bbox_inches='tight'
    plt.show()
def _helper_join_reference_and_tidy_data(env, df_per_gene, tools, list_ref):
    # type: (Environment, pd.DataFrame, List[str], List[str]) -> [str, pd.DataFrame]
    """Build the joint reference annotation, recompute per-gene stats against
    it, and return the reference name together with genome-level tidy
    statistics restricted to the requested tools plus the reference."""
    reference = _helper_df_joint_reference(df_per_gene, list_ref)
    df_per_gene = update_dataframe_with_stats(df_per_gene, tools, reference).copy()

    # Genome level: compute the stats separately for every chunk size, then
    # stack the per-chunk results into one frame.
    per_chunk = [
        get_stats_at_gcfid_level_with_reference(df_group, tools, reference)
        for _, df_group in df_per_gene.groupby("Chunk Size", as_index=False)
    ]
    df_per_genome = pd.concat(per_chunk, ignore_index=True, sort=False)

    df_tidy = tidy_genome_level(env, df_per_genome)
    df_tidy = df_tidy[df_tidy["Tool"].apply(lambda x: x.lower()).isin(tools + [reference])]
    return reference, df_tidy
def viz_stats_3p_missed_vs_length(env, df_per_gene, reference):
    # type: (Environment, pd.DataFrame, str) -> None
    """Placeholder for analyzing missed 3' predictions against gene length;
    not implemented yet."""
    pass
def viz_stats_3p_sensitivity_specificity_collective(env, df_tidy, reference):
    # type: (Environment, pd.DataFrame, str) -> None
    """Collective (summed over genomes) 3' analysis.

    2x2 figure: sensitivity and specificity on the top row; number of missed
    genes and false positives on the bottom row, with the reference gene
    count drawn dashed. A shared legend is placed to the right.
    """
    df2 = df_tidy.groupby(["Chunk Size", "Tool"], as_index=False).sum()
    df2["Sensitivity"] = df2["Number of Found"] / df2["Number in Reference"]
    df2["Specificity"] = df2["Number of Found"] / df2["Number of Predictions"]
    df2["False Positive"] = df2["Number of Predictions"] - df2["Number of Found"]
    # Copy the non-reference slice so the display-name fix below does not
    # write into a view (pandas SettingWithCopy).
    df2_no_ref = df2[df2["Tool"].apply(lambda x: x.lower()) != reference.lower()].copy()
    df2_ref = df2[df2["Tool"].apply(lambda x: x.lower()) == reference.lower()]
    df2_no_ref.loc[df2_no_ref["Tool"] == "MPRODIGAL", "Tool"] = "MProdigal"
    tools = list(df2_no_ref["Tool"].unique())

    fig, axes = plt.subplots(2, 2)
    # Top row: sensitivity and specificity per tool.
    for col, ax in zip(["Sensitivity", "Specificity"], axes[0]):
        for t in tools:
            df_curr = df2_no_ref[df2_no_ref["Tool"] == t]
            ax.plot(df_curr["Chunk Size"], df_curr[col], color=CM.get_map("tools")[t.lower()],
                    label=t)
        ax.set_ylabel(col)
        ax.set_ylim(0, 1)
    # Bottom row: missed genes and false positives, with the reference's
    # total gene count as a dashed baseline.
    for i, (col, ax) in enumerate(zip(["Number of Missed", "False Positive"], axes[1])):
        for t in tools:
            df_curr = df2_no_ref[df2_no_ref["Tool"] == t]
            ax.plot(df_curr["Chunk Size"], df_curr[col], color=CM.get_map("tools")[t.lower()],
                    label=t)
        ax.plot(df2_ref["Chunk Size"], df2_ref["Number in Reference"], linestyle="dashed",
                color=CM.get_map("tools")[reference.lower()], label="RefSeq")
        ax.set_ylabel(col)
        ax.set_xlabel("Fragment Size (nt)")
        ax.yaxis.set_major_formatter(FuncFormatter(number_formatter))
    handles, labels = ax.get_legend_handles_labels()
    labels = update_tool_names_to_full(labels)
    leg = fig.legend(handles, labels, bbox_to_anchor=(1.05, 0.5), loc='center left', frameon=False)
    fig.align_ylabels(axes[:, 0])
    fig.tight_layout()
    fig.savefig(next_name(env["pd-work"]), bbox_extra_artists=(leg,), bbox_inches='tight')
    plt.show()
def viz_stats_3p_sensitivity_specificity_inv(env, df_tidy, reference):
    # type: (Environment, pd.DataFrame, str) -> None
    """Plot 3' false-negative and false-positive error rates versus fragment size.

    Aggregates the per-genome counts over each (Chunk Size, Tool) pair, derives
    the error rates from sensitivity/specificity, and writes the resulting
    1x2 figure to the environment's working directory.
    """
    df2 = df_tidy.groupby(["Chunk Size", "Tool"], as_index=False).sum()
    df2["Sensitivity"] = df2["Number of Found"] / df2["Number in Reference"]
    df2["Specificity"] = df2["Number of Found"] / df2["Number of Predictions"]
    df2["3' FP Error Rate"] = 1 - df2["Specificity"]
    df2["3' FN Error Rate"] = 1 - df2["Sensitivity"]
    df2["False Positive"] = df2["Number of Predictions"] - df2["Number of Found"]

    # copy() so the .loc assignment below writes to an independent frame rather
    # than a view of df2 (avoids pandas chained-assignment warnings/ambiguity)
    df2_no_ref = df2[df2["Tool"].apply(lambda x: x.lower()) != reference.lower()].copy()

    # normalize display name for MetaProdigal
    df2_no_ref.loc[df2_no_ref["Tool"] == "MPRODIGAL", "Tool"] = "MProdigal"

    tools = list(df2_no_ref["Tool"].unique())

    fig, axes = plt.subplots(1, 2, figsize=set_size("thesis", subplots=(2, 2)))

    for col, ax in zip(["3' FN Error Rate", "3' FP Error Rate"], axes):
        for t in tools:
            df_curr = df2_no_ref[df2_no_ref["Tool"] == t]
            ax.plot(df_curr["Chunk Size"], df_curr[col], color=CM.get_map("tools")[t.lower()],
                    label=t)
        ax.set_ylabel(col)
        ax.set_ylim(0, 0.5)
        ax.set_xlabel("Fragment Size (nt)")

    # single figure-level legend, placed outside the axes on the right
    handles, labels = ax.get_legend_handles_labels()
    labels = update_tool_names_to_full(labels)
    leg = fig.legend(handles, labels, bbox_to_anchor=(1.05, 0.5), loc='center left', frameon=False)
    fig.tight_layout()
    fig.savefig(next_name(env["pd-work"]), bbox_extra_artists=(leg,), bbox_inches='tight')
    plt.show()
def viz_stats_3p(env, df_per_gene, tools, list_ref, **kwargs):
    # type: (Environment, pd.DataFrame, List[str], List[str], Dict[str, Any]) -> None
    """Visualize statistics at 3prime level"""
    pf_checkpoint = get_value(kwargs, "pf_checkpoint", None)

    # reuse the checkpoint when one exists on disk; otherwise build the tidy
    # data and (optionally) store it for next time
    if pf_checkpoint and os.path.isfile(pf_checkpoint):
        reference, df_tidy = load_obj(pf_checkpoint)
    else:
        reference, df_tidy = _helper_join_reference_and_tidy_data(env, df_per_gene, tools, list_ref)
        if pf_checkpoint:
            save_obj([reference, df_tidy], pf_checkpoint)

    # normalize the auto-selected MGM2 label
    df_tidy.loc[df_tidy["Tool"] == "MGM2_AUTO", "Tool"] = "MGM2"
    reference = reference.replace("MGM2_AUTO", "MGM2")

    ########## Genome Level ##########
    viz_stats_3p_number_of_predictions_number_of_found(env, df_tidy, reference)
    viz_stats_3p_number_of_predictions_precision(env, df_tidy, reference)
    viz_stats_3p_sensitivity_specificity(env, df_tidy, reference)
    viz_stats_3p_sensitivity_specificity_collective(env, df_tidy, reference)
    viz_stats_3p_sensitivity_specificity_inv(env, df_tidy, reference)

    ########## Gene Level ##########
    # Missed vs reference length
    viz_stats_3p_missed_vs_length(env, df_per_gene, reference)
def viz_stats_5p(env, df_per_gene, tools, list_ref, **kwargs):
    # type: (Environment, pd.DataFrame, List[str], List[str], Dict[str, Any]) -> None
    """Visualize statistics at 5prime level"""
    pf_checkpoint = get_value(kwargs, "pf_checkpoint", None)

    # reuse the checkpoint when one exists on disk; otherwise build the tidy
    # data and (optionally) store it for next time
    if pf_checkpoint and os.path.isfile(pf_checkpoint):
        reference, df_tidy = load_obj(pf_checkpoint)
    else:
        reference, df_tidy = _helper_join_reference_and_tidy_data(env, df_per_gene, tools, list_ref)
        if pf_checkpoint:
            save_obj([reference, df_tidy], pf_checkpoint)

    # normalize the auto-selected MGM2 label
    df_tidy.loc[df_tidy["Tool"] == "MGM2_AUTO", "Tool"] = "MGM2"
    reference = reference.replace("MGM2_AUTO", "MGM2")

    # Number of 5p Errors, number of found
    viz_stats_5p_number_of_errors_number_of_found(env, df_tidy, reference)
    viz_stats_5p_error_rate(env, df_tidy, reference)
    viz_stats_5p_error_rate_partial(env, df_tidy, reference)
    viz_stats_5p_partial(env, df_tidy, tools, reference)
def viz_stats_per_gene(env, df_per_gene, tools, list_ref_5p, list_ref_3p, **kwargs):
    # type: (Environment, pd.DataFrame, List[str], List[str], List[str]) -> None
    """Run the 3' and then the 5' per-gene statistic visualizations."""
    pf_checkpoint_3p = kwargs.get("pf_checkpoint_3p")
    pf_checkpoint_5p = kwargs.get("pf_checkpoint_5p")
    viz_stats_3p(env, df_per_gene, tools, list_ref_3p, pf_checkpoint=pf_checkpoint_3p)
    viz_stats_5p(env, df_per_gene, tools, list_ref_5p, pf_checkpoint=pf_checkpoint_5p)
def tools_match_for_dataframe_row(r, tools):
    # type: (pd.Series, Iterable[str]) -> bool
    """True when every tool made a 5' prediction for this gene and all agree."""
    predictions = [r[f"5p-{tool}"] for tool in tools]
    if any(p is None for p in predictions):
        # at least one tool made no prediction for this gene
        return False
    return all_elements_equal(predictions)
def check_columns(df):
    """Print the name of every column whose cells are not all of the same
    Python type as the first row (helps spot malformed CSV input)."""
    for name in df.columns:
        cell_types = df[[name]].applymap(type)
        first_row_types = df[[name]].iloc[0].apply(type)
        mismatch = (cell_types != first_row_types).any(axis=1)
        if mismatch.any():
            print(name)
def main(env, args):
    # type: (Environment, argparse.Namespace) -> None
    """Load the per-gene statistics CSV and run all per-gene visualizations."""
    df = pd.read_csv(args.pf_data)
    if args.parse_names:
        df["Genome"] = df[["Genome"]].apply(fix_names, axis=1)

    # restrict the analysis to fragments shorter than 6000 nt
    df = df[df["Chunk Size"] < 6000].copy()

    # Tool names are encoded in the "5p-<tool>" column headers; if no explicit
    # tool list was provided on the command line, use all of them.
    all_tools = sorted({column.split("-")[1] for column in df.columns if "5p-" in column})

    # every requested reference must correspond to a known tool column
    for reference_list in (args.ref_5p, args.ref_3p):
        for reference in reference_list:
            if reference not in all_tools:
                raise ValueError(f"Unknown reference {reference}")

    tools = args.tools if args.tools is not None else all_tools
    # references are compared against, never evaluated themselves
    tools = sorted(set(tools) - set(args.ref_5p) - set(args.ref_3p))

    viz_stats_per_gene(env, df, tools, args.ref_5p, args.ref_3p,
                       pf_checkpoint_5p=args.pf_checkpoint_5p,
                       pf_checkpoint_3p=args.pf_checkpoint_3p)
if __name__ == "__main__":
    # my_env and parsed_args are built at module load time (outside this chunk)
    main(my_env, parsed_args)
|
import hashlib
from lxml import etree
import subprocess as sub
import numpy as np
def evaluate_population(pop, record_history=False):
    """Evaluate every individual in `pop` with the Voxelyze3 simulator.

    Writes one .vxd robot description per valid, not-yet-cached individual
    into data<seed>/, launches the simulator once over the whole directory,
    then copies the resulting fitness scores back onto the individuals and
    into pop.already_evaluated. With record_history=True only the trajectory
    (.hist) of the run champ is recorded and no fitness is assigned.
    """
    seed = pop.seed

    # clear old .vxd robot files from the data directory
    sub.call("rm data{}/*.vxd".format(seed), shell=True)

    # remove old sim output.xml if we are saving new stats
    if not record_history:
        sub.call("rm output{}.xml".format(seed), shell=True)

    num_evaluated_this_gen = 0
    for n, ind in enumerate(pop):

        # don't evaluate if invalid: give every objective except age its worst value
        if not ind.phenotype.is_valid():
            for rank, goal in pop.objective_dict.items():
                if goal["name"] != "age":
                    setattr(ind, goal["name"], goal["worst_value"])
            print "Skipping invalid individual"

        # otherwise create a vxd
        else:
            num_evaluated_this_gen += 1
            pop.total_evaluations += 1
            (x, y, z) = ind.genotype.orig_size_xyz

            root = etree.Element("VXD")  # new vxd root

            if record_history:
                # replace the previous champ history file for this generation
                sub.call("rm champ_{0}_gen{1}.hist".format(seed, pop.gen), shell=True)
                history = etree.SubElement(root, "RecordHistory")
                history.set('replace', 'VXA.Simulator.RecordHistory')
                etree.SubElement(history, "RecordStepSize").text = '100'

            # voxel-lattice dimensions of this robot
            structure = etree.SubElement(root, "Structure")
            structure.set('replace', 'VXA.VXC.Structure')
            structure.set('Compression', 'ASCII_READABLE')
            etree.SubElement(structure, "X_Voxels").text = str(x)
            etree.SubElement(structure, "Y_Voxels").text = str(y)
            etree.SubElement(structure, "Z_Voxels").text = str(z)

            # write each phenotype layer as one CDATA block per z-slice
            for name, details in ind.genotype.to_phenotype_mapping.items():
                state = details["state"]
                flattened_state = state.reshape(z, x*y)
                data = etree.SubElement(structure, name)
                for i in range(flattened_state.shape[0]):
                    layer = etree.SubElement(data, "Layer")
                    if name == "Data":
                        str_layer = "".join([str(c) for c in flattened_state[i]])
                    # need 3d vectors for cilia
                    # else:
                    #     str_layer = "".join([str(c)+", " for c in flattened_state[i]])
                    # NOTE(review): when name != "Data", str_layer is stale or
                    # undefined here — presumably only "Data" reaches this branch
                    # in practice; confirm against to_phenotype_mapping contents.
                    layer.text = etree.CDATA(str_layer)

            # hack: interleave cilia_X / cilia_Y states into one (z, 3*x*y) force array
            base_cilia_force = np.zeros((z, 3*x*y), dtype=np.float16)
            for name, details in ind.genotype.to_phenotype_mapping.items():
                state = details["state"]
                flattened_state = state.reshape(z, x*y)
                if name == "cilia_X":
                    base_cilia_force[:, ::3] = flattened_state
                if name == "cilia_Y":
                    base_cilia_force[:, 1::3] = flattened_state
                # if name == "cilia_Z":
                #     base_cilia_force[:, 2::3] = flattened_state

            data = etree.SubElement(structure, "BaseCiliaForce")
            for i in range(base_cilia_force.shape[0]):
                layer = etree.SubElement(data, "Layer")
                str_layer = "".join([str(c) + ", " for c in base_cilia_force[i]])
                layer.text = etree.CDATA(str_layer)
            # end hack

            # md5 so we don't eval the same vxd more than once
            m = hashlib.md5()
            m.update(etree.tostring(root))
            ind.md5 = m.hexdigest()

            # don't evaluate if identical phenotype has already been evaluated:
            # copy the cached objective values onto this individual instead
            if ind.md5 in pop.already_evaluated:
                for rank, goal in pop.objective_dict.items():
                    if goal["tag"] is not None:
                        setattr(ind, goal["name"], pop.already_evaluated[ind.md5][rank])
                print "Age {0} individual already evaluated: cached fitness is {1}".format(ind.age, ind.fitness)

            else:
                # save the vxd to data folder
                with open('data'+str(seed)+'/bot_{:04d}.vxd'.format(ind.id), 'wb') as vxd:
                    vxd.write(etree.tostring(root))

    # ok let's finally evaluate all the robots in the data directory
    if record_history:  # just save history, don't assign fitness
        print "Recording the history of the run champ"
        sub.call("./Voxelyze3 -i data{0} > champ_{0}_gen{1}.hist".format(seed, pop.gen), shell=True)

    else:  # normally, we will just want to update fitness and not save the trajectory of every voxel
        print "GENERATION {}".format(pop.gen)
        print "Launching {0} voxelyze calls, out of {1} individuals".format(num_evaluated_this_gen, len(pop))

        # retry the whole batch until the simulator output parses cleanly
        while True:
            try:
                sub.call("./Voxelyze3 -i data{0} -o output{0}.xml".format(seed), shell=True)
                # sub.call waits for the process to return
                # after it does, we collect the results output by the simulator
                root = etree.parse("output{}.xml".format(seed)).getroot()
                break

            except IOError:
                print "Uh oh, there was an IOError! I'll re-simulate this batch again..."
                pass

            except IndexError:
                print "Uh oh, there was an IndexError! I'll re-simulate this batch again..."
                pass

        # pull each newly simulated individual's fitness out of the batch
        # results and cache its objective values by md5
        for ind in pop:
            if ind.phenotype.is_valid() and ind.md5 not in pop.already_evaluated:
                ind.fitness = float(root.findall("detail/bot_{:04d}/fitness_score".format(ind.id))[0].text)
                print "Assigning ind {0} fitness {1}".format(ind.id, ind.fitness)
                pop.already_evaluated[ind.md5] = [getattr(ind, details["name"])
                                                  for rank, details in
                                                  pop.objective_dict.items()]
|
# -*- coding: utf-8 -*-
"""
Model Map table csv_shp_output_fields
:author: Sergio Aparicio Vegas
:version: 0.1
:date: 28 sept. 2017
"""
__docformat__ = "restructuredtext"
class CsvShpOutputFields():
    """ DB Entity csv_shp_output_fields to Python object CsvShpOutputFields """

    def __init__(self):
        # Default state: numeric id 0, every descriptive field unset (None).
        self.__id = 0
        self.__scenario = None
        self.__fileCategory = None
        self.__fileType = None
        self.__calculateModel = None
        self.__headerName = None
        self.__attributeName = None
        self.__format = None
        self.__length = None
        self.__precision = None

    def __str__(self):
        # NOTE: string fields are concatenated directly, so calling str() on an
        # object whose fields are still None raises TypeError (unchanged behavior).
        # Fixed typo in the output label: "precison" -> "precision".
        return "id:" + str(self.id) + " - scenario:" + self.scenario + " - fileCategory:" + self.fileCategory + " - fileType:" + self.fileType + " - calculateModel:" + self.calculateModel \
            + " - headerName:" + self.headerName + " - attributeName:" + self.attributeName \
            + " - format:" + self.format + " - length:" + str(self.length) + " - precision:" + str(self.precision)

    @property
    def id(self):
        return self.__id

    @id.setter
    def id(self, val):
        self.__id = val

    @property
    def scenario(self):
        return self.__scenario

    @scenario.setter
    def scenario(self, val):
        self.__scenario = val

    @property
    def fileCategory(self):
        return self.__fileCategory

    @fileCategory.setter
    def fileCategory(self, val):
        self.__fileCategory = val

    @property
    def fileType(self):
        return self.__fileType

    @fileType.setter
    def fileType(self, val):
        self.__fileType = val

    @property
    def calculateModel(self):
        return self.__calculateModel

    @calculateModel.setter
    def calculateModel(self, val):
        # Bug fix: this setter previously assigned to self.__fileType
        # (copy-paste error), silently clobbering fileType and leaving
        # calculateModel permanently None.
        self.__calculateModel = val

    @property
    def headerName(self):
        return self.__headerName

    @headerName.setter
    def headerName(self, val):
        self.__headerName = val

    @property
    def attributeName(self):
        return self.__attributeName

    @attributeName.setter
    def attributeName(self, val):
        self.__attributeName = val

    @property
    def format(self):
        return self.__format

    @format.setter
    def format(self, val):
        self.__format = val

    @property
    def length(self):
        return self.__length

    @length.setter
    def length(self, val):
        self.__length = val

    @property
    def precision(self):
        return self.__precision

    @precision.setter
    def precision(self, val):
        self.__precision = val
from .cell import Cell, CellType
from .pawn import Attacker, Defender, King
from enum import Enum
class GameStatus(Enum):
    """Possible states/outcomes of a game (see Game.getStatus)."""
    IN_PROGRESS = 1   # game still running
    DEFENDER_WON = 2  # the king reached a castle cell
    ATTACKER_WON = 3  # every cell around the king is blocking
    DRAW = 4          # not produced by Game.getStatus in this file
class Game:
    """Class managing the logic of the game.

    Constructed from 2 players with default size 11.
    Note that instantiation should be left to a Controller.

    Main methods:
    - execute(move) : performs a move properly.
      This is the only correct way to perform a move
    - getStatus() : returns the status of the game (Enum class GameStatus)
    - isOver() : True if the game is over (won or lost)
    - getCell(row, col) : only way to get a cell from a position
    """

    def __init__(self, attackingPlayer, defendingPlayer, size=11):
        """Creates a game of the given size (width) with the two given players.
        Creates the board of cells and populates it with pawns."""
        self.width = size
        self.cells = Game.createCells(self.width)
        self.attackingPlayer = attackingPlayer
        self.defendingPlayer = defendingPlayer
        self.king = King(self.defendingPlayer)
        self.populate()

    def execute(self, move):
        """Performs the given move and all side effects.
        This is the only correct way to execute a move."""
        move.do_unsafe()
        # after the move, any pawn adjacent to the destination may be captured
        potentiallyTaken = self.getNeighbouringCells(move.endCell)
        potentialKiller = move.endCell.getPawn()
        for cell in potentiallyTaken:
            if cell.isOccupied():
                potentialVictim = cell.getPawn()
                if self.isTakenBy(potentialVictim, potentialKiller):
                    cell.takePawn()

    def getStatus(self):
        """Returns the current status of the game (see Enum class GameStatus)."""
        if self.king.cell.type == CellType.CASTLE:
            return GameStatus.DEFENDER_WON
        cellsAroundKing = self.getNeighbouringCells(self.king.cell)
        if all(cell.isBlockingTo(self.king) for cell in cellsAroundKing):
            return GameStatus.ATTACKER_WON
        return GameStatus.IN_PROGRESS

    def isOver(self):
        """Returns True if the game is over (won or lost)."""
        return self.getStatus() in (GameStatus.ATTACKER_WON,
                                    GameStatus.DEFENDER_WON)

    def getCell(self, rowIndex, colIndex):
        """Returns cell at the corresponding (row, column)."""
        return self.cells[rowIndex][colIndex]

    def _withinLimits(self, index):
        """True if `index` is a valid row/column of the cells array.

        Bug fix: the original `index >= 0 & index < self.width` parsed as
        `index >= (0 & index) < self.width` because `&` binds tighter than
        comparisons, accepting every non-negative index. The board array
        (playing area plus surrounding walls) is (width + 2) cells wide.
        """
        return 0 <= index < self.width + 2

    def getNeighbouringCells(self, cell):
        """Returns the 4 directly neighbouring cells of the given cell.
        May return less than 4 cells if a border cell is given."""
        (row, col) = cell.position
        for (r, c) in ((row + 1, col), (row - 1, col),
                       (row, col + 1), (row, col - 1)):
            if self._withinLimits(r) and self._withinLimits(c):
                yield self.getCell(r, c)

    def isTakenBy(self, victim, killer):
        """Returns True if victim pawn is taken by killer pawn,
        assuming killer just moved.
        Returns False if victim is the King as capture rules are different."""
        if victim == self.king:
            return False
        (row, col) = victim.cell.position
        # vertical and horizontal neighbour pairs of the victim
        nhood1 = [self.getCell(r, c)
                  for (r, c) in [(row + 1, col), (row - 1, col)]
                  if self._withinLimits(r) and self._withinLimits(c)]
        nhood2 = [self.getCell(r, c)
                  for (r, c) in [(row, col + 1), (row, col - 1)]
                  if self._withinLimits(r) and self._withinLimits(c)]

        def neighbourhoodIsBlocking(nhood):
            return all(cell.isBlockingTo(victim) for cell in nhood)

        def neighbourhoodContainsTarget(nhood):
            return any(cell.getPawn() == killer
                       for cell in nhood if cell.isOccupied())

        def check(nhood):
            # captured when sandwiched on one axis with the killer on that axis
            return (neighbourhoodIsBlocking(nhood) and
                    neighbourhoodContainsTarget(nhood))

        return check(nhood1) or check(nhood2)

    def populate(self):
        """Fills in the board with pawns in starting position.

        NOTE: the layout below is hard-coded for the default 11x11 board.
        """
        def a():
            return Attacker(self.attackingPlayer)

        def d():
            return Defender(self.defendingPlayer)

        # attackers along the middle of each edge, plus one inset per edge
        for i in range(4, 9):
            self.cells[i][1].placePawn(a())
            self.cells[i][self.width].placePawn(a())
            self.cells[1][i].placePawn(a())
            self.cells[self.width][i].placePawn(a())
        self.cells[6][2].placePawn(a())
        self.cells[2][6].placePawn(a())
        self.cells[10][6].placePawn(a())
        self.cells[6][10].placePawn(a())

        # king in the centre, defenders in a diamond around him
        self.cells[6][6].placePawn(self.king)
        for i in [4, 5, 7, 8]:
            self.cells[i][6].placePawn(d())
            self.cells[6][i].placePawn(d())
        for i in [5, 7]:
            for j in [5, 7]:
                self.cells[i][j].placePawn(d())

    @staticmethod
    def createCells(n):
        """Creates the (n+2)-by-(n+2) 2D list of cells forming a valid board:
        an n-wide playing area surrounded by walls, with castles in the
        4 corners of the playing area."""
        def wallrow():
            wall = [Cell(CellType.VWALL)]
            wall.extend([Cell(CellType.HWALL) for _ in range(n)])
            wall.append(Cell(CellType.VWALL))
            return wall

        def normalrow():
            out = [Cell(CellType.VWALL)]
            out.extend([Cell(CellType.NORMAL) for _ in range(n)])
            out.append(Cell(CellType.VWALL))
            return out

        def castlerow():
            out = [Cell(CellType.VWALL), Cell(CellType.CASTLE)]
            out.extend([Cell(CellType.NORMAL) for _ in range(n - 2)])
            out.extend([Cell(CellType.CASTLE), Cell(CellType.VWALL)])
            return out

        cells = [wallrow()]
        cells.append(castlerow())
        cells.extend([normalrow() for _ in range(n - 2)])
        cells.append(castlerow())
        cells.append(wallrow())
        for i in range(n + 2):
            for j in range(n + 2):
                cells[i][j].setPosition((i, j))
        return cells

    def __repr__(self):
        return ("Game(attackingPlayer=%r, defendingPlayer=%r, size=%r)" %
                (self.attackingPlayer, self.defendingPlayer, self.width))
|
#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import azure.functions as func
from onefuzztypes.enums import ErrorCode
from onefuzztypes.models import Error
from onefuzztypes.requests import NodeGet, NodeSearch, NodeUpdate
from onefuzztypes.responses import BoolResult
from ..onefuzzlib.endpoint_authorization import call_if_user
from ..onefuzzlib.events import get_events
from ..onefuzzlib.request import not_ok, ok, parse_request
from ..onefuzzlib.workers.nodes import Node, NodeTasks
def get(req: func.HttpRequest) -> func.HttpResponse:
    """Handle GET: return one node by machine_id, or search matching nodes."""
    request = parse_request(NodeSearch, req)
    if isinstance(request, Error):
        return not_ok(request, context="pool get")

    if not request.machine_id:
        # no machine_id given: search by state / pool / scaleset filters
        matching = Node.search_states(
            states=request.state,
            pool_name=request.pool_name,
            scaleset_id=request.scaleset_id,
        )
        return ok(matching)

    node = Node.get_by_machine_id(request.machine_id)
    if not node:
        return not_ok(
            Error(code=ErrorCode.UNABLE_TO_FIND, errors=["unable to find node"]),
            context=request.machine_id,
        )
    if isinstance(node, Error):
        return not_ok(node, context=request.machine_id)

    # attach the (task_id, state) pairs of the tasks running on this node
    node_tasks = NodeTasks.get_by_machine_id(request.machine_id)
    node.tasks = [(t.task_id, t.state) for t in node_tasks]
    return ok(node)
def post(req: func.HttpRequest) -> func.HttpResponse:
    """Handle POST: update a node's debug_keep_node flag."""
    request = parse_request(NodeUpdate, req)
    if isinstance(request, Error):
        return not_ok(request, context="NodeUpdate")

    node = Node.get_by_machine_id(request.machine_id)
    if not node:
        missing = Error(code=ErrorCode.UNABLE_TO_FIND, errors=["unable to find node"])
        return not_ok(missing, context=request.machine_id)

    # only overwrite the flag when the request explicitly sets it
    if request.debug_keep_node is not None:
        node.debug_keep_node = request.debug_keep_node

    node.save()
    return ok(BoolResult(result=True))
def delete(req: func.HttpRequest) -> func.HttpResponse:
    """Handle DELETE: halt a node and clear any debug hold on it."""
    request = parse_request(NodeGet, req)
    if isinstance(request, Error):
        return not_ok(request, context="NodeDelete")

    node = Node.get_by_machine_id(request.machine_id)
    if not node:
        missing = Error(code=ErrorCode.UNABLE_TO_FIND, errors=["unable to find node"])
        return not_ok(missing, context=request.machine_id)

    node.set_halt()
    # a held node would otherwise survive the halt for debugging; release it
    if node.debug_keep_node:
        node.debug_keep_node = False
        node.save()
    return ok(BoolResult(result=True))
def patch(req: func.HttpRequest) -> func.HttpResponse:
    """Handle PATCH: restart (stop) a node and clear any debug hold on it."""
    request = parse_request(NodeGet, req)
    if isinstance(request, Error):
        return not_ok(request, context="NodeRestart")

    node = Node.get_by_machine_id(request.machine_id)
    if not node:
        missing = Error(code=ErrorCode.UNABLE_TO_FIND, errors=["unable to find node"])
        return not_ok(missing, context=request.machine_id)

    node.stop()
    # a held node would otherwise survive the restart for debugging; release it
    if node.debug_keep_node:
        node.debug_keep_node = False
        node.save()
    return ok(BoolResult(result=True))
def main(req: func.HttpRequest, dashboard: func.Out[str]) -> func.HttpResponse:
    """Dispatch on HTTP method (with user authorization) and flush queued events."""
    handlers = {"GET": get, "PATCH": patch, "DELETE": delete, "POST": post}
    result = call_if_user(req, handlers[req.method])

    # push any events generated while handling the request to the dashboard
    events = get_events()
    if events:
        dashboard.set(events)
    return result
|
import os
import atexit
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from audcon import configuration
from gpm import logging
# Flask application configured from the audcon configuration module
app = Flask(__name__)
app.config.from_object(configuration.cfg)

# database handle and schema-migration support bound to the app
db = SQLAlchemy(app)
migrate = Migrate(app, db)

# project logger with rotation; log_level=10 corresponds to DEBUG
log = logging.Log(script=os.path.join(os.path.dirname(os.path.abspath(__file__)),'audcon.py'), log_level=10,
                  rotating=True)
log.info('init')

# imported after `app` and `db` exist — presumably so views/models can use them
# without a circular import; confirm before reordering
from audcon import views, models

# Schedule scanner and converter
from apscheduler.schedulers.background import BackgroundScheduler
from audcon.modules import scanner, converter

scheduler = BackgroundScheduler(daemon=True)
# scan every 15 minutes; run conversions every hour
scheduler.add_job(scanner.scan, trigger="interval", minutes=15)
scheduler.add_job(converter.convert, trigger="interval", minutes=60)
scheduler.start()

# Shut down the scheduler when exiting the app
atexit.register(lambda: scheduler.shutdown())
|
#!/usr/bin/python
#
# Copyright 2002-2021 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
PyCOMPSs Testbench PSCO Models
========================
"""
import socket
from pycompss.tests.resources.storage.Object import SCO
from pycompss.api.task import task
from pycompss.api.parameter import INOUT
from pycompss.util.serialization.serializer import serialize_to_file
def update_file(obj):
    """Serialize `obj` back to its .PSCO file when it has a persistent ID."""
    identifier = obj.getID()
    if identifier is None:
        return
    storage_path = '/tmp/PSCO/' + str(socket.gethostname()) + '/'  # NOSONAR
    serialize_to_file(obj, storage_path + identifier + ".PSCO")
class MySO(SCO):
    """
    @ClassField value int
    """
    # For simple PSCO test
    # NOTE(review): the docstring above is storage-framework metadata
    # (@ClassField); do not reformat it.
    value = 0

    def __init__(self, v):  # noqa
        # v: initial counter value
        self.value = v

    def get(self):
        # Return the current value.
        return self.value

    def put(self, v):
        # Replace the value and re-serialize the persisted copy.
        self.value = v
        update_file(self)

    @task(target_direction=INOUT)
    def increment(self):
        # PyCOMPSs task: bump the counter and update the persistent object.
        self.value += 1
        self.updatePersistent()
class Words(SCO):
    """
    @ClassField text dict <<position:int>, word_info:str>
    """
    # For Wordcount Test
    # NOTE(review): the docstring above is storage-framework metadata
    # (@ClassField); do not reformat it.
    text = ''

    def __init__(self, t):  # noqa
        # t: input text block for the wordcount test
        self.text = t

    def get(self):
        # Return the stored text.
        return self.text
class Result(SCO):
    """
    @ClassField myd dict <<word:str>,instances:atomicint>
    """
    # NOTE(review): class-level dict is shared across instances until set()
    # assigns a per-instance one — presumably intentional for this test mockup.
    myd = {}

    def get(self):
        # Return the word-count dictionary.
        return self.myd

    def set(self, d):
        # Store the word-count dict and re-serialize the persisted copy.
        self.myd = d
        update_file(self)
# For Tiramisu mockup test
class InputData(SCO):
    """
    @ClassField images dict <<image_id:str>, value:list>
    """
    # NOTE(review): class-level dict is shared across instances until set()
    # assigns a per-instance one — presumably intentional for this test mockup.
    images = {}

    def get(self):
        # Return the image dictionary.
        return self.images

    def set(self, i):
        # Store the image dict and re-serialize the persisted copy.
        self.images = i
        update_file(self)
|
import requests
import common_data
from signature import Signature
import logging, sys
# set url
# base URL of the myCloud service under test (environment-specific, from common_data)
url = common_data.mycloud_url
# url = "http://127.0.0.1:8888"
# OAuth device-authorization endpoint appended to the base URL
path = "/d/oauth/authorize"
def get_device_authentication_token():
    """Request a device OAuth token pair from the authorize endpoint.

    Signs the concatenated body fields with the device certificate and POSTs
    them. Returns the response 'data' dict (containing 'access_token' and
    'refresh_token') on success, or a dict with empty token strings on any
    failure.
    """
    headers = init_headers(dict())

    body_content = dict()
    init_body_content(body_content)

    # sign the concatenated body fields with the device certificate
    concat_text = common_data.get_concat_text(body_content)
    signature = Signature()
    signature.load_key(common_data.certificate_serial)
    headers['X-Signature'] = signature.sign(concat_text)

    # bug fixes: removed the duplicated `response = response =` assignment, and
    # the original membership test `'access_token' and 'refresh_token' in ...`
    # short-circuited on the truthy string literal, so only 'refresh_token'
    # was ever checked — both keys must be present.
    response = requests.post(url + path, data=body_content, headers=headers)
    if response.status_code == 200:
        resp_data = response.json()
        data = resp_data.get('data')
        if data is not None and 'access_token' in data and 'refresh_token' in data:
            return data
    return {"access_token": "", "refresh_token": ""}
def init_headers(headers):
    """Populate the default request headers; the signature is filled in later."""
    headers.update({
        'Content-Type': common_data.content_type,
        'X-Signature': "",
    })
    return headers
def init_body_content(body_content):
    """Fill the request body with the device identification fields."""
    body_content.update({
        'app_id': common_data.app_id,
        'certificate_serial': common_data.certificate_serial,
        'cloud_id': common_data.cloud_id,
        'mac_address': common_data.device_mac_addr,
        'serial_number': common_data.device_serial_number,
    })
    return body_content
# baseline case: all headers and body fields valid, expect a successful token response
def testcase_0(headers, body_content):
    """Baseline: POST a correctly signed request and expect both tokens back."""
    headers = headers.copy()
    body_content = body_content.copy()

    # sign the concatenated body fields with the device certificate
    concat_text = common_data.get_concat_text(body_content)
    signature = Signature()
    signature.load_key(common_data.certificate_serial)
    headers['X-Signature'] = signature.sign(concat_text)

    response = requests.post(url + path, data=body_content, headers=headers)
    if response.status_code == 200:
        resp_data = response.json()
        # bug fix: the original `'access_token' and 'refresh_token' in ...`
        # short-circuited on the truthy literal and only checked 'refresh_token';
        # both token keys must be present
        if ('data' in resp_data and 'access_token' in resp_data['data']
                and 'refresh_token' in resp_data['data']):
            print("TEST CASE 0 OK")
        else:
            print("TEST CASE 0 FAILED")
            print("HTTP Header:" + str(headers))
            print("HTTP Body:" + str(body_content))
    else:
        print("TEST CASE 0 FAILED")
        print("HTTP Header:" + str(headers))
        print("HTTP Body:" + str(body_content))
        print(response.text)
# remove header Content-Type
def testcase_1(headers, body_content):
    """Drop the Content-Type header entirely; the server must answer 400.18."""
    hdrs = headers.copy()
    body = body_content.copy()
    hdrs.pop('Content-Type')

    signature = Signature()
    signature.load_key(common_data.certificate_serial)
    hdrs['X-Signature'] = signature.sign(common_data.get_concat_text(body))

    response = requests.post(url + path, data=body, headers=hdrs)
    if response.status_code == 400 and response.json()['code'] == "400.18":
        print("TEST CASE 1 OK")
    else:
        print("TEST CASE 1 FAILED")
        print("HTTP Header:" + str(hdrs))
        print("HTTP Body:" + str(body))
        print(response.text)
def testcase_2(headers, body_content):
    """Omit the X-Signature header; the server must answer 400.0."""
    hdrs = headers.copy()
    body = body_content.copy()
    hdrs.pop('X-Signature')

    response = requests.post(url + path, data=body, headers=hdrs)
    if response.status_code == 400 and response.json()['code'] == "400.0":
        print("TEST CASE 2 OK!")
    else:
        print("TEST CASE 2 FAILED!")
        print("HTTP Header:" + str(hdrs))
        print("HTTP Body:" + str(body))
        print(response.text)
def _missing_field_case(case_num, field, expected_code, headers, body_content):
    """Shared driver for testcases 3-7: drop one body field, re-sign the body,
    POST it, and expect HTTP 400 with the given application error code."""
    headers = headers.copy()
    body_content = body_content.copy()
    body_content.pop(field)

    concat_text = common_data.get_concat_text(body_content)
    signature = Signature()
    signature.load_key(common_data.certificate_serial)
    headers['X-Signature'] = signature.sign(concat_text)

    response = requests.post(url + path, data=body_content, headers=headers)
    if response.status_code == 400 and response.json()['code'] == expected_code:
        print("TEST CASE %d OK!" % case_num)
    else:
        print("TEST CASE %d FAILED!" % case_num)
        print("HTTP Header:" + str(headers))
        print("HTTP Body:" + str(body_content))
        print(response.text)


def testcase_3(headers, body_content):
    """Missing app_id -> expect error code 400.4."""
    _missing_field_case(3, 'app_id', "400.4", headers, body_content)


def testcase_4(headers, body_content):
    """Missing certificate_serial -> expect error code 400.2."""
    _missing_field_case(4, 'certificate_serial', "400.2", headers, body_content)


def testcase_5(headers, body_content):
    """Missing cloud_id -> expect error code 400.25."""
    _missing_field_case(5, 'cloud_id', "400.25", headers, body_content)


def testcase_6(headers, body_content):
    """Missing mac_address -> expect error code 400.22."""
    _missing_field_case(6, 'mac_address', "400.22", headers, body_content)


def testcase_7(headers, body_content):
    """Missing serial_number -> expect error code 400.23."""
    _missing_field_case(7, 'serial_number', "400.23", headers, body_content)
def testcase_8(headers, body_content):
    """Send a bogus Content-Type value; the server must answer 400.19."""
    hdrs = headers.copy()
    body = body_content.copy()
    hdrs['Content-Type'] = "INVALID_CONTENT_TYPE"

    signature = Signature()
    signature.load_key(common_data.certificate_serial)
    hdrs['X-Signature'] = signature.sign(common_data.get_concat_text(body))

    response = requests.post(url + path, data=body, headers=hdrs)
    if response.status_code == 400 and response.json()['code'] == "400.19":
        print("TEST CASE 8 OK!")
    else:
        print("TEST CASE 8 FAILED!")
        print("HTTP Header:" + str(hdrs))
        print("HTTP Body:" + str(body))
        print(response.text)
def testcase_9(headers, body_content):
    """Send a garbage signature; the server must answer 400.1."""
    hdrs = headers.copy()
    body = body_content.copy()
    hdrs['X-Signature'] = "INVALID_SIGNATURE"

    response = requests.post(url + path, data=body, headers=hdrs)
    if response.status_code == 400 and response.json()['code'] == "400.1":
        print("TEST CASE 9 OK!")
    else:
        print("TEST CASE 9 FAILED!")
        print("HTTP Header:" + str(hdrs))
        print("HTTP Body:" + str(body))
        print(response.text)
def testcase_10(headers, body_content):
headers = headers.copy()
body_content = body_content.copy()
body_content['app_id'] = "INVALID_APP_ID"
concat_text = common_data.get_concat_text(body_content)
signature = Signature()
signature.load_key(common_data.certificate_serial)
signed_signature = signature.sign(concat_text)
headers['X-Signature'] = signed_signature
response = requests.post(url + path, data=body_content, headers=headers)
if response.status_code == 400 and response.json()['code'] == "400.5":
print "TEST CASE 10 OK!"
else:
print "TEST CASE 10 FAILED!"
print "HTTP Header:" + str(headers)
print "HTTP Body:" + str(body_content)
print response.text
def testcase_11(headers, body_content):
    """An invalid certificate_serial must be rejected with HTTP 400 / code 400.3."""
    headers = headers.copy()
    body_content = body_content.copy()
    body_content['certificate_serial'] = "INVALID_CERTIFICATE_SERIAL"
    # Still sign with the real certificate serial from common_data.
    concat_text = common_data.get_concat_text(body_content)
    signature = Signature()
    signature.load_key(common_data.certificate_serial)
    signed_signature = signature.sign(concat_text)
    headers['X-Signature'] = signed_signature
    response = requests.post(url + path, data=body_content, headers=headers)
    if response.status_code == 400 and response.json()['code'] == "400.3":
        print("TEST CASE 11 OK!")
    else:
        print("TEST CASE 11 FAILED!")
        print("HTTP Header:" + str(headers))
        print("HTTP Body:" + str(body_content))
        print(response.text)
def testcase_12(headers, body_content):
    """An invalid cloud_id must be rejected with HTTP 400 / code 400.26."""
    headers = headers.copy()
    body_content = body_content.copy()
    body_content['cloud_id'] = "INVALID_CLOUD_ID"
    concat_text = common_data.get_concat_text(body_content)
    signature = Signature()
    signature.load_key(common_data.certificate_serial)
    signed_signature = signature.sign(concat_text)
    headers['X-Signature'] = signed_signature
    response = requests.post(url + path, data=body_content, headers=headers)
    if response.status_code == 400 and response.json()['code'] == "400.26":
        print("TEST CASE 12 OK!")
    else:
        print("TEST CASE 12 FAILED!")
        print("HTTP Header:" + str(headers))
        print("HTTP Body:" + str(body_content))
        print(response.text)
def testcase_13(headers, body_content):
    """An invalid mac_address must be rejected with HTTP 400 / code 400.24."""
    headers = headers.copy()
    body_content = body_content.copy()
    body_content['mac_address'] = "INVALID_MAC_ADDRESS"
    concat_text = common_data.get_concat_text(body_content)
    signature = Signature()
    signature.load_key(common_data.certificate_serial)
    signed_signature = signature.sign(concat_text)
    headers['X-Signature'] = signed_signature
    response = requests.post(url + path, data=body_content, headers=headers)
    if response.status_code == 400 and response.json()['code'] == "400.24":
        print("TEST CASE 13 OK!")
    else:
        print("TEST CASE 13 FAILED!")
        print("HTTP Header:" + str(headers))
        print("HTTP Body:" + str(body_content))
        print(response.text)
def testcase_14(headers, body_content):
    """An invalid serial_number must be rejected with HTTP 400 / code 400.24.

    NOTE(review): this expects the same error code (400.24) as testcase_13
    (invalid mac_address) — confirm the API really shares one code for both.
    """
    headers = headers.copy()
    body_content = body_content.copy()
    body_content['serial_number'] = "INVALID_SERIAL_NUMBER"
    concat_text = common_data.get_concat_text(body_content)
    signature = Signature()
    signature.load_key(common_data.certificate_serial)
    signed_signature = signature.sign(concat_text)
    headers['X-Signature'] = signed_signature
    response = requests.post(url + path, data=body_content, headers=headers)
    if response.status_code == 400 and response.json()['code'] == "400.24":
        print("TEST CASE 14 OK!")
    else:
        print("TEST CASE 14 FAILED!")
        print("HTTP Header:" + str(headers))
        print("HTTP Body:" + str(body_content))
        print(response.text)
if __name__ == '__main__':
    # set headers
    headers = dict()
    headers = init_headers(headers)
    # set body
    body_content = dict()
    init_body_content(body_content)
    # Each testcase copies headers/body before mutating, so cases can be
    # enabled independently by uncommenting the lines below.
    # testcase_0(headers, body_content)
    # testcase_1(headers, body_content)
    # testcase_2(headers, body_content)
    # testcase_3(headers, body_content)
    # testcase_4(headers, body_content)
    # testcase_5(headers, body_content)
    # testcase_6(headers, body_content)
    # testcase_7(headers, body_content)
    # testcase_8(headers, body_content)
    # testcase_9(headers, body_content)
    # testcase_10(headers, body_content)
    # testcase_11(headers, body_content)
    # testcase_12(headers, body_content)
    # testcase_13(headers, body_content)
    testcase_14(headers, body_content)
from flask import Flask

# Module-level WSGI application, served by app.run() at the bottom of the file.
app = Flask(__name__)
def load_binary(file):
    """Return the full contents of *file* (a filesystem path) as bytes."""
    # Use a distinct handle name instead of rebinding the 'file' parameter.
    with open(file, 'rb') as fh:
        return fh.read()
@app.route('/createt-shirt')
def generatet_shirt():
    """Serve the shirt logo PNG as the response body."""
    # app.make_response(...) is the idiomatic bound call; the original invoked
    # the unbound Flask.make_response with app passed explicitly.
    # NOTE(review): the route spelling 'createt-shirt' looks like a typo, but
    # changing the URL would break existing clients — confirm before renaming.
    resp = app.make_response(load_binary("logo-eina.png"))
    resp.headers['Content-Type'] = 'image/png'
    return resp
if __name__ == '__main__':
    # Development server, listening on all interfaces.
    app.run(host='0.0.0.0')
#!/usr/bin/python3
import numpy as np
import re
import sys
import multiprocessing as mp
# import threading
# import queue
from time import sleep
argvs = sys.argv
# NOTE(review): each mp.Process gets its own copy of this module global, so a
# worker setting it never affects the others — confirm whether an mp.Event was intended.
stop_reading = False
class SpikeData:
    """One retired instruction parsed from a Spike trace.

    Attributes: no (instruction count), pc, inst (raw encoding), mnemonic,
    is_reg_write, and — after set_reg() — reg_addr / reg_data.

    The original class also defined getter methods named exactly like these
    attributes; the instance attributes assigned in __init__ shadowed them,
    so the getters were dead code and have been removed. All existing
    attribute-style accesses (e.g. spike_log.pc) are unchanged.
    """
    def __init__(self, no, pc, inst, mnemonic):
        self.no = no
        self.pc = pc
        self.inst = inst
        self.mnemonic = mnemonic
        self.is_reg_write = False  # flipped by set_reg()

    def set_reg(self, reg_addr, reg_data):
        """Record the destination register write of this instruction."""
        self.is_reg_write = True
        self.reg_addr = reg_addr
        self.reg_data = reg_data
class ForestData:
    """One retired instruction parsed from a Forest/Swimmer trace.

    Attributes: no, pc, inst, is_reg_write, and — after set_reg() —
    reg_addr / reg_data.

    The original getter methods were shadowed by the instance attributes of
    the same names (and ForestData.mnemonic read an attribute that was never
    set), so they were dead code and have been removed. Attribute-style
    access used by the rest of the script is unchanged.
    """
    def __init__(self, no, pc, inst):
        self.no = no
        self.pc = pc
        self.inst = inst
        self.is_reg_write = False  # flipped by set_reg()

    def set_reg(self, reg_addr, reg_data):
        """Record the destination register write of this instruction."""
        self.is_reg_write = True
        self.reg_addr = reg_addr
        self.reg_data = reg_data
LOAD_STEP = 1  # compare every instruction; raise to subsample both traces

#================================
# REGISTER RTL TRACE INFORMATION
#================================
# NOTE(review): 5 argv entries are required (script + 4 args) but the usage
# string lists only 3 and a .rb name — likely stale from a Ruby original.
if len(argvs) != 5:
    print("usage: comptrace_spike_swimmer.rb spike-trace-file swimmer-trace-file skip=0")
    exit()

# Producer/consumer queues: the two loader processes fill them, the compare
# process drains them.
spike_inst_log = mp.Queue()
swimmer_inst_log = mp.Queue()
#======================
# Load Spike Trace Log
#======================
def load_spike_log (spike_trace_fp, skip_inst):
    """Parse a Spike trace and stream SpikeData records into spike_inst_log.

    Queues a sentinel SpikeData(-1, -1, -1, "") at EOF so the consumer can
    detect completion.

    :param spike_trace_fp: open file handle for the Spike trace
    :param skip_inst: instruction count below which records are skipped

    NOTE(review): stop_reading is a plain module global; with mp.Process it is
    copied per process, so setting it elsewhere never stops this loop.
    """
    print("Start load_spike_log()")
    step = 0
    spike_line = spike_trace_fp.readline()
    while spike_line != '' :
        if stop_reading == True:
            exit()
        # Skip XML-like markers and comment lines.
        if re.match('^<', spike_line) or spike_line.find('//') != -1 :
            spike_line = spike_trace_fp.readline()
            continue
        # PC Trace Log
        if re.match(".*core.*", spike_line):
            # Exception / tval lines carry no retired instruction.
            if spike_line.find("exception") != -1 :
                spike_line = spike_trace_fp.readline()
                continue
            if spike_line.find("core 0: tval") != -1 :
                spike_line = spike_trace_fp.readline()
                continue
            if step == 0:
                # Fields: count, ..., pc (hex), "(0x<inst>)", mnemonic.
                cut_spike_line = re.split(' +', spike_line)
                spike_no = int(cut_spike_line[0], 10)
                spike_pc = int(cut_spike_line[3], 16)
                spike_inst = int(cut_spike_line[4].replace("(0x", "").replace(")",""), 16)
                spike_mnemonic = cut_spike_line[5]
                # print ("spike_no = %d < skip_inst = %d" % (spike_no, skip_inst))
                if spike_no < skip_inst:
                    spike_line = spike_trace_fp.readline()
                    continue
                spike_data = SpikeData(spike_no, spike_pc, spike_inst, spike_mnemonic)
                # Advance to the next meaningful line (register write, if any).
                while True:
                    spike_line = spike_trace_fp.readline()
                    if not re.match('^<', spike_line) and not spike_line.find('//') != -1 :
                        break
                # RegAddr Trace Log
                if re.match('^3 ' , spike_line) or re.match('^1 ' , spike_line) or re.match('^0 ' , spike_line) or \
                   re.match('^:3 ', spike_line) or re.match('^:1 ', spike_line) or re.match('^:0 ', spike_line):
                    cut_spike_line = re.split(' +', spike_line)
                    if len(cut_spike_line) > 3 and (cut_spike_line[3] != "mem") :
                        # spike_pc = int(cut_spike_line[1], 16)
                        # spike_inst = int(cut_spike_line[2].replace("(0x", "").replace(")",""), 16)
                        # Destination register index: strip the x/f prefix (e.g. "x10").
                        spike_reg_addr = int(cut_spike_line[3].replace('x','').replace('f',''), 10)
                        spike_reg_data = int(cut_spike_line[4], 16)
                        # print ("[Spike ] : Register Data R[%d]]<=%08x" % (spike_reg_addr, spike_reg_data))
                        spike_data.set_reg(spike_reg_addr, spike_reg_data)
                    spike_line = spike_trace_fp.readline()
                # Crude back-pressure: stall when the consumer falls behind.
                if spike_inst_log.qsize() >= 1000000:
                    sleep(0.1)
                # print("spike %d, %016lx" % (spike_data.no, spike_data.pc))
                spike_inst_log.put(spike_data)
            else:
                spike_line = spike_trace_fp.readline()
            step = step + 1
            if step == LOAD_STEP:
                step = 0
        else:
            spike_line = spike_trace_fp.readline()
    # EOF sentinel for the compare process.
    spike_inst_log.put(SpikeData(-1, -1, -1, ""))
    print("Finished load_spike_log()")
#======================
# Load Forest Trace Log
#======================
def load_swimmer_log (swimmer_trace_fp, skip_inst):
    """Parse a Swimmer/Forest trace and stream ForestData into swimmer_inst_log.

    Queues a sentinel ForestData(-1, -1, "") at EOF (only .pc == -1 is checked
    by the consumer, so the "" inst field is harmless).

    :param swimmer_trace_fp: open file handle for the Swimmer trace
    :param skip_inst: instruction count below which records are skipped
    """
    print("Start load_swimmer_log() skip_inst = %d" % skip_inst)
    step = 0
    for swimmer_line in swimmer_trace_fp:
        # NOTE(review): never becomes True in this process (see stop_reading above).
        if stop_reading == True:
            exit()
        # PC Trace Log: skip markup, comments, and call/return annotations.
        if re.match('^[^<]', swimmer_line) and \
           swimmer_line.find('//') == -1 and \
           swimmer_line.find('<FunctionCall') == -1 and \
           swimmer_line.find('<Return') == -1 :
            if step % LOAD_STEP == 0:
                # Colon-separated fields: no : ... : pc : ... : inst : ... : behavior.
                cut_swimmer_line = swimmer_line.split(':')
                swimmer_no = int(cut_swimmer_line[0], 10)
                swimmer_pc = int(cut_swimmer_line[3], 16)
                swimmer_inst = int(cut_swimmer_line[5], 16)
                swimmer_behav = cut_swimmer_line[7]
                # print ("swimmer_no = %d < skip_inst = %d" % (swimmer_no, skip_inst))
                if swimmer_no < skip_inst:
                    continue
                swimmer_data = ForestData(swimmer_no, swimmer_pc, swimmer_inst)
                behav_array = swimmer_behav.split(" ")
                for behav in behav_array:
                    # if behav.include?("csrrw") : break
                    # if behav.include?("csrrs") : break
                    # Register writes look like r05<=0123abcd / f12<=....
                    if re.match('[rf][0-9]{2}<=[0-9a-f]+', behav):
                        swimmer_reg_addr = int(behav.split("<=")[0].replace('r','').replace('f', ''), 10)
                        swimmer_reg_data = int(behav.split("<=")[1], 16)
                        swimmer_data.set_reg(swimmer_reg_addr, swimmer_reg_data)
                        # print ("[Forest] : Register Data R[%d]]<=%08x" % (swimmer_reg_addr, swimmer_reg_data))
                # Back-pressure when the consumer falls behind.
                if swimmer_inst_log.qsize() >= 100000:
                    sleep(0.1)
                swimmer_inst_log.put(swimmer_data)
            step = step + 1
    # EOF sentinel for the compare process.
    swimmer_inst_log.put(ForestData(-1, -1, ""))
    print("Finished load_swimmer_log()")
def compare_spike_swimmer ():
    """Consume both queues in lockstep and report the first mismatch.

    Compares PC, register-write flag, register address and register data for
    each instruction pair; exits on the first divergence or when either
    producer's sentinel (pc == -1) arrives.

    NOTE(review): the `stop_reading = True` assignments below bind a local
    variable (no `global` statement), and even a global would not cross the
    mp.Process boundary — the producers are only stopped by process exit.
    """
    swimmer_log_idx = 0
    swimmer_log = ForestData(0, 0, 0)
    while True:
        # Busy-wait (with sleep) until the Spike producer has data.
        while spike_inst_log.empty():
            # print("spike_inst_log is empty")
            sleep(0.1)
        spike_log = spike_inst_log.get()
        if spike_log.pc == -1:
            print("//=========================================================")
            print("// Spike Log is Finished")
            print("//=========================================================")
            stop_reading = True
            exit()
        while swimmer_inst_log.empty():
            print("swimmer_inst_log is empty")
            sleep(0.1)
        swimmer_log = swimmer_inst_log.get()
        if swimmer_log.pc == -1:
            print("//=========================================================")
            print("// Forest Log is Finished")
            print("//=========================================================")
            stop_reading = True
            exit()
        # if spike_log.mnemonic.find("addi") != -1:
        #     continue
        # if swimmer_log.no != spike_log.no :
        #     print("//=========================================================")
        #     print("// Number Mismatched! Spike=%d Forest=%d" % (spike_log.no, swimmer_log.no))
        #     print("//=========================================================")
        #     stop_reading = True
        #     exit()
        if swimmer_log.pc != spike_log.pc :
            print("//=========================================================")
            print("// PC Mismatched! Spike=%016x Forest=%016x" % (spike_log.pc, swimmer_log.pc))
            print("// No=%d / %d, %s" % (spike_log.no, swimmer_log.no, spike_log.mnemonic))
            print("//=========================================================")
            stop_reading = True
            exit()
        if swimmer_log.is_reg_write != spike_log.is_reg_write :
            print("//=========================================================")
            print("// Register Write Mismatched! Spike=%s Forest=%s" % ("True" if spike_log.is_reg_write else "False", "True" if swimmer_log.is_reg_write else "False"))
            print("// No=%d / %d, PC=%016x, %s" % (spike_log.no, swimmer_log.no, swimmer_log.pc, spike_log.mnemonic))
            print("//=========================================================")
            stop_reading = True
            exit()
        # Only compare register payloads when both sides wrote a register.
        if swimmer_log.is_reg_write == True and spike_log.is_reg_write == True :
            if swimmer_log.reg_addr != spike_log.reg_addr :
                print("//=========================================================")
                print("// Write Register Address Mismatched! Spike=%d Forest=%d" % (spike_log.reg_addr, swimmer_log.reg_addr))
                print("// No=%d / %d, PC=%016x, %s" % (spike_log.no, swimmer_log.no, swimmer_log.pc, spike_log.mnemonic))
                print("//=========================================================")
                stop_reading = True
                exit()
            if swimmer_log.reg_data != spike_log.reg_data :
                print("//=========================================================")
                print("// Write Register Data Mismatched! Spike=%016x Forest=%016x" % (spike_log.reg_data, swimmer_log.reg_data))
                print("// No=%d / %d, PC=%016x, %s" % (spike_log.no, swimmer_log.no, swimmer_log.pc, spike_log.mnemonic))
                print("//=========================================================")
                stop_reading = True
                exit()
        # Periodic progress heartbeat.
        if swimmer_log.no % 300 == 0:
            print("// No = %d / %d, PC=%016x, %s" % (spike_log.no, swimmer_log.no, swimmer_log.pc, spike_log.mnemonic),)
        swimmer_log_idx = swimmer_log_idx + 1
# spike_inst_log.each { |log|
# printf("[spike]PC = %08x (%08x)", log.pc, log.inst)
# if log.is_reg_write == True :
# printf(" Reg[%d]<=%016x", log.reg_addr, log.reg_data)
# else:
# printf("")
#
# }
# swimmer_inst_log.each { |log|
# printf("[swimmer]PC = %08x (%08x)", log.pc, log.inst)
# if log.is_reg_write == True :
# printf(" Reg[%d]<=%016x", log.reg_addr, log.reg_data)
# else:
# printf("")
#
# }
# Open both trace files up front so bad paths fail before any work starts.
try:
    spike_trace_fp = open (argvs[1], 'r')
except IOError:
    print("%s cannot be opened." % argvs[1])
    exit()
try:
    swimmer_trace_fp = open (argvs[2], 'r')
except IOError:
    print("%s cannot be opened." % argvs[2])
    exit()

skip_spike_inst = int(argvs[3], 10)    # instructions to skip in the Spike trace
skip_swimmer_inst = int(argvs[4], 10)  # instructions to skip in the Swimmer trace

# Two producers and one consumer; the queues above are the only shared state.
th_spike_trace = mp.Process(target=load_spike_log, args=(spike_trace_fp, skip_spike_inst,))
th_swimmer_trace = mp.Process(target=load_swimmer_log, args=(swimmer_trace_fp, skip_swimmer_inst,))
th_compare = mp.Process(target=compare_spike_swimmer)
th_spike_trace.start()
th_swimmer_trace.start()
th_compare.start()
th_spike_trace.join()
th_swimmer_trace.join()
th_compare.join()
|
from office365.runtime.client_object_collection import ClientObjectCollection
class BaseEntityCollection(ClientObjectCollection):
    """A SharePoint entity set.

    Wraps ClientObjectCollection and additionally remembers the parent
    entity (if any) that owns this collection.
    """

    def __init__(self, context, child_item_type=None, resource_path=None, parent=None):
        """
        :param office365.sharepoint.client_context.ClientContext context: SharePoint context
        :type child_item_type: type[ClientObject]
        :type resource_path: office365.runtime.resource_path.ResourcePath
        :type parent: office365.sharepoint.base_entity.BaseEntity or None
        """
        super(BaseEntityCollection, self).__init__(context, child_item_type, resource_path)
        self._parent = parent
|
import jieba
from gensim.models import Word2Vec, KeyedVectors
import logging
import re
logging.basicConfig(level=logging.INFO)
def get_corpus(args):
    """Read the file at args['data_dir'], split each line into sentences on a
    set of CJK/ASCII delimiters, and return a list of lower-cased token lists
    produced by jieba's full-mode segmentation."""
    delimiters = re.compile(u'[。\.、::;;,,"(),“”?//><@]')
    corpus = []
    with open(args['data_dir'], "r") as fin:
        for line in fin:
            for sentence in delimiters.split(line):
                tokens = jieba.lcut(sentence, cut_all=True)
                corpus.append([tok.lower() for tok in tokens])
    # corpus.append(list(line.lower()))
    return corpus
def update_word_embedding(args):
    """Fine-tune pretrained word2vec vectors on the domain corpus.

    Builds a vocabulary from the domain data, merges in the pretrained
    vocabulary, seeds matching vectors from the pretrained file, then trains
    on the domain corpus and saves the result to args['finetuned'].

    NOTE(review): this uses the gensim<4 API (size=, .vocab,
    intersect_word2vec_format) — confirm the installed gensim version.
    """
    domain_data = get_corpus(args)
    w2v_model = Word2Vec(size=300, sg=1, min_count=1)  # size= renamed vector_size in gensim 4
    w2v_model.build_vocab(domain_data)
    pretrained_model = KeyedVectors.load_word2vec_format(args['pretrained'], binary=False)
    # Extend the vocabulary with every pretrained word before intersecting.
    w2v_model.build_vocab([list(pretrained_model.vocab.keys())], update=True)
    # lockf=1.0 leaves the imported vectors trainable (0.0 would freeze them).
    w2v_model.intersect_word2vec_format(args['pretrained'], binary=False, lockf=1.0)
    w2v_model.train(domain_data, total_examples=w2v_model.corpus_count, epochs=w2v_model.epochs)
    w2v_model.save(args['finetuned'])
    print("Complete word embedding training!")
if __name__ == "__main__":
with open('./config.yaml', 'r', encoding='utf-8') as f:
config = f.read()
config = yaml.load(config, Loader=yaml.Loader)
update_word_embedding(config)
|
from django.shortcuts import render
from rest_framework.views import APIView, Response
from .serializers import *
from rest_framework import generics
class WarriorAPIView(APIView):
    """Read-only endpoint listing every Warrior."""

    def get(self, request):
        """Return all warriors serialized under the "Warriors" key."""
        serializer = WarriorSerializer(Warrior.objects.all(), many=True)
        return Response({"Warriors": serializer.data})
class ProfessionCreateView(APIView):
    """Create a Profession from the "profession" key of the request payload."""

    def post(self, request):
        """Validate and save a new profession; 400 is raised automatically on
        invalid data (raise_exception=True). Debug print() calls removed and
        the "succesfully" typo in the response message fixed."""
        profession = request.data.get("profession")
        serializer = ProfessionCreateSerializer(data=profession)
        if serializer.is_valid(raise_exception=True):
            profession_saved = serializer.save()
            return Response({"Success": "Profession '{}' created successfully.".format(profession_saved.title)})
class SkillAPIView(APIView):
    """Read-only endpoint listing every Skill."""

    def get(self, request):
        """Return all skills serialized under the "Skill" key."""
        serializer = SkillSerializer(Skill.objects.all(), many=True)
        return Response({"Skill": serializer.data})
class SkillCreateView(APIView):
    """Create a Skill from the "skill" key of the request payload."""

    def post(self, request):
        """Validate and save a new skill; 400 is raised automatically on
        invalid data. Fixes the "succesfully" typo in the response message."""
        skill = request.data.get("skill")
        serializer = SkillCreateSerializer(data=skill)
        if serializer.is_valid(raise_exception=True):
            skill_saved = serializer.save()
            return Response({"Success": "Skill '{}' created successfully.".format(skill_saved.title)})
# Generic DRF views; each pairs a serializer with a queryset.

# Flat list of warriors.
class WarriorListAPIView(generics.ListAPIView):
    serializer_class = WarriorSerializer
    queryset = Warrior.objects.all()

# Create a profession.
class ProfessionCreateAPIView(generics.CreateAPIView):
    serializer_class = ProfessionCreateSerializer
    queryset = Profession.objects.all()

# Warriors with their profession field.
# NOTE(review): named "Profession" but lists Warrior objects — confirm intent.
class ProfessionAPIView(generics.ListAPIView):
    serializer_class = WarriorProfessionSerializer
    queryset = Warrior.objects.all()

# Create a warrior.
class WarriorsCreateView(generics.CreateAPIView):
    serializer_class = WarriorSerializer
    queryset = Warrior.objects.all()

# Create a skill-of-warrior link (through model).
class SkillOfWarriorAPIView(generics.CreateAPIView):
    serializer_class = SkillOfWarriorSerializer
    queryset = SkillOfWarrior.objects.all()

# Retrieve / update / delete a single warrior by pk.
class WarriorsUpdateDeleteView(generics.RetrieveUpdateDestroyAPIView):
    serializer_class = WarriorSerializer
    queryset = Warrior.objects.all()

# Warriors with nested profession details.
class WarriorsProfessionView(generics.ListAPIView):
    serializer_class = WarriorProfessionNestedSerializer
    queryset = Warrior.objects.all()

# Warriors with profession and skills fully nested.
class WarriorsProfessionSkillView(generics.ListAPIView):
    serializer_class = WarriorNestedSerializer
    queryset = Warrior.objects.all()

# Warriors with nested skill details.
class WarriorsSkillView(generics.ListAPIView):
    serializer_class = WarriorSkillNestedSerializer
    queryset = Warrior.objects.all()
|
import numpy as np
def error_rate(model_ts, obs_ts, dg):
    """Calculate the error rate.

    The error rate is the percentage of model annual means that fall
    OUTSIDE the observed range, optionally widened by a fraction dg.

    :param model_ts (numpy.ndarray): 2-D array of annual mean time series
        for a given variable, indexed by (nens, nyrs)
    :param obs_ts (numpy.ndarray): vector or 2-D array of the observed
        time series, indexed as (years) or (sample_number, years)
    :param dg (float): fraction by which to extend the observed range in
        both directions (e.g. 0.10 widens it by 10%)
    :return: vector of error rates indexed by parameter set (nens)
    :rtype: numpy.ndarray
    """
    obs_min = np.nanmin(obs_ts)
    obs_max = np.nanmax(obs_ts)
    # Boolean mask of values outside the (widened) observed envelope.
    # NaNs in model_ts compare False, so they never count as errors —
    # matching the original nansum-of-where behavior. The original also
    # allocated a zeros array that was immediately overwritten; removed.
    outside = (model_ts <= obs_min * (1 - dg)) | (model_ts >= obs_max * (1 + dg))
    return 100 * np.sum(outside, axis=1) / model_ts.shape[1]
def nrmse(model_ts, obs_ts):
    """Calculate the normalized root mean square error (NRMSE).

    When multiple observed time series are supplied (2-D obs_ts), the NRMSE
    is computed against each one and the lowest value is kept for every
    ensemble member.

    :param model_ts (numpy.ndarray): 2-D array of annual mean time series
        with shape (nens, nyrs)
    :param obs_ts (numpy.ndarray): observed time series with shape (years)
        or (sample_number, years)
    :return: vector of NRMSE values indexed by (nens)
    :rtype: numpy.ndarray
    """
    nyrs = model_ts.shape[1]
    # Branch on dimensionality explicitly rather than the original
    # try/except IndexError probe of obs_ts.shape[1].
    if obs_ts.ndim > 1:
        nobs = obs_ts.shape[0]
        nens = model_ts.shape[0]
        obs_min = np.nanmin(obs_ts, axis=1)
        obs_max = np.nanmax(obs_ts, axis=1)
        obs_mean = np.nanmean(obs_ts, axis=1)
        temp_nrmse = np.zeros([nobs, nens])
        for obsnum in range(nobs):
            # RMSE against this observation's mean, normalized by its range.
            temp_nrmse[obsnum, :] = (np.sqrt(np.nansum(
                (model_ts - obs_mean[obsnum])**2, axis=1) / nyrs)
                / (obs_max[obsnum] - obs_min[obsnum]))
        return np.nanmin(temp_nrmse, axis=0)
    # Single observed time series.
    obs_min = np.nanmin(obs_ts)
    obs_max = np.nanmax(obs_ts)
    obs_mean = np.nanmean(obs_ts)
    return (np.sqrt(np.nansum((model_ts - obs_mean)**2, axis=1) / nyrs)
            / (obs_max - obs_min))
def avg_nrmse(nrmse_array, weights):
    """Calculate the weighted average NRMSE.

    The weighted average normalized root mean square error is the weighted
    Euclidean distance across each individual variable's NRMSE.

    :param nrmse_array (numpy.ndarray): 3-D array of per-variable NRMSE
        values with shape (CO2levels, varlist, nens)
    :param weights (numpy.ndarray, list): per-variable weights indexed by
        varlist
    :return: 2-D array of weighted average NRMSE with shape (CO2levels, nens)
    :rtype: numpy.ndarray
    """
    # Broadcast the weights over the variable axis instead of looping:
    # sqrt( sum_i w_i * nrmse[:, i, :]**2 ).
    w = np.asarray(weights, dtype=float)
    return np.sqrt(np.sum(w[None, :, None] * np.square(nrmse_array), axis=1))
# Guess
# Import the random module.
# Store a random secret number in a variable
# called secret_number (use randrange or randint).
# The secret number should be from 1 to 20 (inclusive).
# Ask the user to keep guessing the number until they
# get it right. (You might want to print messages
# to tell the user if they guessed right or wrong.)
import random

# Secret in [1, 20]; randrange's stop bound is exclusive, hence 21.
secret_number = random.randrange(1, 21)
while True:
    # Guard against non-numeric input, which previously crashed with ValueError.
    try:
        guess = int(input("Enter your guess (number from 1-20): "))
    except ValueError:
        print("Please enter a whole number from 1 to 20.")
        continue
    if guess == secret_number:
        print("Congratulations, you guessed right!")
        break
    print("Oops, that's not right! Keep guessing.")
|
import pkg_resources

from .dataframe import DataFrame
from .series import Series, ViewSeries

# Version is read from the installed distribution's metadata, so the package
# must be installed (e.g. pip install -e .) for import to succeed.
__version__ = pkg_resources.get_distribution('raccoon').version
# Public API re-exported at package level.
__all__ = ['DataFrame', 'Series', 'ViewSeries']
|
import pymongo
import pytest
from moncoll import Moncoll
# Credentials for the test MongoDB instance; all tests below need it running.
settings = {"user": "testuser", "password": "testpass", "dbname": "db_test"}
coll_name = "test_collection"
def test_insert_and_find():
    """Round-trip two documents through insert_one and find.

    Requires a live MongoDB reachable with `settings`. The collection is
    dropped afterwards so the test is self-cleaning.
    NOTE(review): the equality assertion assumes find() preserves insertion
    order — confirm Moncoll.find guarantees this.
    """
    coll = Moncoll(settings, coll_name)
    coll.insert_one({"_id": 123})
    coll.insert_one({"_id": 456})
    res = coll.find()
    assert res == [{"_id": 123}, {"_id": 456}]
    coll.drop()
def test_bulk_write():
    """Bulk-upsert five documents keyed on "name", then query by gender.

    Requires a live MongoDB reachable with `settings`; drops the collection
    at the end. to_bulklist("name") appears to use the name as _id (see the
    membership assertions below) — confirm against Moncoll's implementation.
    """
    test_data = [
        {"name": "Christensen", "gender": "male"},
        {"name": "Jefferson", "gender": "male"},
        {"name": "Juliette", "gender": "female"},
        {"name": "Jill", "gender": "female"},
        {"name": "Nancy", "gender": "female"},
    ]
    coll = Moncoll(settings, coll_name)
    bulkdata = Moncoll.to_bulklist(test_data, "name")
    bulk_res = coll.bulk_write(bulkdata)
    assert bulk_res.upserted_count == 5
    # print("bulk_res: ", bulk_res.upserted_count)
    find_res = coll.find({"gender": "male"})
    assert len(find_res) == 2
    assert {"_id": "Jefferson", "name": "Jefferson", "gender": "male"} in find_res
    assert {"_id": "Christensen", "name": "Christensen", "gender": "male"} in find_res
    coll.drop()
# @pytest.mark.dev
def test_aggregate():
    """Group by category and take the max price per group, sorted ascending.

    Requires a live MongoDB reachable with `settings`; drops the collection
    at the end.
    """
    test_data = [
        {"_id": "5fc3b666e172f2f4951a471a", "price": 267, "category": "banana"},
        {"_id": "5fc3b666cba8fbaedbebc508", "price": 136, "category": "apple"},
        {"_id": "5fc3b66629110554e6af893a", "price": 367, "category": "apple"},
        {"_id": "5fc3b6668ab6e2b58aacf6b3", "price": 367, "category": "apple"},
        {"_id": "5fc3b666bcb15d3b1c2a49f6", "price": 396, "category": "banana"},
        {"_id": "5fc3b6665720415b11e4febf", "price": 195, "category": "apple"},
        {"_id": "5fc3b66632a39823a114ff29", "price": 98, "category": "strawberry"},
        {"_id": "5fc3b6667eae905a7cf7ef6f", "price": 285, "category": "strawberry"},
        {"_id": "5fc3b666f2e779c0a56e021b", "price": 200, "category": "strawberry"},
    ]
    coll = Moncoll(settings, coll_name)
    bulkdata = Moncoll.to_bulklist(test_data)
    coll.bulk_write(bulkdata)
    res = coll.aggregate(
        [
            {"$group": {"_id": "$category", "max_price": {"$max": "$price"}}},
            {"$sort": {"max_price": 1}},
        ]
    )
    assert res == [
        {"_id": "strawberry", "max_price": 285},
        {"_id": "apple", "max_price": 367},
        {"_id": "banana", "max_price": 396},
    ]
    coll.drop()
"""Top-level package for docs_parser."""
__author__ = """Robert Kumar"""
__email__ = 'robert2398@gmail.com'
__version__ = '0.1.0'
|
from plexserver import PlexFrame
import tkinter as tk
from PIL import Image, ImageTk
def load_images():
    """Load the player UI icons and return them as a dict of PhotoImages.

    Image.LANCZOS replaces Image.ANTIALIAS, which has been a deprecated alias
    of the same filter since Pillow 2.7 and was removed in Pillow 10.
    """
    d = {}
    d['play'] = ImageTk.PhotoImage(Image.open("images/play.png").resize((24, 24), Image.LANCZOS))
    # NOTE(review): 'forward' loads backward.png and 'backward' loads
    # forward.png — these look swapped; confirm intent before changing.
    d['forward'] = ImageTk.PhotoImage(Image.open("images/backward.png").resize((24, 24), Image.LANCZOS))
    d['backward'] = ImageTk.PhotoImage(Image.open("images/forward.png").resize((24, 24), Image.LANCZOS))
    d['pause'] = ImageTk.PhotoImage(Image.open("images/pause.png").resize((24, 24), Image.LANCZOS))
    d['none'] = ImageTk.PhotoImage(Image.open("images/nothingplaying.png").resize((240, 240)))
    return d
class App(tk.Tk):
    # Root window hosting the PlexFrame UI.
    def __init__(self):
        tk.Tk.__init__(self)
        # Keep references to the PhotoImages so Tk does not garbage-collect them.
        self.images = load_images()
        PlexFrame(self).pack(fill='both', expand=True)
        # NOTE(review): mainloop() inside __init__ blocks until the window
        # closes, so App() below never returns until exit.
        self.mainloop()

    def get_root(self):
        # Accessor for the root window; presumably used by PlexFrame — confirm.
        return self

App()
|
#!/usr/bin/python
import sqlite3
import threading
# Re-entrant lock guarding every use of the shared connection/cursor below.
connlock = threading.RLock()
# NOTE(review): sqlite3 connections default to check_same_thread=True, so
# using this module from multiple threads may raise despite the lock — confirm.
conn = sqlite3.connect("reader.db")
cursor = conn.cursor()

def lock():
    # Acquire the module-wide database lock (re-entrant).
    connlock.acquire()

def unlock():
    # Release the module-wide database lock.
    connlock.release()
cursor.execute("select count(*) from sqlite_master where type='table' and name='configuration'")
conf_exists = bool(cursor.fetchone()[0])
if not conf_exists:
cursor.execute("""
create table configuration (
key text primary key,
value text
)""")
cursor.execute("insert into configuration values (?, ?)", ('dbversion', 0))
cursor.execute("""
create table folders (
id integer primary key,
name text,
parent integer,
ordering integer,
foreign key(parent) references folders(id)
)
""")
cursor.execute("insert into folders (name, parent) values (null, null)")
cursor.execute("""
create table feeds (
id integer primary key,
name text,
folder integer,
ordering integer,
url text,
last_checked datetime,
foreign key(folder) references folders(id)
)
""")
cursor.execute("""
create table feeditems (
feed integer not null,
retrieved datetime,
seen boolean default 0,
item blob,
foreign key(feed) references feeds(id)
)
""")
conn.commit()
def conf_get(key):
    """Return the configuration value stored under *key*, or None if absent."""
    # RLock supports the context-manager protocol; this replaces the manual
    # acquire-inside-try pattern (which could release an unheld lock if
    # acquire itself failed).
    with connlock:
        cursor.execute("select value from configuration where key=?", (key,))
        row = cursor.fetchone()
    # 'is None' instead of '== None' per PEP 8.
    return None if row is None else row[0]
def conf_set(key, val):
    """Insert or overwrite the configuration value for *key*.

    NOTE(review): does not commit; the write is only persisted by a later
    conn.commit() (e.g. via insert()) — confirm this is intentional.
    """
    with connlock:
        cursor.execute("insert or replace into configuration (key, value) values (?, ?)", (key, val))
# Abort on an unknown schema version. RuntimeError replaces StandardError,
# which (together with the comma raise syntax) exists only in Python 2.
if int(conf_get('dbversion')) != 0:
    raise RuntimeError("Don't know how to use or upgrade this version of the database")
def query(q, args=()):
    """Execute a read query under the module lock and return all rows as a list.

    :param q: SQL text with ? placeholders
    :param args: parameter tuple bound to the placeholders
    """
    # Context manager replaces the manual acquire/finally-release pair.
    with connlock:
        cursor.execute(q, args)
        return list(cursor)
def insert(q, args=()):
    """Execute a write statement under the module lock, commit, and return lastrowid.

    :param q: SQL text with ? placeholders
    :param args: parameter tuple bound to the placeholders
    """
    with connlock:
        cursor.execute(q, args)
        conn.commit()
        return cursor.lastrowid
|
"""
Automatically extract best config and epoch and retrain the model on both train + val sets
"""
import argparse
import os
import re
import shlex
import subprocess

from hyperparam_checking import find_best_config_hyperparam_tune, find_best_perf
def retrain(hyperparam_tune_path, save_path, all_model_types=[], all_lambdas=[], shots=[], adversarial=False):
    """
    Retrain only the best hyperparam config for each model type on both train + val sets.

    :param hyperparam_tune_path: directory holding the hyperparameter sweep results
    :param save_path: output directory for the retrained runs (created if missing)
    :param all_model_types: if non-empty, restrict to model types matching these substrings
    :param all_lambdas: if non-empty, restrict multitask/end2end runs to these attr-loss weights
    :param shots: few-shot sizes; empty means full data (and resume from the swept checkpoint)
    :param adversarial: train on the adversarial image directory
    """
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    best_records = find_best_config_hyperparam_tune(hyperparam_tune_path)
    all_data_dir = []
    if shots:
        for n_shots in shots:
            all_data_dir.append('class_attr_data_10_%d_shot' % n_shots)
    else:
        all_data_dir.append('class_attr_data_10')
    for data_dir in all_data_dir:
        for model_type, v in best_records.items():
            _, epoch, config_dir = v
            if all_model_types and not any([t in model_type for t in all_model_types]):
                continue
            model_path = os.path.join(config_dir, '%d_model.pth' % epoch)
            log_dir = os.path.join(save_path, config_dir.split('/')[-1] + '_' + data_dir)
            command = 'python train_sigmoid.py -log_dir %s -e 1000 -optimizer sgd -pretrained -use_aux %s'
            if 'simple_finetune' in model_path:
                model_suffix = ''
            else:
                # attr_loss_weight, scheduler, etc. are recovered from the config dir name.
                lambda_val = float(re.findall(r"attr_loss_weight_\d*\.\d+", config_dir)[0].split('_')[-1])
                if any([t in model_type for t in ['multitask', 'end2end']]) and (all_lambdas and lambda_val not in all_lambdas):
                    continue
                model_suffix = '-use_attr -weighted_loss multiple -data_dir %s -n_attributes 112 -attr_loss_weight %.3f -normalize_loss' % (data_dir, lambda_val)
                if 'relu' in hyperparam_tune_path:
                    model_suffix += ' -use_relu'
                    print("USE RELU")
                if 'end2end' in model_path:
                    model_suffix += ' -end2end'
                elif 'bottleneck' in model_path:
                    model_suffix += ' -bottleneck'
                elif 'onlyAttr' in model_path:
                    model_suffix += ' -no_img'
            scheduler_step = int(re.findall(r"scheduler_step_\d*", config_dir)[0].split('_')[-1])
            weight_decay = float(re.findall(r"weight_decay_\d*\.\d+", config_dir)[0].split('_')[-1])
            lr = float(re.findall(r"lr_\d*\.\d+", config_dir)[0].split('_')[-1])
            model_suffix = model_suffix + " -batch_size %d -weight_decay %f -lr %f -scheduler_step %d" % (64, weight_decay, lr, scheduler_step)
            command = command % (log_dir, model_suffix)
            if not shots:  # also train on val set
                command += (' -ckpt %s' % model_path)
            if adversarial:
                command += ' -image_dir CUB_adversarial/CUB_fixed/train/'
            print(command)
            # shlex.split builds a proper argv list; the original
            # subprocess.run([command]) tried to execute the entire command
            # string as a single program name and could never succeed.
            subprocess.run(shlex.split(command))
def run_inference(retrain_path, model_types=[], all_lambdas=[], feature_group=False, sequential=False):
    """
    Assuming there is only one model of each (model type, lambda value) in retrain_path,
    run inference on the test set using the best epoch obtained from retraining.
    If model_types is specified, only run inference for those model types.

    :param retrain_path: directory of retrained runs (one subdir per config)
    :param model_types: restrict to these model types when non-empty
    :param all_lambdas: restrict multitask/end2end runs to these attr-loss weights
    :param feature_group: pass -feature_group_results to the inference script
    :param sequential: for bottleneck models, pair with the onlyAttr_Ahat run
    """
    for config in os.listdir(retrain_path):
        config_dir = os.path.join(retrain_path, config)
        if not os.path.isdir(config_dir):
            continue
        # Infer the model type from the config directory name.
        if 'bottleneck' in config:
            model_type = 'bottleneck'
        elif 'end2end' in config:
            model_type = 'end2end'
        elif 'use_attr' in config and 'onlyAttr' not in config:
            model_type = 'multitask'
        elif 'onlyAttr' not in config:
            model_type = 'simple_finetune'
        else:
            model_type = 'onlyAttr'
        if model_types and model_type not in model_types:
            continue
        # Best epoch = argmax of validation accuracy in the training log.
        all_val_acc = find_best_perf(os.path.join(config_dir, 'log.txt'))
        epoch = all_val_acc.index(max(all_val_acc))
        #epoch = round(epoch, -1) - 20
        if epoch < 0:
            print(config_dir, ' has not started training')
        print(epoch, '\t', config)
        model_path = os.path.join(config_dir, '%d_model.pth' % epoch)
        if 'attr_loss_weight' in model_path:
            lambda_val = float(re.findall(r"attr_loss_weight_\d*\.\d+", config_dir)[0].split('_')[-1])
        else:
            lambda_val = 1
        if any([t in model_types for t in ['multitask', 'end2end']]) and (all_lambdas and lambda_val not in all_lambdas):
            continue
        if 'NEW_SIGMOID_MODEL' in retrain_path or 'NEW_MODEL' in retrain_path:
            command = 'python inference_sigmoid.py -model_dir %s -eval_data test' % model_path
        else:
            command = 'python inference.py -model_dir %s -eval_data test' % model_path
        if feature_group:
            command += ' -feature_group_results'
        if 'use_attr' in model_path:
            command += ' -use_attr -n_attributes 112 -data_dir class_attr_data_10'
            if 'onlyAttr' in model_path:
                continue
            if 'bottleneck' in model_path:
                def find_onlyAttr_dir(retrain_path, model_path):
                    # Locate the matching onlyAttr(/Ahat) run for the second stage.
                    if 'few_shots' in retrain_path:
                        n_shots = re.findall(r"\d+_shot", model_path)[0]
                        if sequential:
                            dir_name = [c for c in os.listdir(retrain_path) if 'onlyAttr_Ahat' in c and n_shots in c][0]
                        else:
                            dir_name = [c for c in os.listdir(retrain_path) if 'onlyAttr' in c and 'onlyAttr_Ahat' not in c and n_shots in c][0]
                    else:
                        if sequential:
                            dir_name = [c for c in os.listdir(retrain_path) if 'onlyAttr_Ahat' in c][0]
                        else:
                            dir_name = [c for c in os.listdir(retrain_path) if 'onlyAttr' in c and 'onlyAttr_Ahat' not in c][0]
                    return os.path.join(retrain_path, dir_name)
                onlyAttr_dir = find_onlyAttr_dir(retrain_path, model_path)
                val_acc = find_best_perf(os.path.join(onlyAttr_dir, 'log.txt'))
                model2_path = os.path.join(onlyAttr_dir, '%d_model.pth' % (val_acc.index(max(val_acc))))
                command += (' -model_dir2 %s -bottleneck' % model2_path)
                if 'onlyAttr_Ahat' not in model2_path:
                    command += ' -use_sigmoid'
        if 'adversarial' in retrain_path:
            command += ' -image_dir CUB_adversarial/CUB_fixed/test/'
        # shlex.split builds a proper argv list; subprocess.run([command]) tried
        # to execute the entire command string as one program name.
        subprocess.run(shlex.split(command))
    #TODO: write test inference results to a separate folder
#TODO: write test inference results to a separate folder
if __name__ == "__main__":
    # CLI entry point: runs test-set inference over previously trained models.
    parser = argparse.ArgumentParser(description='PyTorch Training')
    parser.add_argument('-save_path', default=None, help='where the trained models are saved')
    parser.add_argument('-results_path', default=None, help='where to parse for the best performance')
    args = parser.parse_args()
    # Retraining pass is currently disabled; only inference is run.
    #retrain(args.results_path, args.save_path, all_model_types=['bottleneck', 'end2end'], all_lambdas=['0.01'], shots=[])
    run_inference(args.results_path, model_types=['end2end'], all_lambdas=[0.001], sequential=True)
|
from django.http import HttpResponse
from django.template import Context
from django.template.loader import get_template
from django.http import HttpResponseRedirect
from sgcommapp.models import *
from django.template import RequestContext
from django.shortcuts import render
from django.views.generic import View
from .services import SuperblyServices
class AddFriend(View):
    """Django view for the add-friend flow.

    GET renders the home page for a logged-in user. POST processes a friend
    request: depending on which direction(s) of the relationship already
    exist in the Friends table, it either confirms a pending request or
    records a new pending one, notifying the other user on success.
    """
    def get(self, request, *args, **kwargs):
        # Bounce unauthenticated sessions back to the landing page.
        response = SuperblyServices.Test_User_Login(request)
        if not response:
            return HttpResponseRedirect("/")
        template_name = 'home.html'
        data = {'username': request.session['username']}
        return render(request, template_name, data)
    def post(self, request, *args, **kwargs):
        # Same session check as get().
        response = SuperblyServices.Test_User_Login(request)
        if not response:
            return HttpResponseRedirect("/")
        friendName = request.POST.get('friend')
        userNameId = request.session['user_id']
        userNameObj = Profile.objects.filter(id=userNameId)
        user_obj = Profile.objects.filter(username=request.POST.get('friend'))
        added = False
        # Users may not befriend themselves, and the target must exist.
        if userNameObj[0].username != friendName:
            if user_obj.count():
                # NOTE(review): friend_added is filtered/updated with the
                # strings 'True'/'False' here but set with booleans in the
                # Friends(...) constructors below — confirm the field type.
                friendAddedObj = Friends.objects.filter(friend_id=friendName, friend_added='False', user_id=userNameId)
                if friendAddedObj:
                    friendUserIDObj = Friends.objects.filter(friend_id = request.session['username'], user_id = user_obj[0].id)
                    if friendUserIDObj:
                        #if the user is existing in the Friends database, make friend_added as True.
                        # NOTE(review): this update filters on friend_id only
                        # (no user_id), so it may touch rows of other users —
                        # verify intent.
                        Friends.objects.filter(friend_id = friendName).update(friend_added = 'True')
                        added = True
                        SuperblyServices.notify_user(user_obj[0].id, request.session['username'], added)
                else:
                    #if the user in Friends database has friend_added = False with user_id of the friend
                    friendUserIDObj = Friends.objects.filter(friend_id = request.session['username'], friend_added = 'False', user_id = user_obj[0].id)
                    if friendUserIDObj:
                        #if user in Friend db has friend_added = True
                        friendObj = Friends.objects.filter(friend_id=friendName, friend_added='True', user_id=userNameId)
                        if not friendObj:
                            friend_obj = Friends(friend_id=request.POST.get('friend'), user_id=request.session['user_id'],
                                                 friend_added=True)
                            friend_obj.save()
                        # Confirm the reverse-direction pending request.
                        Friends.objects.filter(friend_id = request.session['username'], user_id = user_obj[0].id).update(friend_added = 'True')
                        added = True
                        SuperblyServices.notify_user(user_obj[0].id, request.session['username'], added)
                    else:
                        friendObj = Friends.objects.filter(friend_id=friendName, friend_added='True', user_id=userNameId)
                        if not friendObj:
                            #if the user is not existing in the Friends database, add the user in the db and friends list.
                            friend_obj = Friends(friend_id=request.POST.get('friend'), user_id=request.session['user_id'],
                                                 friend_added=False)
                            friend_obj.save()
                            added = True
                            SuperblyServices.notify_user(user_obj[0].id, request.session['username'], added)
        return HttpResponseRedirect('/home')
|
"""
Word Chain 2.0
Let's update our previous word-chain definition. In this 2.0 version, a word-chain is an array of words, where the next word is formed by either:
Changing exactly one letter from the previous word
Adding or subtracting one letter
Note: You can only do one (not both) for each word change.
Examples
isWordChain(["row", "crow", "crown", "brown", "brawn"]) ➞ True
# add "c" to "row" to get "crow", "n" to get "crown", etc.
isWordChain(["flew", "flaw", "flan", "flat", "fat", "rat", "rot", "tot"]) ➞ True
isWordChain(["meek", "meet", "meat", "teal"]) ➞ False
# "meat" => "teal" changes 2 letters (can only change 1)
isWordChain(["run", "runny", "bunny"]) ➞ False
# "run" => "runny" adds 2 letters (can only add 1)
Notes
All words will be in lower-case.
"""
def levenshtein(a, b):
    """Return the Levenshtein (edit) distance between strings *a* and *b*.

    Counts the minimum number of single-character insertions, deletions and
    substitutions required to turn *a* into *b*.

    The previous implementation recursed on string slices, which is
    exponential in the input length; this iterative dynamic-programming
    version runs in O(len(a) * len(b)) time and O(len(b)) space and returns
    identical values.
    """
    if not a:
        return len(b)
    if not b:
        return len(a)
    # prev[j] = distance between the processed prefix of `a` and b[:j].
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, start=1):
        curr = [i]
        for j, cb in enumerate(b, start=1):
            curr.append(min(
                prev[j - 1] + (ca != cb),  # substitute (free on match)
                prev[j] + 1,               # delete from a
                curr[j - 1] + 1,           # insert into a
            ))
        prev = curr
    return prev[-1]
def isWordChain(words):
    """Return True if every consecutive pair of words differs by at most
    one edit (change one letter, or add/remove one letter).

    Bug fixed: when a word in the chain was empty, the original returned
    ``len(other_word)`` — an int, truthy for non-empty words — instead of a
    boolean verdict. Empty words are now handled by the distance itself.
    """
    def _distance(a, b):
        # Iterative Levenshtein distance; self-contained so this function
        # does not depend on the module-level helper.
        if not a:
            return len(b)
        if not b:
            return len(a)
        prev = list(range(len(b) + 1))
        for i, ca in enumerate(a, start=1):
            curr = [i]
            for j, cb in enumerate(b, start=1):
                curr.append(min(prev[j - 1] + (ca != cb),
                                prev[j] + 1,
                                curr[j - 1] + 1))
            prev = curr
        return prev[-1]

    # Every adjacent pair must be within a single edit of each other.
    return all(_distance(a, b) <= 1 for a, b in zip(words, words[1:]))
# Ad-hoc smoke checks; the expected result is noted in each trailing comment.
isWordChain(["row", "crow", "crown", "brown", "brawn"]) #, True)
#isWordChain(["flew", "flaw", "flan", "flat", "fat", "rat", "rot", "tot"]) #, True)
#isWordChain(["meek", "meet", "meat", "teal"]) #, False)
#isWordChain(["run", "runny", "bunny"]) #, False)
#isWordChain(["fun", "fund", "find", "bind", "wind", "wild", "wile", "wiles"]) #, True)
#isWordChain(["nut", "but", "bot", "boot", "loot", "look", "book", "brook"]) #, True)
#isWordChain(["some", "tome", "tame", "lame", "flame", "flamer", "blamer", "blamers"]) #, True)
#isWordChain(["a", "at", "hat", "that", "what", "flat"]) #, False)
#isWordChain(["link", "blink", "wink", "sink"]) #, False)
import time
from machine import Pin, I2C
from servo import Servos
#i2c = I2C(sda=Pin(21), scl=Pin(22)) # moxing esp32
#i2c = I2C(sda=Pin('PB6'), scl=Pin('PB7')) # stm32f411eu on WeAct board,software i2c,does not work now!!!
i2c = I2C(1, freq=400000) # create hardware I2c object: I2C1,PB6,PB7
# Address 0x40 is the common default for PCA9685-style 16-channel servo
# driver boards — confirm against the actual hardware.
servo = Servos(i2c, address=0x40, freq=50, min_us=500, max_us=2500, degrees=180)
# Sweep all 16 channels between 0 and 180 degrees forever.
# NOTE: time.sleep_ms is MicroPython-only (not available in CPython).
while True:
    for i in range(0, 16):
        servo.position(i, 0)
        time.sleep_ms(500)
    for i in range(0, 16):
        servo.position(i, 180)
        time.sleep_ms(500)
|
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Nipype : Neuroimaging in Python pipelines and interfaces package.
Nipype intends to create python interfaces to other neuroimaging
packages and create an API for specifying a full analysis pipeline in
python.
Much of the machinery at the beginning of this file has been copied over from
nibabel denoted by ## START - COPIED FROM NIBABEL and a corresponding ## END
"""
import sys
from glob import glob
import os
## START - COPIED FROM NIBABEL
from os.path import join as pjoin
from functools import partial
# Py2/Py3 compatibility: a tuple of string types usable with isinstance()
# on either major version.
PY3 = sys.version_info[0] >= 3
if PY3:
    string_types = str,
else:
    string_types = basestring,
# The ConfigParser module was renamed between Python 2 and 3.
try:
    from ConfigParser import ConfigParser
except ImportError:
    from configparser import ConfigParser
# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
# update it when the contents of directories change.
if os.path.exists('MANIFEST'): os.remove('MANIFEST')
# For some commands, use setuptools.
if len(set(('develop', 'bdist_egg', 'bdist_rpm', 'bdist', 'bdist_dumb',
            'install_egg_info', 'egg_info', 'easy_install', 'bdist_wheel',
            'bdist_mpkg')).intersection(sys.argv)) > 0:
    # setup_egg imports setuptools setup, thus monkeypatching distutils.
    import setup_egg
from distutils.core import setup
from distutils.version import LooseVersion
from distutils.command.build_py import build_py
from distutils import log
def get_comrec_build(pkg_dir, build_cmd=None):
    """ Return extended build command class for recording commit

    The extended command tries to run git to find the current commit, getting
    the empty string if it fails.  It then writes the commit hash into a file
    in the `pkg_dir` path, named ``COMMIT_INFO.txt``.

    In due course this information can be used by the package after it is
    installed, to tell you what commit it was installed from if known.

    To make use of this system, you need a package with a COMMIT_INFO.txt file
    - e.g. ``myproject/COMMIT_INFO.txt`` - that might well look like this::

        # This is an ini file that may contain information about the code state
        [commit hash]
        # The line below may contain a valid hash if it has been substituted
        # during 'git archive'
        archive_subst_hash=$Format:%h$
        # This line may be modified by the install process
        install_hash=

    The COMMIT_INFO file above is also designed to be used with git
    substitution - so you probably also want a ``.gitattributes`` file in the
    root directory of your working tree that contains something like this::

        myproject/COMMIT_INFO.txt export-subst

    That will cause the ``COMMIT_INFO.txt`` file to get filled in by ``git
    archive`` - useful in case someone makes such an archive - for example
    with via the github 'download source' button.

    Although all the above will work as is, you might consider having
    something like a ``get_info()`` function in your package to display the
    commit information at the terminal.  See the ``pkg_info.py`` module in
    the nipy package for an example.

    Parameters
    ----------
    pkg_dir : str
        package directory that contains ``COMMIT_INFO.txt``
    build_cmd : class, optional
        base build command to extend; defaults to distutils ``build_py``
        (resolved lazily so merely importing this function does not require
        distutils).
    """
    if build_cmd is None:
        build_cmd = build_py
    class MyBuildPy(build_cmd):
        ''' Subclass to write commit data into installation tree '''
        def run(self):
            build_cmd.run(self)
            import subprocess
            proc = subprocess.Popen('git rev-parse HEAD',
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    shell=True)
            repo_commit, _ = proc.communicate()
            # communicate() returns bytes on Python 3; the previous
            # str(repo_commit) wrote the literal "b'...'" representation
            # into COMMIT_INFO.txt instead of the hash.  Decode and strip
            # the trailing newline instead.
            if not isinstance(repo_commit, str):
                repo_commit = repo_commit.decode('utf-8', 'replace')
            repo_commit = repo_commit.strip()
            # We write the installation commit even if it's empty
            cfg_parser = ConfigParser()
            cfg_parser.read(pjoin(pkg_dir, 'COMMIT_INFO.txt'))
            cfg_parser.set('commit hash', 'install_hash', repo_commit)
            out_pth = pjoin(self.build_lib, pkg_dir, 'COMMIT_INFO.txt')
            # Context manager closes the handle (the old open() call leaked
            # it until garbage collection).
            with open(out_pth, 'wt') as out_file:
                cfg_parser.write(out_file)
    return MyBuildPy
def _add_append_key(in_dict, key, value):
""" Helper for appending dependencies to setuptools args """
# If in_dict[key] does not exist, create it
# If in_dict[key] is a string, make it len 1 list of strings
# Append value to in_dict[key] list
if key not in in_dict:
in_dict[key] = []
elif isinstance(in_dict[key], string_types):
in_dict[key] = [in_dict[key]]
in_dict[key].append(value)
# Dependency checks
def package_check(pkg_name, version=None,
                  optional=False,
                  checker=LooseVersion,
                  version_getter=None,
                  messages=None,
                  setuptools_args=None,
                  pypi_pkg_name=None
                  ):
    ''' Check if package `pkg_name` is present and has good enough version

    Has two modes of operation. If `setuptools_args` is None (the default),
    raise an error for missing non-optional dependencies and log warnings for
    missing optional dependencies. If `setuptools_args` is a dict, then fill
    ``install_requires`` key value with any missing non-optional dependencies,
    and the ``extras_requires`` key value with optional dependencies.

    This allows us to work with and without setuptools. It also means we can
    check for packages that have not been installed with setuptools to avoid
    installing them again.

    Parameters
    ----------
    pkg_name : str
       name of package as imported into python
    version : {None, str}, optional
       minimum version of the package that we require. If None, we don't
       check the version. Default is None
    optional : bool or str, optional
       If ``bool(optional)`` is False, raise error for absent package or wrong
       version; otherwise warn. If ``setuptools_args`` is not None, and
       ``bool(optional)`` is not False, then `optional` should be a string
       giving the feature name for the ``extras_require`` argument to setup.
    checker : callable, optional
       callable with which to return comparable thing from version
       string. Default is ``distutils.version.LooseVersion``
    version_getter : {None, callable}:
       Callable that takes `pkg_name` as argument, and returns the
       package version string - as in::

          version = version_getter(pkg_name)

       If None, equivalent to::

          mod = __import__(pkg_name); version = mod.__version__
    messages : None or dict, optional
       dictionary giving output messages
    setuptools_args : None or dict
       If None, raise errors / warnings for missing non-optional / optional
       dependencies. If dict fill key values ``install_requires`` and
       ``extras_require`` for non-optional and optional dependencies.
    pypi_pkg_name : None or string
       When the pypi package name differs from the installed module. This is
       the case with the package python-dateutil which installs as dateutil.
    '''
    setuptools_mode = not setuptools_args is None
    optional_tf = bool(optional)
    # Default version getter: import the module and read __version__.
    if version_getter is None:
        def version_getter(pkg_name):
            mod = __import__(pkg_name)
            return mod.__version__
    if messages is None:
        messages = {}
    # Default message templates; caller-supplied `messages` override them.
    msgs = {
         'missing': 'Cannot import package "%s" - is it installed?',
         'missing opt': 'Missing optional package "%s"',
         'opt suffix' : '; you may get run-time errors',
         'version too old': 'You have version %s of package "%s"'
         ' but we need version >= %s', }
    msgs.update(messages)
    status, have_version = _package_status(pkg_name,
                                           version,
                                           version_getter,
                                           checker)
    # Report/record under the PyPI name when it differs from the import name.
    if pypi_pkg_name:
        pkg_name = pypi_pkg_name
    if status == 'satisfied':
        return
    if not setuptools_mode:
        # Plain-distutils mode: raise for hard deps, warn for optional ones.
        if status == 'missing':
            if not optional_tf:
                raise RuntimeError(msgs['missing'] % pkg_name)
            log.warn(msgs['missing opt'] % pkg_name +
                     msgs['opt suffix'])
            return
        elif status == 'no-version':
            raise RuntimeError('Cannot find version for %s' % pkg_name)
        assert status == 'low-version'
        if not optional_tf:
            raise RuntimeError(msgs['version too old'] % (have_version,
                                                          pkg_name,
                                                          version))
        log.warn(msgs['version too old'] % (have_version,
                                            pkg_name,
                                            version)
                 + msgs['opt suffix'])
        return
    # setuptools mode: record the dependency instead of raising/warning.
    if optional_tf and not isinstance(optional, string_types):
        raise RuntimeError('Not-False optional arg should be string')
    dependency = pkg_name
    if version:
        dependency += '>=' + version
    if optional_tf:
        # Optional deps go under extras_require[<feature name>].
        if not 'extras_require' in setuptools_args:
            setuptools_args['extras_require'] = {}
        _add_append_key(setuptools_args['extras_require'],
                        optional,
                        dependency)
        return
    _add_append_key(setuptools_args, 'install_requires', dependency)
    return
def _package_status(pkg_name, version, version_getter, checker):
try:
__import__(pkg_name)
except ImportError:
return 'missing', None
if not version:
return 'satisfied', None
try:
have_version = version_getter(pkg_name)
except AttributeError:
return 'no-version', None
if checker(have_version) < checker(version):
return 'low-version', have_version
return 'satisfied', have_version
## END - COPIED FROM NIBABEL
from build_docs import cmdclass, INFO_VARS
# Add custom commit-recording build command so installs capture the git
# hash into COMMIT_INFO.txt (see get_comrec_build above).
cmdclass['build_py'] = get_comrec_build('nipype')
def configuration(parent_package='',top_path=None):
    """numpy.distutils configuration hook: registers the nipype subpackage
    and reads the version from nipype/__init__.py."""
    from numpy.distutils.misc_util import Configuration
    config = Configuration(None, parent_package, top_path)
    config.set_options(ignore_setup_xxx_py=True,
                       assume_default_configuration=True,
                       delegate_options_to_subpackages=True,
                       quiet=True)
    # The quiet=True option will silence all of the name setting warnings:
    # Ignoring attempt to set 'name' (from 'nipy.core' to
    #    'nipy.core.image')
    # Robert Kern recommends setting quiet=True on the numpy list, stating
    # these messages are probably only used in debugging numpy distutils.
    config.get_version('nipype/__init__.py') # sets config.version
    config.add_subpackage('nipype', 'nipype')
    return config
# Prepare setuptools args (only populated when a setuptools-requiring
# command was detected earlier and setup_egg was imported).
if 'setuptools' in sys.modules:
    extra_setuptools_args = dict(
        tests_require=['nose'],
        test_suite='nose.collector',
        zip_safe=False,
        extras_require = dict(
            doc='Sphinx>=0.3',
            test='nose>=0.10.1'),
    )
    # In setuptools mode package_check records missing deps into
    # extra_setuptools_args instead of raising.
    pkg_chk = partial(package_check, setuptools_args = extra_setuptools_args)
else:
    extra_setuptools_args = {}
    pkg_chk = package_check
# Hard and soft dependency checking
pkg_chk('networkx', INFO_VARS['NETWORKX_MIN_VERSION'])
pkg_chk('nibabel', INFO_VARS['NIBABEL_MIN_VERSION'])
pkg_chk('numpy', INFO_VARS['NUMPY_MIN_VERSION'])
pkg_chk('scipy', INFO_VARS['SCIPY_MIN_VERSION'])
pkg_chk('traits', INFO_VARS['TRAITS_MIN_VERSION'])
pkg_chk('nose', INFO_VARS['NOSE_MIN_VERSION'])
# dateutil installs as 'dateutil' but is published as 'python-dateutil'.
pkg_chk('dateutil', INFO_VARS['DATEUTIL_MIN_VERSION'],
        pypi_pkg_name='python-dateutil')
################################################################################
# Import the documentation building classes.
try:
    from build_docs import cmdclass
except ImportError:
    # Pass by the doc build gracefully if sphinx is not installed.
    # Bug fixed: this used a Python 2 print *statement*, which is a
    # SyntaxError under Python 3 and made the whole file unparseable there.
    print("Sphinx is not installed, docs cannot be built")
    cmdclass = {}
################################################################################
def main(**extra_args):
    """Run numpy.distutils setup with project metadata from build_docs.

    `extra_args` carries the setuptools-only keyword arguments assembled
    above (empty when plain distutils is in use).
    """
    from numpy.distutils.core import setup
    setup(name=INFO_VARS['NAME'],
          maintainer=INFO_VARS['MAINTAINER'],
          maintainer_email=INFO_VARS['MAINTAINER_EMAIL'],
          description=INFO_VARS['DESCRIPTION'],
          long_description=INFO_VARS['LONG_DESCRIPTION'],
          url=INFO_VARS['URL'],
          download_url=INFO_VARS['DOWNLOAD_URL'],
          license=INFO_VARS['LICENSE'],
          classifiers=INFO_VARS['CLASSIFIERS'],
          author=INFO_VARS['AUTHOR'],
          author_email=INFO_VARS['AUTHOR_EMAIL'],
          platforms=INFO_VARS['PLATFORMS'],
          version=INFO_VARS['VERSION'],
          configuration=configuration,
          cmdclass=cmdclass,
          scripts=glob('bin/*'),
          **extra_args)
if __name__ == "__main__":
    # Forward the setuptools-only arguments collected at module scope.
    main(**extra_setuptools_args)
|
class DetectionCollater:
    """Batch collater for detection samples.

    Pads variable-size images to the batch-wide max (H, W) and pads each
    per-image annotation array with -1 rows so every sample in the batch
    shares a single tensor shape.
    """

    def __init__(self):
        pass

    def next(self, data):
        """Collate a list of sample dicts into batched torch tensors.

        Each sample provides 'image' (H, W, 3 array), 'annots' (N, 5 array),
        'scale' and 'origin_hw'.
        """
        images = [sample['image'] for sample in data]
        annots = [sample['annots'] for sample in data]
        scales = np.array([sample['scale'] for sample in data],
                          dtype=np.float32)
        origin_hws = np.array([sample['origin_hw'] for sample in data],
                              dtype=np.float32)

        batch = len(images)
        pad_h = max(img.shape[0] for img in images)
        pad_w = max(img.shape[1] for img in images)

        # Zero-pad every image onto a common canvas, anchored top-left.
        padded = np.zeros((batch, pad_h, pad_w, 3), dtype=np.float32)
        for idx, img in enumerate(images):
            padded[idx, :img.shape[0], :img.shape[1], :] = img
        # B H W 3 -> B 3 H W (channels-first for the network).
        image_tensor = torch.from_numpy(padded).permute(0, 3, 1, 2)

        # Pad annotations with -1 rows; a batch with no boxes at all still
        # gets one all -1 row per image so downstream shapes are stable.
        n_max = max(annot.shape[0] for annot in annots)
        if n_max > 0:
            annot_array = np.full((batch, n_max, 5), -1, dtype=np.float32)
            for idx, annot in enumerate(annots):
                if annot.shape[0] > 0:
                    annot_array[idx, :annot.shape[0], :] = annot
        else:
            annot_array = np.full((batch, 1, 5), -1, dtype=np.float32)

        return {
            'image': image_tensor,
            'annots': torch.from_numpy(annot_array),
            'scale': torch.from_numpy(scales),
            'origin_hw': torch.from_numpy(origin_hws),
        }
|
# -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404
from django.views import View
from explorer.models import Query
from explorer.views.auth import PermissionRequiredMixin
from explorer.views.export import _export
class StreamQueryView(PermissionRequiredMixin, View):
    """Serve a stored query's results via the export machinery.

    ``download=False`` asks ``_export`` not to force a file download.
    """

    permission_required = 'view_permission'

    def get(self, request, query_id, *args, **kwargs):
        # 404 on unknown query ids, then delegate rendering to _export.
        return _export(request, get_object_or_404(Query, pk=query_id),
                       download=False)
|
from problem_2.problem_2_SVM import problem_2_SVM
from problem_2.problem_2_DNN import problem_2_DNN
# Driver script: runs both Problem 2 solutions back to back.
# Problem 2 SVM
problem_2_SVM()
# Problem 2 DNN
problem_2_DNN()
|
from dataclasses import dataclass
from typing import List
from encord.project_ontology.classification_attribute import ClassificationAttribute
@dataclass
class OntologyClassification:
    """
    A dataclass which holds classifications of the :class:`.Ontology`.
    """

    #: A unique (to the ontology) identifier of the classification.
    id: str
    #: An 8-character hex string uniquely defining the option.
    feature_node_hash: str
    #: A list of attributes belonging to this classification.
    attributes: List[ClassificationAttribute]
|
# -*- coding:UTF-8 -*-
'''
Created on 2014年12月5日
@author: caifh
'''
import unittest
import creditutils.str_util as utils
import chardet
class Test(unittest.TestCase):
    """Placeholder TestCase; the actual manual checks live in the
    module-level functions below and are run via test_maint()."""
    def setUp(self):
        # No fixtures required.
        pass
    def tearDown(self):
        pass
    def testName(self):
        pass
def test_escape():
    """Print utils.escape of a mixed ASCII/Chinese string alongside the
    builtin ascii() form, for manual comparison."""
    a = '123我们'
    b = utils.escape(a)
    print(b)
    c = ascii(a)
    print(c)
def test_decode_escape():
    """Round-trip check (by eye): escape a string, then decode it back."""
    src = 'abc我们'
    print(src)
    dst = utils.escape_entire(src)
    print(dst)
    dst = utils.escape(src)
    print(dst)
    thd = utils.decode_escape(dst)
    print(thd)
def test_detect_encoding():
    """Manually inspect decode_to_unicode on ascii/utf-8/gb2312 bytes."""
    src_buf = 'abcdef'.encode(encoding='ascii')
    # print(utils.detect_encoding(src_buf))
    print(utils.decode_to_unicode(src_buf))
    # print(chardet.detect(src_buf))
    src_buf = 'abc我们'.encode(encoding='utf-8')
    # print(utils.detect_encoding(src_buf))
    print(utils.decode_to_unicode(src_buf))
    src_buf = '我们在哪里啊afd'.encode(encoding='gb2312')
    # print(utils.detect_encoding(src_buf))
    print(utils.decode_to_unicode(src_buf))
def test_maint():
    """Entry point running whichever manual checks are currently enabled."""
    # test_escape()
    # test_decode_escape()
    test_detect_encoding()
if __name__ == "__main__":
    # Run the manual checks directly rather than through unittest.
    # import sys;sys.argv = ['', 'Test.testName']
    # unittest.main()
    test_maint()
|
# Fixed snack menu (a tuple: the collection is immutable).
lanche = ('Hambúrguer', 'Suco', 'Pizza', 'Pudim', 'Batata frita')
# Iterate the items directly instead of indexing with range(len(...)) —
# the index itself was never used, only the looked-up item.
for item in lanche:
    print('Eu vou comer {}.'.format(item))
print('Comi muito!')
import config
import re
def get_subsystem_in_title_info(title):
    """Report whether a known CMS subsystem name appears in a PR title.

    Returns a dict with display text, a CSS class, the title with the first
    matched subsystem wrapped in a highlight span, and a description.
    """
    for subsystem in config.get_subsystems():
        pos = title.lower().find(subsystem.lower())
        if pos == -1:
            continue
        # Wrap the matched slice of the ORIGINAL title (case preserved).
        highlighted = (
            title[:pos]
            + '<span class="text-info">'
            + title[pos:pos + len(subsystem)]
            + '</span>'
            + title[pos + len(subsystem):]
        )
        return { 'text': 'Subsystem is in the title', 'class': 'text-success', 'enriched_title': highlighted, 'description': "Name of known CMS subsystem is mentioned in the title of this PR" }
    return { 'text': 'Subsystem is not in the title', 'class': 'text-danger', 'enriched_title': title, 'description': "Known CMS subsystem is not mentioned in the title of this PR. Specifying a subsystem in the title is considered to be a good practise" }
|
from django.db import models
from swap_in.users.models import User
class Match(models.Model):
    """Relation created when one user likes another user's clothe.

    NOTE(review): both foreign keys point at User; 'user_clothe' presumably
    refers to the owner of the liked clothe — confirm against the app logic.
    """
    # The user who issued the like.
    user_like = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        null=False,
        related_name="user_likes"
    )
    # The user on the receiving end of the like.
    user_clothe = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        null=False,
        related_name="user_clothes"
    )
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains overloads to convert Python to equivalent z3py code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pyctr.overloads import py_defaults
from pyctr.overloads import staging
import z3
# Default (non-overloaded) handlers from pyctr; only `call` gets rewriting
# behaviour so that builtins such as abs() can be replaced below.
init = py_defaults.init
assign = py_defaults.assign
read = py_defaults.read
call = staging.RewritingCallOverload(py_defaults.call)
@call.replaces(abs)
def z3_abs(x):
    """Overload of builtin abs(): builds a symbolic z3.If for z3 arithmetic
    expressions, falls back to plain abs() for concrete values."""
    if isinstance(x, z3.ArithRef):
        return z3.If(x < 0, -x, x)
    else:
        return abs(x)
def and_(a, b):
    """Overload of `and` which builds z3.And statements.

    Eagerly simplifies the expression if any of the operands are Python
    booleans. Otherwise, a z3.And is generated for z3.BoolRef objects.

    Args:
        a: Union[bool, z3.BoolRef], first operand of `and`
        b: Tuple[Callable[[], Union[bool, z3.BoolRef]]], lazy thunks for
            remaining operands

    Returns:
        corresponding z3.And expression, or a Python expression if no
        z3.BoolRefs.
    """
    assert isinstance(b, tuple)
    if not b:
        # No remaining operands: the chain's value is `a` itself.
        return a
    if isinstance(a, z3.BoolRef):
        # Symbolic left operand: fold the rest of the chain first.
        return_val = and_(b[0](), b[1:])
        if isinstance(return_val, z3.BoolRef):
            return z3.And(a, return_val)
        else:
            if return_val:
                # Rest is concretely truthy, so the result reduces to `a`.
                return a
            else:
                return False
    else:
        # Concrete left operand: short-circuit exactly like Python's `and`
        # (the thunks for the rest are only forced when `a` is truthy).
        if a:
            return and_(b[0](), b[1:])
        else:
            return False
def or_(a, b):
    """Overload of `or` which builds z3.Or statements.

    Eagerly simplifies the expression if any of the operands are Python
    booleans. Otherwise, a z3.Or is generated for z3.BoolRef objects.

    Args:
        a: Union[bool, z3.BoolRef], first operand of `or`
        b: Tuple[Callable[[], Union[bool, z3.BoolRef]]], lazy thunks for
            remaining operands

    Returns:
        corresponding z3.Or expression, or a Python expression if no
        z3.BoolRefs.
    """
    assert isinstance(b, tuple)
    if not b:
        # No remaining operands: the chain's value is `a` itself.
        return a
    if isinstance(a, z3.BoolRef):
        # Symbolic left operand: fold the rest of the chain first.
        return_val = or_(b[0](), b[1:])
        if isinstance(return_val, z3.BoolRef):
            return z3.Or(a, return_val)
        else:
            if return_val:
                # Rest is concretely truthy, so the whole `or` is True.
                return True
            else:
                # Rest is falsy; the result reduces to `a`.
                return a
    else:
        # Concrete left operand: short-circuit exactly like Python's `or`
        # (the thunks for the rest are only forced when `a` is falsy).
        if a:
            return True
        else:
            return or_(b[0](), b[1:])
def not_(x):
    """Overload of `not`: z3.Not for symbolic BoolRefs, builtin `not`
    otherwise."""
    if isinstance(x, z3.BoolRef):
        return z3.Not(x)
    else:
        return not x
def if_stmt(cond, body, orelse, local_writes):
    """Functional form of an if statement.

    Args:
        cond: Callable with no arguments, predicate of conditional.
        body: Callable with no arguments, and outputs of the positive (if)
            branch as return type.
        orelse: Callable with no arguments, and outputs of the negative
            (else) branch as return type.
        local_writes: list(pyct.Variable), list of variables assigned in
            either body or orelse.

    Note: results are delivered by mutating the variables in
    ``local_writes`` (nothing is returned).
    """
    cond_result = cond()
    if isinstance(cond_result, z3.BoolRef):
        # Symbolic condition: evaluate BOTH branches in isolation, then
        # merge each assigned variable with a deferred z3.If.
        body_vals, _ = staging.execute_isolated(body, local_writes)
        orelse_vals, _ = staging.execute_isolated(orelse, local_writes)
        for body_result, else_result, modified_var in zip(body_vals, orelse_vals,
                                                          local_writes):
            # Unlike e.g., TensorFlow, z3 does not do tracing on If statements.
            # Instead, it expects the results of the body and orelse branches
            # passed as values. As such, each result is the result of the
            # deferred z3.If statement.
            modified_var.val = z3.If(cond_result, body_result, else_result)
    else:
        # Concrete condition: ordinary Python if/else semantics.
        py_defaults.if_stmt(lambda: cond_result, body, orelse, local_writes)
|
# -*- coding: utf-8 -*-
import numpy as np
import statsmodels as sm
from ThymeBoost.trend_models.trend_base_class import TrendBaseModel
from pmdarima.arima import auto_arima
class ArimaModel(TrendBaseModel):
    """ARIMA trend model for the ThymeBoost boosting loop.

    When ``arima_order == 'auto'`` the order is selected by
    ``pmdarima.auto_arima``; otherwise a statsmodels ARIMA of the given
    order is fit on the bias-adjusted series.
    """
    model = 'arima'

    def __init__(self):
        # (fitted model, bias, series length) — populated by fit().
        self.model_params = None
        # In-sample fitted values — populated by fit().
        self.fitted = None

    def __str__(self):
        # NOTE(review): self.kwargs only exists after fit() has run, so
        # str() on an unfitted instance raises AttributeError.
        return f'{self.model}({self.kwargs["arima_order"]})'

    def fit(self, y, **kwargs):
        """
        Fit the trend component in the boosting loop for an arima model.

        Parameters
        ----------
        y : array-like
            Series (residual) to fit in this boosting round.
        **kwargs : dict
            Must contain 'arima_order' ('auto' or an (p, d, q) tuple),
            'arima_trend' and 'bias'.

        Returns
        -------
        The in-sample fitted values.
        """
        self.kwargs = kwargs
        self.order = kwargs['arima_order']
        self.arima_trend = kwargs['arima_trend']
        bias = kwargs['bias']
        if self.order == 'auto':
            ar_model = auto_arima(y,
                                  seasonal=False,
                                  error_action='warn',
                                  trace=False,
                                  # Fixed typo: was 'supress_warnings',
                                  # which auto_arima does not recognise.
                                  suppress_warnings=True,
                                  stepwise=True,
                                  random_state=20,
                                  n_fits=50)
            self.fitted = ar_model.predict_in_sample()
        else:
            ar_model = sm.tsa.arima.model.ARIMA(y - bias,
                                                order=self.order,
                                                trend=self.arima_trend).fit()
            self.fitted = ar_model.predict(start=0, end=len(y) - 1) + bias
        self.model_params = (ar_model, bias, len(y))
        return self.fitted

    def predict(self, forecast_horizon, model_params):
        """Forecast ``forecast_horizon`` steps past the training sample."""
        n_train = model_params[2]
        if self.order == 'auto':
            prediction = model_params[0].predict(n_periods=forecast_horizon)
        else:
            # In-sample indices run 0..n_train-1, so the first out-of-sample
            # step is index n_train.  The previous start=n_train + 1 skipped
            # the first forecast step (off-by-one), misaligning all
            # predictions by one period.
            prediction = model_params[0].predict(
                start=n_train, end=n_train + forecast_horizon - 1) + \
                model_params[1]
        return prediction
|
import argparse
import copy
import json
import pickle
import pprint
import os
import sys
from tqdm import tqdm
from typing import *
from my_pybullet_envs import utils
import numpy as np
import torch
import my_pybullet_envs
import pybullet as p
import time
import inspect
from NLP_module import NLPmod
from my_pybullet_envs.inmoov_arm_obj_imaginary_sessions import (
ImaginaryArmObjSession,
)
from my_pybullet_envs.inmoov_shadow_place_env_v9 import (
InmoovShadowHandPlaceEnvV9,
)
from my_pybullet_envs.inmoov_shadow_demo_env_v4 import (
InmoovShadowHandDemoEnvV4,
)
import demo_scenes
no_vision = False
# The vision stack is optional; fall back to ground-truth-state mode when
# the ns_vqa_dart package is not importable.
try:
    from ns_vqa_dart.bullet.state_saver import StateSaver
    from ns_vqa_dart.bullet.vision_inference import VisionInference
    import ns_vqa_dart.bullet.util as util
except ImportError:
    no_vision = True
# from pose_saver import PoseSaver
# Directory of this script (resolved via the current frame so it works
# regardless of the working directory).
currentdir = os.path.dirname(
    os.path.abspath(inspect.getfile(inspect.currentframe()))
)
homedir = os.path.expanduser("~")
# change obs vec (note diffTar)
# TODOs
# A modified bullet/const classes
#   - vary joints the same as send joints it seems, ignore
#   - mods the send_joints to new urdf: 1. no palm aux 2. metacarpal is fixed (just always send 0?)
#   - represent everything in shoulder frame.
#   - fps is problematic, probably want to simulate several steps and send one pose
#   - simplify the current bullet class: only needs to transform pose. (warp around pose)
# Where in the code does it handle variable-size objects
#   - In the C# code
#   - now it is hard-coded in python & c# that there are 3 in the order of box-box-cyl
#   - ideally GT bullet can dump a json file that C# can read and call setObj
# TODO: main module depends on the following code/model:
# demo env: especially observation  # change obs vec (note diffTar)
# the settings of inmoov hand v2
# obj sizes & frame representation & friction & obj xy range
# frame skip
# vision delay
# what is different from cyl env?
# 1. policy load names
# 2. obj load
# 3. tmp add obj obs, some policy does not use GT
# 5. 4 grid / 6 grid
"""Parse arguments"""
sys.path.append("a2c_ppo_acktr")
parser = argparse.ArgumentParser(description="RL")
parser.add_argument("--seed", type=int, default=101)
parser.add_argument("--non-det", type=int, default=0)
parser.add_argument("--use_vision", action="store_true")
parser.add_argument(
    "--pose_path",
    type=str,
    default="main_sim_stack_new.json",
    help="The path to the json file where poses are saved.",
)
parser.add_argument("--scene", type=int, help="The scene to use.")
parser.add_argument("--shape", type=str, help="Shape of top shape.")
parser.add_argument("--size", type=str, help="Shape of top size.")
args = parser.parse_args()
# --non-det holds "non-determinism"; det is its logical inverse.
args.det = not args.non_det
"""Configurations."""
PLACE_FLOOR = False
MIX_SHAPE_PI = True
SAVE_POSES = True  # Whether to save object and robot poses to a JSON file.
USE_VISION_MODULE = args.use_vision and (not no_vision)
RENDER = True  # If true, uses OpenGL. Else, uses TinyRenderer.
GRASP_END_STEP = 30
PLACE_END_STEP = 50
STATE_NORM = False
INIT_NOISE = True
DET_CONTACT = 0  # 0 false, 1 true
# Friction coefficients for object / floor / hand.
OBJ_MU = 1.0
FLOOR_MU = 1.0
HAND_MU = 1.0
BULLET_SOLVER_ITER = 200
IS_CUDA = True  # TODO:tmp odd. seems no need to use cuda
DEVICE = "cuda" if IS_CUDA else "cpu"
# Simulation timestep: 240 Hz.
TS = 1.0 / 240
TABLE_OFFSET = [
    0.1,
    0.2,
    0.0,
]  # TODO: vision should notice the 0.2->0.1 change
HALF_OBJ_HEIGHT_L = 0.09
HALF_OBJ_HEIGHT_S = 0.065
SIZE2HALF_H = {"small": HALF_OBJ_HEIGHT_S, "large": HALF_OBJ_HEIGHT_L}
# Object radius lookup keyed by shape then size.
SHAPE2SIZE2RADIUS = {
    "box": {"small": 0.025, "large": 0.04},
    "cylinder": {"small": 0.04, "large": 0.05},
}
PLACE_CLEARANCE = 0.14  # could be different for diff envs
# RGBA colors used when spawning objects.
COLORS = {
    "red": [0.8, 0.0, 0.0, 1.0],
    "grey": [0.4, 0.4, 0.4, 1.0],
    "yellow": [0.8, 0.8, 0.0, 1.0],
    "blue": [0.0, 0.0, 0.8, 1.0],
    "green": [0.0, 0.8, 0.0, 1.0],
}
# Ground-truth scene:
HIDE_SURROUNDING_OBJECTS = False  # If true, hides the surrounding objects.
gt_odicts = demo_scenes.SCENES[args.scene]
top_obj_idx = 1  # TODO: in fact, moved obj
btm_obj_idx = 2  # TODO: in fact, reference obj (place between not considered)
# Override the shape and size of the top object if provided as arguments.
if args.shape is not None:
    gt_odicts[top_obj_idx]["shape"] = args.shape
if args.size is not None:
    gt_odicts[top_obj_idx]["size"] = args.size
if HIDE_SURROUNDING_OBJECTS:
    # Keep only the moved and reference objects; reindex accordingly.
    gt_odicts = [gt_odicts[top_obj_idx], gt_odicts[btm_obj_idx]]
    top_obj_idx = 0
    btm_obj_idx = 1
top_size = gt_odicts[top_obj_idx]["size"]
btm_size = gt_odicts[btm_obj_idx]["size"]
# Placement height: the top surface of the reference (bottom) object.
P_TZ = SIZE2HALF_H[btm_size] * 2
T_HALF_HEIGHT = SIZE2HALF_H[top_size]
IS_BOX = gt_odicts[top_obj_idx]["shape"] == "box"  # TODO: infer from language
# Select grasp/place policy checkpoints; the MIX policy handles both shapes.
if MIX_SHAPE_PI:
    GRASP_PI = "0313_2_n_25_45"
    GRASP_DIR = "./trained_models_%s/ppo/" % "0313_2_n"  # TODO
    PLACE_PI = "0313_2_placeco_0316_1"  # 50ms
    PLACE_DIR = "./trained_models_%s/ppo/" % PLACE_PI
else:
    if IS_BOX:
        GRASP_PI = "0311_box_2_n_20_50"
        GRASP_DIR = "./trained_models_%s/ppo/" % "0311_box_2_n"  # TODO
        PLACE_PI = "0311_box_2_placeco_0316_0"  # 50ms
        PLACE_DIR = "./trained_models_%s/ppo/" % PLACE_PI
    else:
        GRASP_PI = "0311_cyl_2_n_20_50"
        GRASP_DIR = "./trained_models_%s/ppo/" % "0311_cyl_2_n"  # TODO
        PLACE_PI = "0311_cyl_2_placeco_0316_0"  # 50ms
        PLACE_DIR = "./trained_models_%s/ppo/" % PLACE_PI
# Natural-language command fed to the NLP module.
if IS_BOX:
    if PLACE_FLOOR:
        sentence = "Put the green box in front of the blue cylinder"
    else:
        sentence = "Put the green box on top of the blue cylinder"
else:
    if PLACE_FLOOR:
        sentence = "Put the green cylinder in front of the blue cylinder"
    else:
        sentence = "Put the green cylinder on top of the blue cylinder"
GRASP_PI_ENV_NAME = "InmoovHandGraspBulletEnv-v5"
PLACE_PI_ENV_NAME = "InmoovHandPlaceBulletEnv-v9"
USE_VISION_DELAY = True
VISION_DELAY = 2
# Control skip: simulation steps executed per policy action.
PLACING_CONTROL_SKIP = 6
GRASPING_CONTROL_SKIP = 6
def planning(Traj, recurrent_hidden_states, masks):
    """Execute a planned joint-space arm trajectory in open loop.

    Args:
        Traj: Waypoint array; columns 0:7 of each row are the 7 target arm
            joint angles for that step.
        recurrent_hidden_states: Unused here; kept for call-site symmetry.
        masks: Unused here; kept for call-site symmetry.

    Side effects: drives the module-level ``env_core`` robot through the
    trajectory, stepping the bullet client ``p`` and sleeping ``TS`` seconds
    per step, then holds the final waypoint for 50 extra simulation steps.
    """
    print("end of traj", Traj[-1, 0:7])
    for ind in range(0, len(Traj)):
        tar_armq = Traj[ind, 0:7]
        env_core.robot.tar_arm_q = tar_armq
        env_core.robot.apply_action([0.0] * 24)
        p.stepSimulation()
        time.sleep(TS)
        # pose_saver.get_poses()
    # Hold the last target so the arm settles before the next phase.
    for _ in range(50):
        # print(env_core.robot.tar_arm_q)
        env_core.robot.tar_arm_q = tar_armq
        env_core.robot.apply_action([0.0] * 24)  # stay still for a while
        p.stepSimulation()
        # pose_saver.get_poses()
    # print("act", env_core.robot.get_q_dq(env_core.robot.arm_dofs)[0])
    # #time.sleep(1. / 240.)
def get_relative_state_for_reset(oid):
    """Capture the object-in-palm pose and finger state for later resets.

    Args:
        oid: Bullet body id of the (grasped) object.

    Returns:
        Dict with the object's position/quaternion expressed in the palm
        (end-effector link) frame, the current finger joint angles, and the
        current finger joint targets.
    """
    obj_pos, obj_quat = p.getBasePositionAndOrientation(oid)  # w2o
    hand_pos, hand_quat = env_core.robot.get_link_pos_quat(
        env_core.robot.ee_id
    )  # w2p
    # Compose the inverted palm pose with the object pose to express the
    # object in the palm frame (p2w * w2o, per the original notation).
    inv_h_p, inv_h_q = p.invertTransform(hand_pos, hand_quat)  # p2w
    o_p_hf, o_q_hf = p.multiplyTransforms(
        inv_h_p, inv_h_q, obj_pos, obj_quat
    )  # p2w*w2o
    fin_q, _ = env_core.robot.get_q_dq(env_core.robot.all_findofs)
    state = {
        "obj_pos_in_palm": o_p_hf,
        "obj_quat_in_palm": o_q_hf,
        "all_fin_q": fin_q,
        "fin_tar_q": env_core.robot.tar_fin_q,
    }
    return state
def load_policy_params(dir, env_name, iter=None):
    """Load an actor-critic checkpoint plus fresh recurrent state.

    Args:
        dir: Directory holding the ``.pt`` checkpoints.
        env_name: Environment name prefix of the checkpoint file.
        iter: Optional iteration number appended as ``_<iter>`` to the name.

    Returns:
        (actor_critic, ob_rms, recurrent_hidden_states, masks); callers
        typically use only the first one or two.
    """
    suffix = env_name if iter is None else "%s_%s" % (env_name, iter)
    path = os.path.join(dir, suffix + ".pt")
    # On CPU-only machines, remap any CUDA tensors in the checkpoint.
    actor_critic, ob_rms = torch.load(
        path, map_location=None if IS_CUDA else "cpu"
    )
    hidden = torch.zeros(1, actor_critic.recurrent_hidden_state_size)
    masks = torch.zeros(1, 1)
    return actor_critic, ob_rms, hidden, masks
def wrap_over_grasp_obs(obs):
    """Wrap a raw observation into a batched torch tensor (GPU if IS_CUDA)."""
    wrapped = torch.Tensor([obs])
    return wrapped.cuda() if IS_CUDA else wrapped
def unwrap_action(act_tensor):
    """Convert a policy action tensor to a flat numpy array on the CPU."""
    act = act_tensor.squeeze()
    if IS_CUDA:
        act = act.cpu()
    return act.numpy()
def get_traj_from_openrave_container(objs, q_start, q_end, save_file_path, read_file_path):
    """Hand a planning request to the OpenRAVE container and wait for a reply.

    Writes the request to ``save_file_path``, then polls until the container
    writes the resulting trajectory to ``read_file_path``, loads it, and
    deletes the reply file so the next request starts clean.

    Args:
        objs: Object position array shared with the planner.
        q_start: Start arm configuration, or None for a "reach" request.
        q_end: Goal arm configuration.
        save_file_path: ``.npz`` path watched by the OpenRAVE container.
        read_file_path: ``.npy`` path the container writes the trajectory to.

    Returns:
        The planned trajectory array loaded from ``read_file_path``.
    """
    if q_start is not None:
        np.savez(save_file_path, objs, q_start, q_end)  # move
    else:
        np.savez(save_file_path, objs, q_end)  # reach has q_start 0
    # Wait for command from OpenRave
    # A leftover reply from a previous run would be mistaken for this
    # request's answer, hence the up-front assertion.
    assert not os.path.exists(read_file_path)
    while not os.path.exists(read_file_path):
        time.sleep(0.2)
    if os.path.isfile(read_file_path):
        traj = np.load(read_file_path)
        print("loaded")
        try:
            os.remove(read_file_path)
            print("deleted")
            # input("press enter")
        except OSError as e:  # name the Exception `e`
            print("Failed with:", e.strerror)  # look what it says
            # input("press enter")
    else:
        # The path exists but is not a regular file (e.g. a directory).
        raise ValueError("%s isn't a file!" % read_file_path)
    print("Trajectory obtained from OpenRave!")
    # input("press enter")
    return traj
def construct_bullet_scene(odicts):  # TODO: copied from inference code
    """Load the objects described by ``odicts`` plus the tabletop into bullet.

    Each object dict provides "shape", "size" ("small" or large), a 4-vector
    "position" (asserted below to be x, y, z plus height), and a "color" key
    into COLORS. Objects are lifted by their half-height (+1mm) so they rest
    on the table surface.

    Returns:
        List of bullet body ids, in the same order as ``odicts`` (the table
        is loaded last and its id is not returned).
    """
    # p.resetSimulation()
    obj_ids = []
    for odict in odicts:
        ob_shape = odict["shape"]
        assert len(odict["position"]) == 4  # x y and z height
        real_loc = np.array(odict["position"][0:3])
        if odict["size"] == "small":
            # Small objects use a dedicated "<shape>_small" URDF.
            ob_shape += "_small"
            real_loc += [0, 0, HALF_OBJ_HEIGHT_S + 0.001]
        else:
            real_loc += [0, 0, HALF_OBJ_HEIGHT_L + 0.001]
        urdf_file = (
            "my_pybullet_envs/assets/" + ob_shape + ".urdf"
        )  # TODO: hardcoded path
        obj_id = p.loadURDF(
            os.path.join(currentdir, urdf_file), real_loc, useFixedBase=0
        )
        p.changeVisualShape(obj_id, -1, rgbaColor=COLORS[odict["color"]])
        p.changeDynamics(obj_id, -1, lateralFriction=OBJ_MU)
        obj_ids.append(obj_id)
    table_id = p.loadURDF(
        os.path.join(currentdir, "my_pybullet_envs/assets/tabletop.urdf"),
        TABLE_OFFSET,
        useFixedBase=1,
    )  # main sim uses 0.27, 0.1/ constuct table at last
    p.changeVisualShape(table_id, -1, rgbaColor=COLORS["grey"])
    p.changeDynamics(table_id, -1, lateralFriction=FLOOR_MU)
    return obj_ids
def get_stacking_obs(
    top_oid: int,
    btm_oid: int,
    use_vision: bool,
    vision_module=None,
    verbose: Optional[bool] = False,
):
    """Retrieves stacking observations.

    Args:
        top_oid: The object ID of the top object.
        btm_oid: The object ID of the bottom object.
        use_vision: Whether to use vision or GT.
        vision_module: The vision module used to generate predictions when
            ``use_vision`` is True.
        verbose: If True, pretty-print the vision predictions.

    Returns:
        t_pos: The xyz position of the top object.
        t_up: The up vector of the top object.
        b_pos: The xyz position of the bottom object.
        b_up: The up vector of the bottom object.
        t_half_height: Half of the height of the top object.
    """
    if use_vision:
        # Fix: use the vision_module argument rather than the module-level
        # stacking_vision_module global that this parameter used to shadow.
        top_odict, btm_odict = vision_module.predict(
            client_oids=[top_oid, btm_oid]
        )
        t_pos = top_odict["position"]
        b_pos = btm_odict["position"]
        t_up = top_odict["up_vector"]
        # Fix: the bottom up-vector was mistakenly read from top_odict.
        b_up = btm_odict["up_vector"]
        t_half_height = top_odict["height"] / 2
        if verbose:
            print(f"Stacking vision module predictions:")
            pprint.pprint(top_odict)
            pprint.pprint(btm_odict)
    else:
        # Ground truth from the simulator: the up vector is taken from
        # elements 2/5/8 of the flattened rotation matrix (its third column).
        t_pos, t_quat = p.getBasePositionAndOrientation(top_oid)
        b_pos, b_quat = p.getBasePositionAndOrientation(btm_oid)
        rot = np.array(p.getMatrixFromQuaternion(t_quat))
        t_up = [rot[2], rot[5], rot[8]]
        rot = np.array(p.getMatrixFromQuaternion(b_quat))
        b_up = [rot[2], rot[5], rot[8]]
        t_half_height = T_HALF_HEIGHT
    return t_pos, t_up, b_pos, b_up, t_half_height
"""Pre-calculation & Loading"""
# latter 2 returns dummy
g_actor_critic, g_ob_rms, _, _ = load_policy_params(
GRASP_DIR, GRASP_PI_ENV_NAME
)
p_actor_critic, p_ob_rms, recurrent_hidden_states, masks = load_policy_params(
PLACE_DIR, PLACE_PI_ENV_NAME
)
"""Vision and language"""
if USE_VISION_MODULE:
# Construct the bullet scene using DIRECT rendering, because that's what
# the vision module was trained on.
vision_p = util.create_bullet_client(mode="direct")
obj_ids = construct_bullet_scene(odicts=gt_odicts)
# Initialize the vision module for initial planning. We apply camera offset
# because the default camera position is for y=0, but the table is offset
# in this case.
state_saver = StateSaver(p=vision_p)
for obj_i in range(len(obj_ids)):
odict = gt_odicts[obj_i]
shape = odict["shape"]
size = odict["size"]
state_saver.track_object(
oid=obj_ids[obj_i],
shape=shape,
color=odict["color"],
radius=SHAPE2SIZE2RADIUS[shape][size],
height=SIZE2HALF_H[size] * 2,
)
initial_vision_module = VisionInference(
state_saver=state_saver,
checkpoint_path="/home/michelle/outputs/ego_v009/checkpoint_best.pt",
camera_position=[
-0.20450591046900168,
0.03197646764976494,
0.4330631992464512,
],
camera_offset=[-0.05, TABLE_OFFSET[1], 0.0],
camera_directed_offset=[0.02, 0.0, 0.0],
apply_offset_to_preds=True,
html_dir="/home/michelle/html/vision_inference_initial",
)
# initial_vision_module = VisionInference(
# state_saver=state_saver,
# checkpoint_path="/home/michelle/outputs/stacking_v003/checkpoint_best.pt",
# camera_position=[-0.2237938867122504, 0.0, 0.5425],
# camera_offset=[0.0, TABLE_OFFSET[1], 0.0],
# apply_offset_to_preds=False,
# html_dir="/home/michelle/html/demo_delay_vision_v003_{top_shape}",
# )
pred_odicts = initial_vision_module.predict(client_oids=obj_ids)
# Artificially pad with a fourth dimension because language module
# expects it.
for i in range(len(pred_odicts)):
pred_odicts[i]["position"] = pred_odicts[i]["position"] + [0.0]
print(f"Vision module predictions:")
pprint.pprint(pred_odicts)
vision_p.disconnect()
language_input_objs = pred_odicts
else:
language_input_objs = gt_odicts
initial_vision_module = None
# stacking_vision_module = None
# [OBJECTS, placing_xyz] = NLPmod(sentence, language_input_objs)
# print("placing xyz from language", placing_xyz)
top_obj_idx, dest_xy, btm_obj_idx = NLPmod(sentence, language_input_objs)
# Build structured OBJECTS list in which first entry is the target object
OBJECTS = np.array([language_input_objs[top_obj_idx]["position"]])
for i in range(len(language_input_objs)):
if i != top_obj_idx:
OBJECTS = np.concatenate(
(OBJECTS, np.array([language_input_objs[i]["position"]]))
)
placing_xyz = [dest_xy[0], dest_xy[1], 0.0]
# Define the grasp position.
if USE_VISION_MODULE:
top_pos = pred_odicts[top_obj_idx]["position"]
g_half_h = pred_odicts[top_obj_idx]["height"] / 2
else:
top_pos = gt_odicts[top_obj_idx]["position"]
g_half_h = T_HALF_HEIGHT
g_tx, g_ty = top_pos[0], top_pos[1]
print(f"Grasp position: ({g_tx}, {g_ty})\theight: {g_half_h}")
if PLACE_FLOOR:
p_tx, p_ty, p_tz = placing_xyz[0], placing_xyz[1], placing_xyz[2]
else:
# Define the target xyz position to perform placing.
p_tx, p_ty = placing_xyz[0], placing_xyz[1]
if USE_VISION_MODULE:
# Temp: replace with GT
# p_tx = gt_odicts[btm_obj_idx]["position"][0]
# p_ty = gt_odicts[btm_obj_idx]["position"][1]
# p_tz = P_TZ
p_tz = pred_odicts[btm_obj_idx]["height"]
else:
p_tz = P_TZ # TODO: need to handle place floor
print(f"Placing position: ({p_tx}, {p_ty}, {p_tz})")
"""Start Bullet session."""
if RENDER:
p.connect(p.GUI)
else:
p.connect(p.DIRECT)
"""Imaginary arm session to get q_reach"""
sess = ImaginaryArmObjSession()
Qreach = np.array(sess.get_most_comfortable_q_and_refangle(g_tx, g_ty)[0])
desired_obj_pos = [p_tx, p_ty, PLACE_CLEARANCE + p_tz]
a = InmoovShadowHandPlaceEnvV9(renders=False, grasp_pi_name=GRASP_PI)
a.seed(args.seed)
table_id = p.loadURDF(
os.path.join(currentdir, "my_pybullet_envs/assets/tabletop.urdf"),
TABLE_OFFSET,
useFixedBase=1,
)
# a.floor_id = table_id # TODO:tmp hack, v9 get_n_optimal_init_arm_qs need to do collision checking
# Qdestin = a.get_n_optimal_init_arm_qs(desired_obj_pos)[0] # TODO: [1] is the 2nd candidate
# print("place arm q", Qdestin)
# p.resetSimulation() # Clean up the simulation, since this is only imaginary.
# a = InmoovShadowHandPlaceEnvV9(renders=False, grasp_pi_name=GRASP_PI)
# a.seed(args.seed)
# # TODO:tmp, get_n_optimal_init_arm_qs need to do collision checking
# table_id = p.loadURDF(
# os.path.join(currentdir, "my_pybullet_envs/assets/tabletop.urdf"),
# utils.TABLE_OFFSET,
# useFixedBase=1,
# )
# desired_obj_pos = [g_tx, g_ty, g_tz]
# Qreach = utils.get_n_optimal_init_arm_qs(a.robot, utils.PALM_POS_OF_INIT,
# p.getQuaternionFromEuler(utils.PALM_EULER_OF_INIT),
# desired_obj_pos, table_id, wrist_gain=3.0)[0] # TODO
# desired_obj_pos = [p_tx, p_ty, utils.PLACE_START_CLEARANCE + p_tz]
p_pos_of_ave, p_quat_of_ave = p.invertTransform(
a.o_pos_pf_ave, a.o_quat_pf_ave
)
# TODO: [1] is the 2nd candidate
Qdestin = utils.get_n_optimal_init_arm_qs(
a.robot, p_pos_of_ave, p_quat_of_ave, desired_obj_pos, table_id
)[0]
p.resetSimulation() # Clean up the simulation, since this is only imaginary.
"""Setup Bullet world."""
p.setPhysicsEngineParameter(numSolverIterations=BULLET_SOLVER_ITER)
p.setPhysicsEngineParameter(deterministicOverlappingPairs=DET_CONTACT)
p.setTimeStep(TS)
p.setGravity(0, 0, -10)
# Load bullet objects again, since they were cleared out by the imaginary
# arm session.
print(f"Loading objects:")
pprint.pprint(gt_odicts)
env_core = InmoovShadowHandDemoEnvV4(
seed=args.seed,
init_noise=INIT_NOISE,
timestep=TS,
withVel=False,
diffTar=True,
robot_mu=HAND_MU,
control_skip=GRASPING_CONTROL_SKIP,
) # TODO: does obj/robot order matter
env_core.robot.reset_with_certain_arm_q([0.0]*7)
obj_ids = construct_bullet_scene(odicts=gt_odicts)
top_oid = obj_ids[top_obj_idx]
btm_oid = obj_ids[btm_obj_idx]
# # Initialize a PoseSaver to save poses throughout robot execution.
# pose_saver = PoseSaver(
# path=args.pose_path, oids=obj_ids, robot_id=env_core.robot.arm_id
# )
"""Prepare for grasping. Reach for the object."""
print(f"Qreach: {Qreach}")
# reach_save_path = homedir + "/container_data/PB_REACH.npz"
# reach_read_path = homedir + "/container_data/OR_REACH.npy"
# Traj_reach = get_traj_from_openrave_container(OBJECTS, None, Qreach, reach_save_path, reach_read_path)
#
# planning(Traj_reach, recurrent_hidden_states, masks)
# input("press enter")
env_core.robot.reset_with_certain_arm_q(Qreach)
# input("press enter 2")
# pose_saver.get_poses()
print(f"Pose after reset")
# pprint.pprint(pose_saver.poses[-1])
g_obs = env_core.get_robot_contact_txty_halfh_obs_nodup(g_tx, g_ty, g_half_h)
g_obs = wrap_over_grasp_obs(g_obs)
"""Grasp"""
control_steps = 0
for i in range(GRASP_END_STEP):
with torch.no_grad():
value, action, _, recurrent_hidden_states = g_actor_critic.act(
g_obs, recurrent_hidden_states, masks, deterministic=args.det
)
env_core.step(unwrap_action(action))
g_obs = env_core.get_robot_contact_txty_halfh_obs_nodup(
g_tx, g_ty, g_half_h
)
g_obs = wrap_over_grasp_obs(g_obs)
# print(g_obs)
# print(action)
# print(control_steps)
# control_steps += 1
# input("press enter g_obs")
masks.fill_(1.0)
# pose_saver.get_poses()
print(f"Pose after grasping")
# pprint.pprint(pose_saver.poses[-1])
final_g_obs = copy.copy(g_obs)
del g_obs, g_tx, g_ty, g_actor_critic, g_ob_rms, g_half_h
state = get_relative_state_for_reset(top_oid)
print("after grasping", state)
print("arm q", env_core.robot.get_q_dq(env_core.robot.arm_dofs)[0])
# input("after grasping")
"""Send move command to OpenRAVE"""
Qmove_init = env_core.robot.get_q_dq(env_core.robot.arm_dofs)[0]
print(f"Qmove_init: {Qmove_init}")
print(f"Qdestin: {Qdestin}")
move_save_path = homedir + "/container_data/PB_MOVE.npz"
move_read_path = homedir + "/container_data/OR_MOVE.npy"
Traj_move = get_traj_from_openrave_container(OBJECTS, Qmove_init, Qdestin, move_save_path, move_read_path)
"""Execute planned moving trajectory"""
planning(Traj_move, recurrent_hidden_states, masks)
print("after moving", get_relative_state_for_reset(top_oid))
print("arm q", env_core.robot.get_q_dq(env_core.robot.arm_dofs)[0])
# input("after moving")
print("palm", env_core.robot.get_link_pos_quat(env_core.robot.ee_id))
# pose_saver.get_poses()
# print(f"Pose before placing")
# pprint.pprint(pose_saver.poses[-1])
"""Prepare for placing"""
env_core.change_control_skip_scaling(c_skip=PLACING_CONTROL_SKIP)
if USE_VISION_MODULE:
# Initialize the vision module for stacking.
top_shape = gt_odicts[top_obj_idx]["shape"]
state_saver = StateSaver(p=p)
state_saver.set_robot_id(env_core.robot.arm_id)
for obj_i in range(len(obj_ids)):
odict = gt_odicts[obj_i]
shape = odict["shape"]
size = odict["size"]
state_saver.track_object(
oid=obj_ids[obj_i],
shape=shape,
color=odict["color"],
radius=SHAPE2SIZE2RADIUS[shape][size],
height=SIZE2HALF_H[size] * 2,
)
stacking_vision_module = VisionInference(
state_saver=state_saver,
checkpoint_path="/home/michelle/outputs/stacking_v003/checkpoint_best.pt",
camera_position=[-0.2237938867122504, 0.0, 0.5425],
camera_offset=[0.0, TABLE_OFFSET[1], 0.0],
apply_offset_to_preds=False,
html_dir="/home/michelle/html/demo_delay_vision_v003_{top_shape}",
)
else:
stacking_vision_module = None
t_pos, t_up, b_pos, b_up, t_half_height = get_stacking_obs(
top_oid=top_oid,
btm_oid=btm_oid,
use_vision=USE_VISION_MODULE,
vision_module=stacking_vision_module,
verbose=True,
)
l_t_pos, l_t_up, l_b_pos, l_b_up, l_t_half_height = (
t_pos,
t_up,
b_pos,
b_up,
t_half_height,
)
# TODO: an unly hack to force Bullet compute forward kinematics
if MIX_SHAPE_PI:
p_obs = env_core.get_robot_contact_txtytz_halfh_shape_2obj6dUp_obs_nodup_from_up(
p_tx, p_ty, p_tz, t_half_height, IS_BOX, t_pos, t_up, b_pos, b_up
)
p_obs = env_core.get_robot_contact_txtytz_halfh_shape_2obj6dUp_obs_nodup_from_up(
p_tx, p_ty, p_tz, t_half_height, IS_BOX, t_pos, t_up, b_pos, b_up
)
else:
p_obs = env_core.get_robot_contact_txtytz_halfh_2obj6dUp_obs_nodup_from_up(
p_tx, p_ty, p_tz, t_half_height, t_pos, t_up, b_pos, b_up
)
p_obs = env_core.get_robot_contact_txtytz_halfh_2obj6dUp_obs_nodup_from_up(
p_tx, p_ty, p_tz, t_half_height, t_pos, t_up, b_pos, b_up
)
p_obs = wrap_over_grasp_obs(p_obs)
print("pobs", p_obs)
# input("ready to place")
"""Execute placing"""
print(f"Executing placing...")
for i in tqdm(range(PLACE_END_STEP)):
with torch.no_grad():
value, action, _, recurrent_hidden_states = p_actor_critic.act(
p_obs, recurrent_hidden_states, masks, deterministic=args.det
)
env_core.step(unwrap_action(action))
if USE_VISION_DELAY:
if (i + 1) % VISION_DELAY == 0:
l_t_pos, l_t_up, l_b_pos, l_b_up, l_t_half_height = (
t_pos,
t_up,
b_pos,
b_up,
t_half_height,
)
t_pos, t_up, b_pos, b_up, t_half_height = get_stacking_obs(
top_oid=top_oid,
btm_oid=btm_oid,
use_vision=USE_VISION_MODULE,
vision_module=stacking_vision_module,
)
if MIX_SHAPE_PI:
p_obs = env_core.get_robot_contact_txtytz_halfh_shape_2obj6dUp_obs_nodup_from_up(
p_tx, p_ty, p_tz, l_t_half_height, IS_BOX, l_t_pos, l_t_up, l_b_pos, l_b_up
)
else:
p_obs = env_core.get_robot_contact_txtytz_halfh_2obj6dUp_obs_nodup_from_up(
p_tx, p_ty, p_tz, l_t_half_height, l_t_pos, l_t_up, l_b_pos, l_b_up
)
else:
t_pos, t_quat, b_pos, b_quat, t_half_height = get_stacking_obs(
top_oid=top_oid,
btm_oid=btm_oid,
use_vision=USE_VISION_MODULE,
vision_module=stacking_vision_module,
)
if MIX_SHAPE_PI:
p_obs = env_core.get_robot_contact_txtytz_halfh_shape_2obj6dUp_obs_nodup_from_up(
p_tx, p_ty, p_tz, t_half_height, IS_BOX, t_pos, t_up, b_pos, b_up
)
else:
p_obs = env_core.get_robot_contact_txtytz_halfh_2obj6dUp_obs_nodup_from_up(
p_tx, p_ty, p_tz, t_half_height, t_pos, t_up, b_pos, b_up
)
p_obs = wrap_over_grasp_obs(p_obs)
# print(action)
# print(p_obs)
# input("press enter g_obs")
masks.fill_(1.0)
# pose_saver.get_poses()
# print(f"Pose after placing")
# pprint.pprint(pose_saver.poses[-1])
print(f"Starting release trajectory")
# execute_release_traj()
for ind in range(0, 100):
p.stepSimulation()
time.sleep(TS)
# pose_saver.get_poses()
# if SAVE_POSES:
# pose_saver.save()
if USE_VISION_MODULE:
initial_vision_module.close()
stacking_vision_module.close()
|
import vg
from .._common.shape import check_shape_any
def project_to_line(points, reference_points_of_lines, vectors_along_lines):
    """Project one or more points onto one or more lines.

    Each line is given by a reference point and a direction vector; the
    result is ``ref + project(point - ref, onto=direction)``.

    Args:
        points: A single ``(3,)`` point or a ``(k, 3)`` stack of points.
        reference_points_of_lines: A ``(3,)`` point or a ``(k, 3)`` stack
            (``k`` matching ``points`` when both are stacked).
        vectors_along_lines: Direction vectors with the same shape as
            ``reference_points_of_lines``.

    Returns:
        The projected point(s), with the broadcast shape of the inputs.
    """
    k = check_shape_any(points, (3,), (-1, 3), name="points")
    check_shape_any(
        reference_points_of_lines,
        (3,),
        (-1 if k is None else k, 3),
        name="reference_points_of_lines",
    )
    vg.shape.check(locals(), "vectors_along_lines", reference_points_of_lines.shape)
    return reference_points_of_lines + vg.project(
        points - reference_points_of_lines, onto=vectors_along_lines
    )
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 21 16:18:07 2022
@author: amasilva
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import glob
class DuneEvolution:
    """Loader and plotting helper for dune-simulation runs.

    Parses the simulation parameter file given by ``filename``, reads the
    per-snapshot result files from ``save_directory``, and offers plotting
    helpers for time series, space-time maps, animations and profiles.

    Any attribute (``filename``, ``parameter``, ``save_directory``,
    ``use_real_time``, ...) can be overridden via keyword arguments.
    """

    def __init__(self, **kwargs):
        self.filename = []      # parameter-file path (expected via kwargs)
        self.parameter = 'h'    # result variable currently loaded
        # real time or model time (total time)
        self.use_real_time = False
        for key, value in kwargs.items():
            setattr(self, key, value)
        with open(self.filename) as f:
            lines = f.readlines()
        self.paramslines = lines
        # Needle found in a parameter line -> attribute that receives the
        # float parsed from the first token after '='.
        float_params = {
            'NX =': 'nx',
            'NY =': 'ny',
            'dx =': 'dx',
            'Nt =': 'nt',
            'save.every': 'save_every',
            'dt_max =': 'dt_max',
            'constwind.u': 'constwind_u',
            'wind.fraction': 'wind_fraction',
            'veget.xmin': 'veget_xmin',
            'veget.zmin': 'veget_zmin',
            'veget.Hveg': 'veget_Hveg',
            'beach.angle': 'beach_angle',
            'shore.MHWL': 'shore_MHWL',
            'shore.sealevel': 'shore_sealevel',
            'plain.Height': 'plain_Height',
            'beach.h': 'beach_h',
            'veget.plain.Height': 'veget_plain_Height',
        }
        for line in self.paramslines:
            for needle, attr in float_params.items():
                if needle in line:
                    setattr(self, attr, float((line.split('=')[1]).split()[0]))
            # An explicit save_directory kwarg wins over the file's save.dir.
            if 'save.dir' in line and 'save_directory' not in kwargs:
                self.save_directory = ((line.split('=')[1]).split()[0])
        self.n = int(self.nt / self.save_every)  # number of saved snapshots
        self.read_time_results()
        self.read_variable_results()

    def list_parameters(self):
        """Return the distinct result-variable names found in save_directory."""
        files_params = os.listdir(self.save_directory + '\\')
        params = []
        for param in files_params:
            params.append(param.split('.')[0])
        params_list = np.unique(np.array(params))
        self.params_list = params_list.tolist()
        return params_list.tolist()

    def read_variable_results(self, parameter='h'):
        """Load every saved snapshot of ``parameter``.

        Populates ``self.variable`` (snapshots x nx x ny, trailing NaN
        column dropped), the matching iteration numbers, and the total/real
        time vectors.
        """
        if not self.parameter == parameter:
            self.parameter = parameter
        file_list = glob.glob(self.save_directory + '\\' + self.parameter + '.*.dat')
        ny = int(self.ny)
        nx = int(self.nx)
        n_times_variable = int(self.nt / self.save_every)
        if not len(file_list) == n_times_variable:
            print('error')
        variable_array = np.zeros([n_times_variable, nx, ny + 1])
        iterations_variable = np.zeros([n_times_variable])
        for i, each_file in enumerate(file_list):
            # File names look like "<param>.<iteration>.dat".
            # Fix: np.int was removed in NumPy 1.24; use the builtin int.
            iteration = int(each_file.split('.')[-2])
            # NOTE(review): ordering divides by dt_max here but by
            # save_every elsewhere (iterations_to_totaltime) -- confirm.
            iterations_order = int(iteration / self.dt_max - 1)
            iterations_variable[iterations_order] = iteration
            file_data = pd.read_csv(each_file, sep=' ', header=None)
            variable_array[iterations_order, :, :] = np.array([file_data])
        self.iterations_variable = iterations_variable
        self.total_time = self.iterations_to_totaltime(self.iterations_variable)[0]
        self.real_time = self.total_time / self.wind_fraction
        self.variable = variable_array[:, :, :-1]  # delete last column nan
        self.iterations_order = np.arange(0, n_times_variable)

    def totaltime_to_iteration(self, time_vector):
        """Convert model times (years) to iteration numbers and array indices."""
        it = np.array(time_vector) * 365 * 24 * 3600 / self.dt_max
        if self.dt_max <= 1000:
            # Snap to a coarse grid: round it/5 to hundreds, then scale back.
            iterations_variable = np.around(it / 5, -2) * 5
        else:
            iterations_variable = np.around(it, -3)
        iterations_order = iterations_variable / self.save_every - 1
        if iterations_order[0] < 0:
            iterations_order[0] = 0
        return iterations_variable, iterations_order.astype(int)

    def iterations_to_totaltime(self, iterations_vector):
        """Convert iteration numbers to model time (years) and array indices."""
        totaltime = iterations_vector * self.dt_max / (365 * 24 * 3600)
        iterations_order = iterations_vector / self.save_every - 1
        return totaltime, iterations_order.astype(int)

    def convert_to_real_time(self):
        """Label plots in real time (total time / wind fraction)."""
        self.use_real_time = True

    def convert_to_total_time(self):
        """Label plots in model (total) time."""
        self.use_real_time = False

    def read_time_results(self):
        """Read the global time-series file (time.dat) into a DataFrame."""
        columns = ['iterations', 'real time in yr', 'maximum height', 'maximum cover', 'volume / mass of sand', 'distance traveled by the dune in X', 'dune in flux', 'dune out flux', 'surge above MHWL']
        table_data = pd.read_csv((self.save_directory + '\\time.dat'), sep=' ', names = columns, header = 8)
        self.time_results = table_data

    def plot_time_results(self):
        """Plot the global time series (fluxes, heights, sand volume)."""
        self.time_results.plot('real time in yr', ['distance traveled by the dune in X','dune in flux', 'dune out flux', 'surge above MHWL'])
        self.time_results.plot('real time in yr', ['maximum height', 'maximum cover'])
        self.time_results.plot('real time in yr', 'volume / mass of sand')

    def plot_space_time(self, step = 1, time_vector_yrs = False, parameter = 'h'):
        """Plot a space-time map of ``parameter`` along grid column 1."""
        if not time_vector_yrs:
            time_vector_order = np.arange(0, self.n, step)
        else:
            time_vector_order = self.totaltime_to_iteration(time_vector_yrs)[1]
        # Reload data if a different variable was requested.
        if not self.parameter == parameter:
            self.parameter = parameter
            self.read_variable_results(parameter=parameter)
        fig, ax = plt.subplots()
        c = ax.pcolor(self.variable[time_vector_order, :, 1], cmap='plasma')
        fig.legend([self.parameter])
        fig.colorbar(c, ax=ax)
        ax.set_xlabel('number of grid columns' )
        if self.use_real_time:
            ax.set_ylabel('real time in yr')
        else:
            ax.set_ylabel('total time in yr')
        # Relabel the y axis (snapshot index) in years.
        yticks = ax.get_yticks().astype(int)
        if not time_vector_yrs:
            iter_yticks = yticks * step * self.dt_max + self.save_every
            if self.use_real_time:
                time_vector_yrs_yticks = self.iterations_to_totaltime(iter_yticks)[0] / self.wind_fraction
            else:
                time_vector_yrs_yticks = self.iterations_to_totaltime(iter_yticks)[0]
        else:
            temp = np.array(time_vector_yrs)
            if self.use_real_time:
                time_vector_yrs_yticks = temp[yticks[:-1]] / self.wind_fraction
            else:
                time_vector_yrs_yticks = temp[yticks[:-1]]
        yticks_yrs = np.around(time_vector_yrs_yticks, 3)
        ax.set_yticklabels(yticks_yrs)

    def plot_variable_animation(self, step = 1, time_vector_yrs = False, parameter = 'h'):
        """Animate the 2-D field of ``parameter`` over the selected snapshots."""
        if not time_vector_yrs:
            time_vector_order = np.arange(0, self.n, step)
        else:
            time_vector_order = self.totaltime_to_iteration(time_vector_yrs)[1]
        if not self.parameter == parameter:
            self.parameter = parameter
            self.read_variable_results(parameter=parameter)
        plt.rcParams['figure.figsize'] = [4.5, 6]
        plt.rcParams['figure.autolayout'] = True
        fig, ax = plt.subplots()
        x = np.linspace(0, self.variable.shape[2], self.variable.shape[2] * 2)
        t = np.linspace(0, time_vector_order.shape[0], time_vector_order.shape[0])
        y = np.linspace(0, self.variable.shape[1], self.variable.shape[1])
        X3, Y3, T3 = np.meshgrid(x, y, t)
        # Fixed color scale across all frames (taken from grid column 1).
        vmin = min(np.array(self.variable[:, :, 1]).min(axis=1))
        vmax = max(np.array(self.variable[:, :, 1]).max(axis=1))
        cmap = 'viridis_r' #'rainbow'
        nax = ax.pcolor(self.variable[time_vector_order[0], :, :], vmin=vmin, vmax=vmax, cmap = cmap)
        fig.colorbar(nax)
        ax.set_xlabel('number of grid lines' )
        ax.set_ylabel('number of grid columns')
        for it, time in enumerate(time_vector_order):
            ax.pcolor(self.variable[time_vector_order[it], :, :], vmin = vmin, vmax = vmax, cmap = cmap)
            yrs_legend = np.around(self.total_time[(time_vector_order[it])], 3)
            if self.use_real_time:
                hand = fig.legend(['param = ' + self.parameter + '\n' + 'real time = ' + str(yrs_legend / self.wind_fraction) + ' yrs'], loc = 9 )
            else:
                hand = fig.legend(['param = ' + self.parameter + '\n' + 'total time = ' + str(yrs_legend) + ' yrs'], loc = 9 )
            plt.pause(0.00001)
            hand.remove()

    def plot_time_steps(self, step= 1, time_vector_yrs = False, parameter = 'h'):
        """Overlay profiles (grid column 1) for the selected snapshots."""
        if not time_vector_yrs:
            time_vector_order = np.arange(0, self.n, step)
        else:
            time_vector_order = self.totaltime_to_iteration(time_vector_yrs)[1]
        if not self.parameter == parameter:
            self.parameter = parameter
            self.read_variable_results(parameter=parameter)
        plt.rcParams['figure.figsize'] = [8, 5]
        plt.rcParams['figure.autolayout'] = True
        fig, ax = plt.subplots()
        for it, time in enumerate(time_vector_order):
            # Fade the line color from green toward blue with progress.
            perc = it / time_vector_order.shape[0]
            cor = (1 - perc, 1 - perc / 2, perc)
            ax.plot(self.variable[time, :, 1].T, color = cor)
        if self.use_real_time:
            ax.legend(np.around(self.total_time[time_vector_order] / self.wind_fraction, 2), loc = 'upper left', ncol=8, mode = 'expand')
            plt.title('real time (yrs)', size= 10)
        else:
            ax.legend(np.around(self.total_time[time_vector_order], 2), loc='upper left', ncol=8, mode='expand')
            plt.title('total time (yrs)', size = 10)
        ax.set_xlabel('number of grid columns' )
        ax.set_ylabel('parameter = ' + self.parameter)
        return
|
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
import sys
import miscTCGA
import tsvIO
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
NA_VALUE = -999999
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def updateDiseaseCode(dataD):
    # Back-fill / sanity-check the existing "disease_code" feature row using
    # the disease code derived from each column's TCGA sample barcode.
    # dataD: dict with 'rowLabels', 'colLabels', 'dataMatrix', 'dataType'.
    # Returns a new dict with the (possibly updated) data matrix.
    print " "
    print " in updateDiseaseCode ... "
    # the feature matrix has thousands of features x hundreds of patients
    rowLabels = dataD['rowLabels']
    colLabels = dataD['colLabels']
    numRow = len(rowLabels)
    numCol = len(colLabels)
    dataMatrix = dataD['dataMatrix']
    print " %d rows x %d columns " % (numRow, numCol)
    # print rowLabels[:5]
    # print rowLabels[-5:]
    # Locate the disease_code feature row (last match wins); abort if absent.
    keepRow = -1
    for iRow in range(numRow):
        if (rowLabels[iRow].find("disease_code") >= 0):
            keepRow = iRow
    if ( keepRow < 0 ): sys.exit(-1)
    # outer loop is over columns ...
    print " "
    print " starting loop over %d columns ... " % numCol
    for iCol in range(numCol):
        curSample = colLabels[iCol]
        diseaseCode = miscTCGA.barcode_to_disease(curSample)
        if (diseaseCode == "NA"):
            print " got an unknown disease code ??? ", curSample, diseaseCode
        else:
            if (dataMatrix[keepRow][iCol] == "NA" ):
                dataMatrix[keepRow][iCol] = diseaseCode
                print " updating disease code from NA to %s " % diseaseCode
            else:
                # A conflicting existing code is reported but left unchanged.
                if ( dataMatrix[keepRow][iCol] != diseaseCode ):
                    print " WARNING ??? disease codes do not match ??? !!! ", dataMatrix[keepRow][iCol], diseaseCode
                    print " current value in disease_code feature : ", dataMatrix[keepRow][iCol]
                    print " based on the barcode to disease map : ", diseaseCode
                    print " leaving as is ... "
                    ## sys.exit(-1)
    newD = {}
    newD['rowLabels'] = rowLabels
    newD['colLabels'] = colLabels
    newD['dataType'] = dataD['dataType']
    newD['dataMatrix'] = dataMatrix
    return (newD)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def addDiseaseCode(dataD):
    # Add a "C:CLIN:disease_code:::::" feature row derived from the sample
    # barcodes; delegates to updateDiseaseCode if the row already exists.
    print " "
    print " in addDiseaseCode ... "
    # the feature matrix has thousands of features x hundreds of patients
    rowLabels = dataD['rowLabels']
    colLabels = dataD['colLabels']
    numRow = len(rowLabels)
    numCol = len(colLabels)
    dataMatrix = dataD['dataMatrix']
    print " %d rows x %d columns " % (numRow, numCol)
    # print rowLabels[:5]
    # print rowLabels[-5:]
    for iRow in range(numRow):
        if (rowLabels[iRow].find("disease_code") >= 0):
            return ( updateDiseaseCode(dataD) )
    ## print " ERROR in addDiseaseCode ... this matrix already seems to have this feature ", rowLabels[iRow]
    ## print " --> will NOT add a new feature (output TSV == input TSV) "
    ## return (dataD)
    # Grow the matrix by one row: copy the existing data and initialize the
    # new last row to NA_VALUE.
    numRow += 1
    rowLabels += ["C:CLIN:disease_code:::::"]
    newM = [0] * numRow
    for iR in range(numRow):
        newM[iR] = [NA_VALUE] * numCol
        if (iR != (numRow - 1)):
            for iC in range(numCol):
                newM[iR][iC] = dataMatrix[iR][iC]
    # outer loop is over columns ...
    print " "
    print " starting loop over %d columns ... " % numCol
    for iCol in range(numCol):
        curSample = colLabels[iCol]
        diseaseCode = miscTCGA.barcode_to_disease(curSample)
        if (diseaseCode == "NA"):
            print " got an unknown disease code ??? ", curSample, diseaseCode
        # NOTE(review): the code is stored even when it is "NA" -- confirm
        # that is intended (updateDiseaseCode skips NA codes).
        newM[numRow - 1][iCol] = diseaseCode
    newD = {}
    newD['rowLabels'] = rowLabels
    newD['colLabels'] = colLabels
    newD['dataType'] = dataD['dataType']
    newD['dataMatrix'] = newM
    return (newD)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
if __name__ == "__main__":
if (1):
if (len(sys.argv) == 3):
inFile = sys.argv[1]
outFile = sys.argv[2]
else:
print " "
print " Usage: %s <input TSV file> <output TSV file> "
print " "
print " ERROR -- bad command line arguments "
sys.exit(-1)
print " "
print " Running : %s %s %s " % (sys.argv[0], sys.argv[1], sys.argv[2])
print " "
print " "
# now read in the input feature matrix ...
dataD = tsvIO.readTSV(inFile)
# add a new feature called sampleType
dataD = addDiseaseCode(dataD)
# and write the matrix back out
tsvIO.writeTSV_dataMatrix(dataD, 0, 0, outFile)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
|
import logging
from django.core.management.base import LabelCommand
from corehq.apps.accounting.models import Currency
from corehq.messaging.smsbackends.twilio.models import TwilioBackend
from corehq.apps.sms.models import INCOMING
from corehq.apps.smsbillables.models import SmsGatewayFee, SmsGatewayFeeCriteria
logger = logging.getLogger('accounting')
def bootstrap_twilio_gateway_incoming(apps):
    """Create the INCOMING SMS gateway fee entry for the Twilio backend.

    Args:
        apps: A migration app registry used to resolve historical model
            classes (so this can run inside a data migration), or None to
            use the current model classes directly.
    """
    currency_class = apps.get_model('accounting', 'Currency') if apps else Currency
    sms_gateway_fee_class = apps.get_model('smsbillables', 'SmsGatewayFee') if apps else SmsGatewayFee
    sms_gateway_fee_criteria_class = apps.get_model('smsbillables', 'SmsGatewayFeeCriteria') if apps else SmsGatewayFeeCriteria
    # Flat $0.0075/message rate for any country (country_code=None), per:
    # https://www.twilio.com/sms/pricing/us
    SmsGatewayFee.create_new(
        TwilioBackend.get_api_id(),
        INCOMING,
        0.0075,
        country_code=None,
        currency=currency_class.objects.get(code="USD"),
        fee_class=sms_gateway_fee_class,
        criteria_class=sms_gateway_fee_criteria_class,
    )
    logger.info("Updated INCOMING Twilio gateway fees.")
class Command(LabelCommand):
    # One-off management command that seeds the incoming Twilio gateway fee.
    help = "bootstrap incoming Twilio gateway fees"
    args = ""
    label = ""

    def handle(self, *args, **options):
        # Pass None: run against the live models, not a migration registry.
        bootstrap_twilio_gateway_incoming(None)
|
"""Teams view."""
import json
from auvsi_suas.models.mission_clock_event import MissionClockEvent
from auvsi_suas.models.uas_telemetry import UasTelemetry
from auvsi_suas.models.takeoff_or_landing_event import TakeoffOrLandingEvent
from auvsi_suas.views import logger
from auvsi_suas.views.decorators import require_superuser
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.utils.decorators import method_decorator
from django.views.generic import View
def user_json(user):
    """Generate JSON-style dict for user."""
    last_telemetry = UasTelemetry.last_for_user(user)
    data = {
        'name': user.username,
        'id': user.pk,
        'on_clock': MissionClockEvent.user_on_clock(user),
        'on_timeout': MissionClockEvent.user_on_timeout(user),
        'in_air': TakeoffOrLandingEvent.user_in_air(user),
    }
    # No telemetry yet for this user is reported as None.
    data['telemetry'] = last_telemetry.json() if last_telemetry else None
    return data
class Teams(View):
    """Gets a list of all teams."""

    @method_decorator(require_superuser)
    def dispatch(self, *args, **kwargs):
        return super(Teams, self).dispatch(*args, **kwargs)

    def get(self, request):
        # Only standard (non-superuser) accounts represent teams.
        teams = [user_json(u)
                 for u in User.objects.all()
                 if not u.is_superuser]
        return HttpResponse(json.dumps(teams), content_type="application/json")
class TeamsId(View):
    """GET/PUT specific team."""

    @method_decorator(require_superuser)
    def dispatch(self, *args, **kwargs):
        return super(TeamsId, self).dispatch(*args, **kwargs)

    def get(self, request, pk):
        # Return the JSON representation of the team with primary key pk.
        try:
            user = User.objects.get(pk=int(pk))
        except User.DoesNotExist:
            return HttpResponseBadRequest('Unknown team %s' % pk)
        return HttpResponse(
            json.dumps(user_json(user)), content_type="application/json")

    def put(self, request, pk):
        """PUT allows updating status."""
        try:
            user = User.objects.get(pk=int(pk))
        except User.DoesNotExist:
            return HttpResponseBadRequest('Unknown team %s' % pk)
        try:
            data = json.loads(request.body)
        except ValueError:
            return HttpResponseBadRequest('Invalid JSON: %s' % request.body)
        # Potential events to update. Both are validated first and saved
        # only at the end, so a bad request leaves no partial state behind.
        takeoff_event = None
        clock_event = None
        # Update whether UAS is in air.
        if 'in_air' in data:
            in_air = data['in_air']
            if not isinstance(in_air, bool):
                return HttpResponseBadRequest('in_air must be boolean')
            currently_in_air = TakeoffOrLandingEvent.user_in_air(user)
            # New event only necessary if changing status
            if currently_in_air != in_air:
                takeoff_event = TakeoffOrLandingEvent(
                    user=user, uas_in_air=in_air)
        # Update whether UAS in on clock or timeout.
        if 'on_clock' in data or 'on_timeout' in data:
            currently_on_clock = MissionClockEvent.user_on_clock(user)
            currently_on_timeout = MissionClockEvent.user_on_timeout(user)
            # Fields absent from the request keep their current value.
            on_clock = data.get('on_clock', currently_on_clock)
            on_timeout = data.get('on_timeout', currently_on_timeout)
            if (not isinstance(on_clock, bool) or
                    not isinstance(on_timeout, bool)):
                return HttpResponseBadRequest(
                    'on_clock and on_timeout must be boolean.')
            # The two states are mutually exclusive.
            if on_clock and on_timeout:
                return HttpResponseBadRequest(
                    'Cannot be on mission clock and on timeout.')
            # New event only necessary if changing status
            if (on_clock != currently_on_clock or
                    on_timeout != currently_on_timeout):
                clock_event = MissionClockEvent(
                    user=user,
                    team_on_clock=on_clock,
                    team_on_timeout=on_timeout)
        # Request was valid. Save updates.
        if takeoff_event:
            takeoff_event.save()
        if clock_event:
            clock_event.save()
        return HttpResponse(
            json.dumps(user_json(user)), content_type="application/json")
|
# -----------------------------------------------------------------------------
# Copyright (C) 2019-2020 The python-ndn authors
#
# This file is part of python-ndn.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
import asyncio as aio
from pygtrie import Trie
from typing import List, Optional, Tuple, Dict
from .encoding import InterestParam, FormalName
from .types import InterestNack, Validator, Route
class NameTrie(Trie):
    """Trie keyed by NDN names, normalizing components to immutable bytes."""

    def _path_from_key(self, key: FormalName) -> FormalName:
        # bytes(x) copies a bytearray or writable memoryview but leaves a
        # bytes object as-is; a read-only memoryview is already safe to keep.
        normalized = []
        for comp in key:
            if isinstance(comp, memoryview) and comp.readonly:
                normalized.append(comp)
            else:
                normalized.append(bytes(comp))
        return normalized

    def _key_from_path(self, path: FormalName) -> FormalName:
        return path
class InterestTreeNode:
    """Pending-Interest entry: all outstanding Interests for one exact name.

    Each pending tuple is (future, lifetime, can_be_prefix, must_be_fresh).
    """

    pending_list: List[Tuple[aio.Future, int, bool, bool]] = None

    def __init__(self):
        self.pending_list = []

    def append_interest(self, future: aio.Future, param: InterestParam):
        entry = (future, param.lifetime, param.can_be_prefix, param.must_be_fresh)
        self.pending_list.append(entry)

    def nack_interest(self, nack_reason: int) -> bool:
        # Every waiter gets its own exception instance.
        for entry in self.pending_list:
            entry[0].set_exception(InterestNack(nack_reason))
        return True

    def satisfy(self, data, is_prefix: bool) -> bool:
        # A prefix match only satisfies Interests that allowed CanBePrefix;
        # an exact match satisfies everything.
        leftover = []
        for entry in self.pending_list:
            future, _, can_be_prefix, _ = entry
            if can_be_prefix or not is_prefix:
                future.set_result(data)
            else:
                leftover.append(entry)
        if leftover:
            # Keep the unsatisfied exact-match Interests; node stays alive.
            self.pending_list = leftover
            return False
        return True

    def timeout(self, future: aio.Future):
        remaining = [entry for entry in self.pending_list if entry[0] is not future]
        self.pending_list = remaining
        # True when nothing is pending anymore and the node can be dropped.
        return not remaining

    def cancel(self):
        for entry in self.pending_list:
            entry[0].cancel()
class PrefixTreeNode:
    # Handler invoked for Interests arriving under this registered prefix.
    callback: Route = None
    # Optional validator applied before dispatch; None means no validation.
    validator: Optional[Validator] = None
    # Extra keyword parameters recorded when the route was registered.
    extra_param: Dict = None
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
gdalcalc.py
---------------------
Date                 : January 2015
Copyright : (C) 2015 by Giovanni Manghi
Email : giovanni dot manghi at naturalgis dot pt
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Giovanni Manghi'
__date__ = 'January 2015'
__copyright__ = '(C) 2015, Giovanni Manghi'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
from qgis.core import (QgsProcessingException,
QgsProcessingParameterDefinition,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterBand,
QgsProcessingParameterNumber,
QgsProcessingParameterEnum,
QgsProcessingParameterString,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
from processing.tools.system import isWindows
class gdalcalc(GdalAlgorithm):
    """Processing algorithm wrapping GDAL's gdal_calc(.py) raster calculator.

    Builds a command line for up to six input rasters (A is mandatory,
    B-F optional), a gdalnumeric formula, an optional nodata value, the
    output type and creation options.
    """

    INPUT_A = 'INPUT_A'
    INPUT_B = 'INPUT_B'
    INPUT_C = 'INPUT_C'
    INPUT_D = 'INPUT_D'
    INPUT_E = 'INPUT_E'
    INPUT_F = 'INPUT_F'
    BAND_A = 'BAND_A'
    BAND_B = 'BAND_B'
    BAND_C = 'BAND_C'
    BAND_D = 'BAND_D'
    BAND_E = 'BAND_E'
    BAND_F = 'BAND_F'
    FORMULA = 'FORMULA'
    OUTPUT = 'OUTPUT'
    NO_DATA = 'NO_DATA'
    OPTIONS = 'OPTIONS'
    RTYPE = 'RTYPE'
    # Output data types, indexed by the RTYPE enum value (default 5 = Float32).
    TYPE = ['Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64']
    # Letters of the optional extra inputs; A is mandatory and handled apart.
    EXTRA_INPUTS = ('B', 'C', 'D', 'E', 'F')

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        # NOTE: the tr() arguments are kept as string literals (rather than
        # built in a loop) so translation string extraction keeps working.
        self.addParameter(
            QgsProcessingParameterRasterLayer(
                self.INPUT_A,
                self.tr('Input layer A'),
                optional=False))
        self.addParameter(
            QgsProcessingParameterBand(
                self.BAND_A,
                self.tr('Number of raster band for A'),
                parentLayerParameterName=self.INPUT_A))
        self.addParameter(
            QgsProcessingParameterRasterLayer(
                self.INPUT_B,
                self.tr('Input layer B'),
                optional=True))
        self.addParameter(
            QgsProcessingParameterBand(
                self.BAND_B,
                self.tr('Number of raster band for B'),
                parentLayerParameterName=self.INPUT_B,
                optional=True))
        self.addParameter(
            QgsProcessingParameterRasterLayer(
                self.INPUT_C,
                self.tr('Input layer C'),
                optional=True))
        self.addParameter(
            QgsProcessingParameterBand(
                self.BAND_C,
                self.tr('Number of raster band for C'),
                parentLayerParameterName=self.INPUT_C,
                optional=True))
        self.addParameter(
            QgsProcessingParameterRasterLayer(
                self.INPUT_D,
                self.tr('Input layer D'),
                optional=True))
        self.addParameter(
            QgsProcessingParameterBand(
                self.BAND_D,
                self.tr('Number of raster band for D'),
                parentLayerParameterName=self.INPUT_D,
                optional=True))
        self.addParameter(
            QgsProcessingParameterRasterLayer(
                self.INPUT_E,
                self.tr('Input layer E'),
                optional=True))
        self.addParameter(
            QgsProcessingParameterBand(
                self.BAND_E,
                self.tr('Number of raster band for E'),
                parentLayerParameterName=self.INPUT_E,
                optional=True))
        self.addParameter(
            QgsProcessingParameterRasterLayer(
                self.INPUT_F,
                self.tr('Input layer F'),
                optional=True))
        self.addParameter(
            QgsProcessingParameterBand(
                self.BAND_F,
                self.tr('Number of raster band for F'),
                parentLayerParameterName=self.INPUT_F,
                optional=True))
        self.addParameter(
            QgsProcessingParameterString(
                self.FORMULA,
                self.tr('Calculation in gdalnumeric syntax using +-/* or any numpy array functions (i.e. logical_and())'),
                'A*2',
                optional=False))
        self.addParameter(
            QgsProcessingParameterNumber(
                self.NO_DATA,
                self.tr('Set output nodata value'),
                type=QgsProcessingParameterNumber.Double,
                defaultValue=None,
                optional=True))
        self.addParameter(
            QgsProcessingParameterEnum(
                self.RTYPE,
                self.tr('Output raster type'),
                options=self.TYPE,
                defaultValue=5))
        # Creation options are advanced and use the raster options widget.
        options_param = QgsProcessingParameterString(self.OPTIONS,
                                                     self.tr('Additional creation options'),
                                                     defaultValue='',
                                                     optional=True)
        options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
        options_param.setMetadata({
            'widget_wrapper': {
                'class': 'processing.algs.gdal.ui.RasterOptionsWidget.RasterOptionsWidgetWrapper'}})
        self.addParameter(options_param)
        self.addParameter(
            QgsProcessingParameterRasterDestination(
                self.OUTPUT,
                self.tr('Calculated')))

    def name(self):
        return 'rastercalculator'

    def displayName(self):
        return self.tr('Raster calculator')

    def group(self):
        return self.tr('Raster miscellaneous')

    def groupId(self):
        return 'rastermiscellaneous'

    def commandName(self):
        return 'gdal_calc' if isWindows() else 'gdal_calc.py'

    def _appendInputArguments(self, parameters, context, letter, arguments, required=False):
        """Append the -<letter> / --<letter>_band arguments for one input.

        Helper for getConsoleCommands; replaces six copy-pasted blocks.
        Optional inputs that were not supplied are skipped silently;
        a supplied-but-invalid layer raises QgsProcessingException.
        """
        input_name = getattr(self, 'INPUT_' + letter)
        band_name = getattr(self, 'BAND_' + letter)
        if not required and (input_name not in parameters or parameters[input_name] is None):
            return
        layer = self.parameterAsRasterLayer(parameters, input_name, context)
        if layer is None:
            raise QgsProcessingException(self.invalidRasterError(parameters, input_name))
        arguments.append('-' + letter)
        arguments.append(layer.source())
        band = self.parameterAsString(parameters, band_name, context)
        if band:
            arguments.append('--{}_band '.format(letter) + band)

    def getConsoleCommands(self, parameters, context, feedback, executing=True):
        out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
        self.setOutputValue(self.OUTPUT, out)
        formula = self.parameterAsString(parameters, self.FORMULA, context)
        if self.NO_DATA in parameters and parameters[self.NO_DATA] is not None:
            noData = self.parameterAsDouble(parameters, self.NO_DATA, context)
        else:
            noData = None

        arguments = []
        arguments.append('--calc "{}"'.format(formula))
        arguments.append('--format')
        arguments.append(GdalUtils.getFormatShortNameFromFilename(out))
        arguments.append('--type')
        arguments.append(self.TYPE[self.parameterAsEnum(parameters, self.RTYPE, context)])
        if noData is not None:
            arguments.append('--NoDataValue')
            # Bug fix: the float was appended directly, but
            # GdalUtils.escapeAndJoin expects string arguments.
            arguments.append(str(noData))

        # Mandatory layer A first, then the optional layers B..F.
        self._appendInputArguments(parameters, context, 'A', arguments, required=True)
        for letter in self.EXTRA_INPUTS:
            self._appendInputArguments(parameters, context, letter, arguments)

        options = self.parameterAsString(parameters, self.OPTIONS, context)
        if options:
            arguments.extend(GdalUtils.parseCreationOptions(options))
        arguments.append('--outfile')
        arguments.append(out)
        return [self.commandName(), GdalUtils.escapeAndJoin(arguments)]
|
import os
import stat
from shutil import rmtree
from subprocess import check_call
def resolve_path(rel_path):
    """Return *rel_path* resolved to an absolute path relative to this script's directory."""
    base_dir = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(base_dir, rel_path))
def rmtree_silent(root):
    """Recursively delete *root*, forcing removal of read-only entries.

    Errors (including *root* not existing at all) are swallowed by the
    onerror handler, making the call best-effort and silent.
    """

    def force_remove(fn, path, excinfo):
        # Read-only files/dirs (common on Windows) make the first attempt
        # fail; mark the entry writable and retry once.
        if fn is os.rmdir:
            if os.path.isdir(path):  # if exists
                os.chmod(path, stat.S_IWRITE)  # make writable
                os.rmdir(path)
        elif fn is os.remove:
            if os.path.isfile(path):  # if exists
                os.chmod(path, stat.S_IWRITE)  # make writable
                os.remove(path)

    rmtree(root, onerror=force_remove)
def makedirs_silent(root):
    """Create directory *root* (and parents), ignoring it if it already exists.

    Previously a blanket ``except OSError: pass`` also hid real failures
    such as permission errors; ``exist_ok=True`` mutes only the
    already-exists case, which is what the original comment intended.
    """
    os.makedirs(root, exist_ok=True)
if __name__ == "__main__":
    # Recreate a clean ./build directory next to this script.
    build_dir = resolve_path("./build")
    rmtree_silent(build_dir)
    makedirs_silent(build_dir)
    os.chdir(build_dir)
    # Configure with the Emscripten CMake toolchain (path expanded from
    # environment variables), then build.
    check_call([
        "cmake",
        os.path.expandvars("-DCMAKE_TOOLCHAIN_FILE=emsdk/emscripten/1.38.18/cmake/Modules/Platform/Emscripten.cmake"),
        "-DCMAKE_BUILD_TYPE=Release",
        "-DCMAKE_MAKE_PROGRAM=make",
        "-G", "Unix Makefiles",
        ".."
    ])
    check_call(["make"])
    # Copy the generated JS bundle into both consuming app trees.
    check_call(["cp","aicpp.js", "../../../../web/troikajs/src/app/emscripten/aicpp.js"])
    check_call(["cp","aicpp.js", "../../../../app/troikajs/src/app/emscripten/aicpp.js"])
|
# coding: utf8
# web2py-style language file: maps source strings to their translations.
{
'Tamil': 'தமிழ்',
}
|
# -*- coding: utf-8 -*-
from PyQt5 import QtWidgets
def clear_layout(layout):
    """Remove every item from *layout*.

    Spacer items are detached from the layout; widget items have their
    widget re-parented to None, which removes it from the layout on the
    Qt side. Items holding nested sub-layouts previously matched neither
    isinstance check and were skipped (leaking their widgets); they are
    now cleared recursively and removed.
    """
    # Iterate in reverse so removals do not shift pending indices.
    for i in reversed(range(layout.count())):
        item = layout.itemAt(i)
        if isinstance(item, QtWidgets.QSpacerItem):
            layout.removeItem(item)
        elif isinstance(item, QtWidgets.QWidgetItem):
            item.widget().setParent(None)
        elif item.layout() is not None:
            clear_layout(item.layout())
            layout.removeItem(item)
|
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import unittest
import numpy as np
from numpy.testing import assert_equal
import graspologic.subgraph as sg
class TestEstimateSubgraph(unittest.TestCase):
    """Unit tests for graspologic.subgraph.SignalSubgraph."""

    def test_estimate_subgraph_coh(self):
        # Coherent estimation: constraint [5, 1] = 5 edges incident to 1 vertex.
        # Half of the 10 graphs carry the signal pattern (zeros in column 0).
        ys = np.array([0, 1, 0, 1, 0, 1, 0, 1, 0, 1])
        blank = np.ones((10, 10))
        blank[1:6, 0] = 0
        A = np.ones((10, 10, 10))
        for ind in range(10):
            if ys[ind] == 1:
                A[:, :, ind] = blank
        test_model = sg.SignalSubgraph()
        estsub = test_model.fit_transform(A, ys, [5, 1])
        # The estimated subgraph indices should recover exactly the zeroed edges.
        ver = np.ones((10, 10))
        ver[estsub] = 0
        np.testing.assert_array_equal(blank, ver)

    def test_estimate_subgraph_inc(self):
        # Incoherent estimation: a plain integer constraint (5 edges).
        ys = np.array([0, 1, 0, 1, 0, 1, 0, 1, 0, 1])
        blank = np.ones((10, 10))
        blank[1:6, 0] = 0
        A = np.ones((10, 10, 10))
        for ind in range(10):
            if ys[ind] == 1:
                A[:, :, ind] = blank
        test_model = sg.SignalSubgraph()
        estsub = test_model.fit_transform(A, ys, 5)
        ver = np.ones((10, 10))
        ver[estsub] = 0
        np.testing.assert_array_equal(blank, ver)

    def test_fit_bad_constraints(self):
        # Constraint lists must have exactly 2 elements for coherent estimation.
        A = np.ones((5, 5, 5))
        ys = np.ones(5)
        test_model = sg.SignalSubgraph()
        with self.assertRaises(TypeError):
            test_model.fit(A, ys, [1])
        with self.assertRaises(TypeError):
            test_model.fit(A, ys, [1, 1, 1])

    def test_construct_contingency(self):
        # Private helper: builds the per-edge contingency table of
        # edge-presence vs class label.
        A = np.ones((1, 1, 5))
        A[:, :, 1::2] = 0
        ys = np.array([1, 0, 1, 0, 0])
        test_model = sg.SignalSubgraph()
        test_model.fit(A, ys, 1)
        test_model._SignalSubgraph__construct_contingency()
        cmat = test_model.contmat_
        ver = np.array([[[[1, 2], [2, 0]]]], dtype=float)
        np.testing.assert_array_equal(cmat, ver)

    def test_fit_bad_type(self):
        # Inputs must be numpy arrays, not nested lists or sets.
        A = [[[1 for i in range(5)] for j in range(5)] for k in range(5)]
        ys = [1, 1, 1, 1, 1]
        test_model = sg.SignalSubgraph()
        with self.assertRaises(TypeError):
            test_model.fit(A, np.ones(5), 1)
        with self.assertRaises(TypeError):
            test_model.fit(A, set(ys), 1)

    def test_fit_bad_size(self):
        # The graph stack must be 3-D with square first two dimensions.
        test_model = sg.SignalSubgraph()
        with self.assertRaises(ValueError):
            test_model.fit(np.ones((5, 5)), np.ones(5), 1)
        with self.assertRaises(ValueError):
            test_model.fit(np.ones((3, 4, 2)), np.ones(2), 1)

    def test_fit_bad_len(self):
        # Labels must be a 1-D binary vector matching the number of graphs.
        A = np.ones((3, 3, 3))
        test_model = sg.SignalSubgraph()
        with self.assertRaises(ValueError):
            test_model.fit(A, np.ones((3, 3)), 1)
        with self.assertRaises(ValueError):
            test_model.fit(A, np.array([0, 1, 2]), 1)
        with self.assertRaises(ValueError):
            test_model.fit(A, np.ones(2), 1)
|
# SPDX-FileCopyrightText: 2021 easyDiffraction contributors <support@easydiffraction.org>
# SPDX-License-Identifier: BSD-3-Clause
# © 2021 Contributors to the easyDiffraction project <https://github.com/easyScience/easyDiffractionApp>
__author__ = "github.com/AndrewSazonov"
__version__ = '0.0.1'
import os, sys
import importlib
import Functions, Config
CONFIG = Config.Config()
def pythonLibLocation():
    """Return the Python lib directory: argv[1]/lib when given, else '@rpath'."""
    if len(sys.argv) <= 1:
        return '@rpath'
    return os.path.join(sys.argv[1], 'lib')
def pythonDylib():
    """Full path of the Python dylib for the current OS (None on Windows)."""
    names = {
        # 'macos': 'Python',
        'macos': 'libpython3.7m.dylib',
        'ubuntu': 'libpython3.7m.so.1.0',
        'windows': None,
    }
    dylib_name = names[CONFIG.os]
    if dylib_name is None:
        return None
    return os.path.join(pythonLibLocation(), dylib_name)
def crysfmlPythonDylib():
    """Python dylib path that the CrysFML binary was originally linked against."""
    paths = {
        #'macos': '/Library/Frameworks/Python.framework/Versions/3.7/Python',
        'macos': '/usr/local/Cellar/python@3.7/3.7.9/Frameworks/Python.framework/Versions/3.7/lib/libpython3.7m.dylib',
        'ubuntu': 'libpython3.7m.so.1.0',
        'windows': None,
    }
    return paths[CONFIG.os]
def rpath():
    """Run-time library search path to embed for the current OS (None on Windows)."""
    return {
        'macos': '@executable_path',
        'ubuntu': './libsLinux/lib',
        'windows': None,
    }[CONFIG.os]
#def crysfmlRpath():
# d = {
# 'macos': '/opt/intel//compilers_and_libraries_2020.2.258/mac/compiler/lib',
# 'ubuntu': None,
# 'windows': None
# }
# return d[CONFIG.os]
def crysfmlSoFile():
    """Absolute path of crysfml_api.so inside the installed CFML_api package."""
    # Resolve the package location first (same side-effect order as before:
    # the import happens even when no .so name exists for this OS).
    package = CONFIG['ci']['pyinstaller']['libs'][CONFIG.os]
    package_path = importlib.import_module(package).__path__[0]
    so_location = os.path.join(package_path, 'CFML_api')
    so_name = {
        'macos': 'crysfml_api.so',
        'ubuntu': 'crysfml_api.so',
        'windows': None,
    }[CONFIG.os]
    if so_name is None:
        return None
    return os.path.join(so_location, so_name)
def relinkCrysfml():
    """Rewrite the Python dylib reference inside crysfml_api.so so the bundled
    interpreter is found at run time instead of the build machine's Python.

    No-op on Windows; the ubuntu branch is deliberately disabled (the
    '---ubuntu' literal can never match CONFIG.os).
    """
    if CONFIG.os == 'windows':
        Functions.printNeutralMessage(f'No CrysFML relinking is needed for platform {CONFIG.os}')
        return
    # Log the resolved paths up front for CI debugging.
    Functions.printNeutralMessage(f"pythonLibLocation: {pythonLibLocation()}")
    Functions.printNeutralMessage(f"crysfmlPythonDylib: {crysfmlPythonDylib()}")
    Functions.printNeutralMessage(f"pythonDylib: {pythonDylib()}")
    #Functions.printNeutralMessage(f"crysfmlRpath: {crysfmlRpath()}")
    Functions.printNeutralMessage(f"rpath: {rpath()}")
    Functions.printNeutralMessage(f"crysfmlSoFile: {crysfmlSoFile()}")
    try:
        # 'message' is set before any call that can raise, so the except
        # clause below can always reference it.
        message = f'relink CrysFML from default Python dylib for platform {CONFIG.os}'
        if CONFIG.os == 'macos':
            # Dump load commands before and after for comparison in CI logs.
            Functions.run('otool', '-l', crysfmlSoFile())
            Functions.run('otool', '-L', crysfmlSoFile())
            #Functions.run('install_name_tool', '-rpath', crysfmlRpath(), rpath(), crysfmlSoFile())
            ##Functions.run('install_name_tool', '-add_rpath', rpath(), crysfmlSoFile())
            ##Functions.run('install_name_tool', '-add_rpath', pythonLibLocation(), crysfmlSoFile())
            Functions.run('install_name_tool', '-change', crysfmlPythonDylib(), pythonDylib(), crysfmlSoFile())
            Functions.run('otool', '-l', crysfmlSoFile())
            Functions.run('otool', '-L', crysfmlSoFile())
        elif CONFIG.os == '---ubuntu':
            # NOTE(review): this branch is unreachable as written ('---ubuntu'
            # never equals CONFIG.os); it appears to be parked for later use.
            Functions.run('sudo', 'apt-get', 'update', '-y')
            Functions.run('sudo', 'apt-get', 'install', '-y', 'patchelf')
            Functions.run('sudo', 'apt-get', 'install', '-y', 'chrpath')
            # Python lib
            Functions.run('chrpath', '--list', crysfmlSoFile())
            Functions.run('patchelf', '--set-rpath', rpath(), crysfmlSoFile())
            #Functions.run('patchelf', '--replace-needed', crysfmlPythonDylib(), pythonDylib(), crysfmlSoFile())
            # Intel fortran libs
            # Instead of LD_LIBRARY_PATH...
            #import libsLinux
            #lib_path = os.path.join(list(libsLinux.__path__)[0], 'lib')
            #libs = ['libifcoremt.so.5', 'libifport.so.5', 'libimf.so', 'libintlc.so.5', 'libsvml.so']
            #for lib in libs:
            #    Functions.run('patchelf', '--replace-needed', lib, os.path.join(lib_path, lib), crysfmlSoFile())
            # https://nehckl0.medium.com/creating-relocatable-linux-executables-by-setting-rpath-with-origin-45de573a2e98
            # https://github.com/microsoft/ShaderConductor/issues/52
            # https://unix.stackexchange.com/questions/479421/how-to-link-to-a-shared-library-with-a-relative-path
        else:
            Functions.printFailMessage(f'Platform {CONFIG.os} is unsupported')
    except Exception as exception:
        Functions.printFailMessage(message, exception)
        sys.exit()
    else:
        Functions.printSuccessMessage(message)
# Entry point: relink CrysFML's shared object when run as a script.
if __name__ == "__main__":
    relinkCrysfml()
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetAccountResult:
    """
    A collection of values returned by getAccount.

    Attributes:
        account_id: The service account id (portion of the email before '@').
        display_name: The display name for the service account.
        email: The e-mail address of the service account. This value should
            be referenced from any `organizations.getIAMPolicy` data sources
            that would grant the service account privileges.
        name: The fully-qualified name of the service account.
        project: The project the service account belongs to.
        unique_id: The unique id of the service account.
        id: id is the provider-assigned unique ID for this managed resource.
    """
    def __init__(__self__, account_id=None, display_name=None, email=None, name=None, project=None, unique_id=None, id=None):
        # Validate and assign each field in declaration order, matching the
        # generated per-field checks (truthy non-strings are rejected).
        fields = [
            ('account_id', account_id),
            ('display_name', display_name),
            ('email', email),
            ('name', name),
            ('project', project),
            ('unique_id', unique_id),
            ('id', id),
        ]
        for field_name, value in fields:
            if value and not isinstance(value, str):
                raise TypeError("Expected argument '%s' to be a str" % field_name)
            setattr(__self__, field_name, value)
class AwaitableGetAccountResult(GetAccountResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` makes this method a generator, which is
        # what makes instances awaitable; awaiting immediately returns a
        # plain GetAccountResult copy of the resolved values.
        if False:
            yield self
        return GetAccountResult(
            account_id=self.account_id,
            display_name=self.display_name,
            email=self.email,
            name=self.name,
            project=self.project,
            unique_id=self.unique_id,
            id=self.id)
def get_account(account_id=None,project=None,opts=None):
    """
    Get the service account from a project. For more information see
    the official [API](https://cloud.google.com/compute/docs/access/service-accounts) documentation.

    :param str account_id: The Service account id. (This is the part of the service account's email field that comes before the @ symbol.)
    :param str project: The ID of the project that the service account is present in.
           Defaults to the provider project configuration.

    > This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/d/service_account.html.markdown.
    """
    # Build the invoke arguments in the provider's camelCase wire format.
    __args__ = dict()
    __args__['accountId'] = account_id
    __args__['project'] = project
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = utilities.get_version()
    # Synchronous provider invoke; .value is the resolved result dict.
    __ret__ = pulumi.runtime.invoke('gcp:serviceAccount/getAccount:getAccount', __args__, opts=opts).value

    return AwaitableGetAccountResult(
        account_id=__ret__.get('accountId'),
        display_name=__ret__.get('displayName'),
        email=__ret__.get('email'),
        name=__ret__.get('name'),
        project=__ret__.get('project'),
        unique_id=__ret__.get('uniqueId'),
        id=__ret__.get('id'))
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from skimage import io as skio
from skimage import img_as_ubyte
from skimage.color import rgba2rgb
from gt_converter.convert_coco import CocoConverter
def test_segmentated_image():
    """
    Test a single image coco annotation
    """
    converter = CocoConverter()
    start_annotation_id = 5
    # RGBA color tuples (as strings) mapped to COCO category ids.
    category_ids = {
        "(255, 127, 14)": 0,
        "(31, 119, 180)": 1,
        "(44, 160, 44)": 2,
    }
    rgb = rgba2rgb(skio.imread("test/data/img1_annotated.png"))
    img = img_as_ubyte(rgb)
    next_annotation_id, result = converter._annotate_single_image(
        image=img,
        image_id=0,
        category_ids=category_ids,
        current_annotation_id=start_annotation_id,
    )
    # Two annotations in this image advance the id counter from 5 to 7.
    assert next_annotation_id == 7
    print(result)
def test_segmentation_job_conversion(tmpdir):
    """
    This test will only pass with credentials for GT labeling job and S3 bucket.
    """
    output_path = tmpdir + "output.json"
    converter = CocoConverter()
    converter.convert_job("gt-converter-demo-job", output_coco_json_path=output_path)
    with open(output_path, "r") as outfile:
        print(outfile.readlines())
def test_boundingbox_job_conversion(tmpdir):
    """
    This test will only pass with credentials for GT labeling job and S3 bucket.
    """
    output_path = tmpdir + "output.json"
    converter = CocoConverter()
    converter.convert_job("gt-converter-demo-job-boundingbox", output_coco_json_path=output_path)
    with open(output_path, "r") as outfile:
        print(outfile.readlines())
def test_videotracking_job_conversion(tmpdir):
    """
    This test will only pass with credentials for GT labeling job and S3 bucket.
    """
    output_path = tmpdir + "output.json"
    converter = CocoConverter()
    converter.convert_job("MOT20example-clone", output_coco_json_path=output_path)
    with open(output_path, "r") as outfile:
        print(outfile.readlines())
# Bug fix: this call previously ran at import time, so merely collecting the
# module with pytest kicked off a full credentialed conversion job. Keep it
# available as a manual smoke run only.
if __name__ == "__main__":
    test_videotracking_job_conversion("/tmp/")
|
import shutil
import tempfile
import os
import artm
def test_func():
    """End-to-end hARTM test: fit a two-level hierarchy on the 'kos' corpus
    with hierarchy-sparsing regularization, then check Phi shape and the
    minimum per-row support of the Psi (parent-child) matrix.
    """
    # constants
    num_tokens = 15
    parent_level_weight = 1
    num_collection_passes = 15
    num_document_passes = 10
    num_topics_level0 = 15
    num_topics_level1 = 50
    regularizer_tau = 10 ** 5
    vocab_size = 6906
    num_docs = 3430
    zero_eps = 0.001

    data_path = os.environ.get('BIGARTM_UNITTEST_DATA')
    batches_folder = tempfile.mkdtemp()
    parent_batch_folder = tempfile.mkdtemp()
    try:
        batch_vectorizer = artm.BatchVectorizer(data_path=data_path,
                                                data_format='bow_uci',
                                                collection_name='kos',
                                                target_folder=batches_folder)
        dictionary = artm.Dictionary()
        dictionary.gather(data_path=batch_vectorizer.data_path)

        hier = artm.hARTM(dictionary=dictionary, cache_theta=True, num_document_passes=num_document_passes)
        level0 = hier.add_level(num_topics=num_topics_level0)
        level0.initialize(dictionary=dictionary)
        level0.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=num_collection_passes)

        hier.tmp_files_path = parent_batch_folder
        level1 = hier.add_level(num_topics=num_topics_level1, parent_level_weight=parent_level_weight)
        level1.initialize(dictionary=dictionary)
        level1.regularizers.add(artm.HierarchySparsingThetaRegularizer(name="HierSp", tau=regularizer_tau))
        level1.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=num_collection_passes)

        phi = hier.get_level(1).get_phi()
        assert phi.shape == (vocab_size, num_topics_level1)

        # theta = hier.get_level(1).get_theta()
        # assert theta.shape == (num_topics_level1, num_docs)

        psi = hier.get_level(1).get_psi()
        support = psi.values.max(axis=1).min()
        # This test gives different results on python27 and python35.
        # Authors need to investigate.
        # Bug fix: the closing parenthesis of abs() was misplaced
        # (abs(support - X < eps) takes abs of a boolean), which made the
        # check one-sided instead of a symmetric tolerance comparison.
        on_python_27 = abs(support - 0.0978) < zero_eps
        on_python_35 = abs(support - 0.1522) < zero_eps
        assert(on_python_27 or on_python_35)
    finally:
        shutil.rmtree(batches_folder)
        shutil.rmtree(parent_batch_folder)
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import sys
from telemetry.core import util
from telemetry.page import block_page_measurement_results
from telemetry.page import buildbot_page_measurement_results
from telemetry.page import csv_page_measurement_results
from telemetry.page import gtest_test_results
from telemetry.page import html_page_measurement_results
from telemetry.page import page_measurement
from telemetry.page import page_measurement_results
# Allowed output formats. The default is the first item in the list.
_OUTPUT_FORMAT_CHOICES = ('html', 'buildbot', 'block', 'csv', 'gtest', 'none')
def AddResultsOptions(parser):
    """Add the 'Results options' group (--output-format, -o/--output,
    --output-trace-tag) to *parser* (an optparse.OptionParser)."""
    group = optparse.OptionGroup(parser, 'Results options')
    group.add_option('--output-format',
                     default=_OUTPUT_FORMAT_CHOICES[0],
                     choices=_OUTPUT_FORMAT_CHOICES,
                     # Bug fix: optparse expands the literal '%default'
                     # (unlike argparse, no %% escaping); '%%default'
                     # rendered as a stray '%' plus the default value.
                     help='Output format. Defaults to "%default". '
                          'Can be %s.' % ', '.join(_OUTPUT_FORMAT_CHOICES))
    group.add_option('-o', '--output',
                     dest='output_file',
                     help='Redirects output to a file. Defaults to stdout.')
    group.add_option('--output-trace-tag',
                     default='',
                     help='Append a tag to the key of each result trace.')
    parser.add_option_group(group)
def PrepareResults(test, options):
  """Builds and returns the measurement-results object selected by
  options.output_format, wired to the chosen output stream.

  Raises an Exception for an unknown format (normally unreachable because
  the option parser restricts the choices).
  """
  if not isinstance(test, page_measurement.PageMeasurement):
    # Sort of hacky. The default for non-Measurements should be "gtest."
    if options.output_format != 'none':
      options.output_format = 'gtest'

  # The html format needs a concrete file target when none was given.
  if options.output_format == 'html' and not options.output_file:
    options.output_file = os.path.join(util.GetBaseDir(), 'results.html')

  # Pick the destination stream: the requested file, or stdout.
  if hasattr(options, 'output_file') and options.output_file:
    expanded_path = os.path.expanduser(options.output_file)
    open(expanded_path, 'a').close()  # Create file if it doesn't exist.
    output_stream = open(expanded_path, 'r+')
  else:
    output_stream = sys.stdout

  # Fill in defaults for callers that built |options| by hand.
  if not hasattr(options, 'output_format'):
    options.output_format = _OUTPUT_FORMAT_CHOICES[0]
  if not hasattr(options, 'output_trace_tag'):
    options.output_trace_tag = ''

  fmt = options.output_format
  if fmt == 'none':
    return page_measurement_results.PageMeasurementResults(
        trace_tag=options.output_trace_tag)
  if fmt == 'csv':
    return csv_page_measurement_results.CsvPageMeasurementResults(
        output_stream,
        test.results_are_the_same_on_every_page)
  if fmt == 'block':
    return block_page_measurement_results.BlockPageMeasurementResults(
        output_stream)
  if fmt == 'buildbot':
    return buildbot_page_measurement_results.BuildbotPageMeasurementResults(
        trace_tag=options.output_trace_tag)
  if fmt == 'gtest':
    return gtest_test_results.GTestTestResults(output_stream)
  if fmt == 'html':
    return html_page_measurement_results.HtmlPageMeasurementResults(
        output_stream, test.__class__.__name__, options.browser_type,
        trace_tag=options.output_trace_tag)
  # Should never be reached. The parser enforces the choices.
  raise Exception('Invalid --output-format "%s". Valid choices are: %s'
                  % (fmt, ', '.join(_OUTPUT_FORMAT_CHOICES)))
|
import glob
import os
import pika
from pymongo import MongoClient
from ConfigParser import ConfigParser
def make_coll(db_auth, db_user, db_pass, mongo_server_ip='127.0.0.1'):
    """
    Function to establish a connection to a MongoDB instance and return the
    ``event_scrape.stories`` collection.

    Parameters
    ----------
    db_auth: String.
        MongoDB database that should be used for user authentication.
        Falsy to skip authentication entirely.
    db_user: String.
        Username for MongoDB authentication.
    db_pass: String.
        Password for MongoDB authentication.
    mongo_server_ip: String, optional.
        Address of the MongoDB server. Defaults to localhost.

    Returns
    -------
    collection: pymongo.collection.Collection.
        Collection within MongoDB that holds the scraped news stories.
    """
    connection = MongoClient(mongo_server_ip)
    # Authenticate only when an auth database was supplied.
    if db_auth:
        connection[db_auth].authenticate(db_user, db_pass)
    db = connection.event_scrape
    collection = db['stories']
    return collection
def make_queue(host='localhost'):
    """Connect to a RabbitMQ broker, declare the durable 'scraper_queue',
    and return the channel it was declared on."""
    params = pika.ConnectionParameters(host=host)
    channel = pika.BlockingConnection(params).channel()
    # durable=True so queued work survives a broker restart.
    channel.queue_declare(queue='scraper_queue', durable=True)
    return channel
def parse_config():
    """Function to parse the config file.

    Prefers a user-supplied ``config.ini`` in the working directory and
    falls back to the ``default_config.ini`` shipped next to this module.
    Returns a flat {option: value} dict; ``proxy_list`` is always a list.
    """
    config_file = glob.glob('config.ini')
    if config_file:
        print 'Found a config file in working directory'
    else:
        cwd = os.path.abspath(os.path.dirname(__file__))
        config_file = os.path.join(cwd, 'default_config.ini')
        print 'No config found. Using default.'
    # Flatten every section of the ini file into a single dict.
    config_dict = dict()
    parser = ConfigParser(allow_no_value=True)
    parser.read(config_file)
    for section in parser.sections():
        for option in parser.options(section):
            config_dict[option] = parser.get(section, option)
    # Handle the proxy list info: comma-separated string in the config,
    # empty list when absent or unset.
    plist = config_dict.get('proxy_list')
    config_dict['proxy_list'] = plist.split(',') if type(plist) is str else []
    return config_dict
|
from __future__ import print_function, division
import os
from ..hdf5 import dendro_import_hdf5
# Directory holding the HDF5 test fixtures, resolved relative to this file.
DATA = os.path.join(os.path.dirname(__file__), 'data')
def test_import_old():
    # Check that we are backward-compatible: an HDF5 file written by an
    # older version of the format must still load without raising.
    dendro_import_hdf5(os.path.join(DATA, 'dendro_old.hdf5'))
def test_import():
    # Smoke test: a current-format HDF5 dendrogram must load without raising.
    dendro_import_hdf5(os.path.join(DATA, 'dendro.hdf5'))
|
# -*- coding: UTF-8 -*-
# File: concurrency.py
# Author: Yuxin Wu <ppwwyyxx@gmail.com>
# Credit belongs to Xinyu Zhou
import threading
import multiprocessing
import atexit
import bisect
from contextlib import contextmanager
import signal
import weakref
import six
if six.PY2:
import subprocess32 as subprocess
else:
import subprocess
from six.moves import queue
from . import logger
__all__ = ['StoppableThread', 'LoopThread', 'ensure_proc_terminate',
'OrderedResultGatherProc', 'OrderedContainer', 'DIE',
'mask_sigint', 'start_proc_mask_signal']
class StoppableThread(threading.Thread):
    """
    A thread carrying a 'stop' event, plus queue helpers that keep retrying
    their blocking operation until it succeeds or the thread is stopped.
    """

    def __init__(self):
        super(StoppableThread, self).__init__()
        self._stop_evt = threading.Event()

    def stop(self):
        """Ask the thread to stop by setting the stop event."""
        self._stop_evt.set()

    def stopped(self):
        """Return True once stop() has been called."""
        return self._stop_evt.is_set()

    def queue_put_stoppable(self, q, obj):
        """Put obj into q, retrying on a full queue until the thread stops."""
        while not self.stopped():
            try:
                q.put(obj, timeout=5)
            except queue.Full:
                continue
            return

    def queue_get_stoppable(self, q):
        """Take one item from q, retrying on an empty queue until the thread stops."""
        while not self.stopped():
            try:
                return q.get(timeout=5)
            except queue.Empty:
                continue
class LoopThread(StoppableThread):
    """A pausable daemon thread that repeatedly invokes a single function."""

    def __init__(self, func, pausable=True):
        """
        :param func: the callable executed on every loop iteration
        :param pausable: when True, pause()/resume() are honoured via a lock
        """
        super(LoopThread, self).__init__()
        self._func = func
        self._pausable = pausable
        if pausable:
            self._lock = threading.Lock()
        self.daemon = True

    def run(self):
        while not self.stopped():
            if self._pausable:
                # Blocks here while pause() is holding the lock.
                self._lock.acquire()
                self._lock.release()
            self._func()

    def pause(self):
        """Suspend the loop before its next iteration."""
        assert self._pausable
        self._lock.acquire()

    def resume(self):
        """Let a paused loop continue."""
        assert self._pausable
        self._lock.release()
class DIE(object):
    """ A placeholder class indicating end of queue """
    # Producers enqueue (DIE, ...) as an end-of-stream sentinel; consumers
    # compare task ids against this class object itself, not an instance.
    pass
def ensure_proc_terminate(proc):
    """Register an atexit hook that terminates *proc* (or each process in a
    list of processes) if it is still alive when the interpreter exits."""
    if isinstance(proc, list):
        for each in proc:
            ensure_proc_terminate(each)
        return

    def _terminate_via_weakref(ref):
        # The weakref keeps atexit from pinning the process object alive.
        target = ref()
        if target is None or not target.is_alive():
            return
        target.terminate()
        target.join()

    assert isinstance(proc, multiprocessing.Process)
    atexit.register(_terminate_via_weakref, weakref.ref(proc))
@contextmanager
def mask_sigint():
    """Context manager that ignores SIGINT for the duration of the block.

    The previous handler is restored on exit. The original implementation
    skipped restoration when the body raised; the try/finally guarantees
    the handler is reinstated on every exit path.
    """
    sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
    try:
        yield
    finally:
        signal.signal(signal.SIGINT, sigint_handler)
def start_proc_mask_signal(proc):
    """Start the given process(es) while SIGINT is ignored, so children
    inherit the ignored handler and Ctrl-C only reaches the parent."""
    procs = proc if isinstance(proc, list) else [proc]
    with mask_sigint():
        for each in procs:
            each.start()
def subproc_call(cmd, timeout=None):
    """Run *cmd* in a shell and return its combined stdout/stderr as bytes.

    :param cmd: command string, executed with shell=True
    :param timeout: optional seconds before the command is aborted
    :returns: the command output, or None (implicitly) on timeout or a
        non-zero exit status, after logging a warning.
    """
    try:
        output = subprocess.check_output(
            cmd, stderr=subprocess.STDOUT,
            shell=True, timeout=timeout)
        return output
    except subprocess.TimeoutExpired as e:
        logger.warn("Command timeout!")
        logger.warn(e.output)
    except subprocess.CalledProcessError as e:
        # Fixed typo in the log message ("Commnad" -> "Command").
        logger.warn("Command failed: {}".format(e.returncode))
        logger.warn(e.output)
class OrderedContainer(object):
    """
    Buffers (rank, value) pairs and releases them strictly in rank order:
    item (x+2) is never produced before item (x+1) has been consumed.
    """

    def __init__(self, start=0):
        # ranks and data stay index-aligned and sorted by rank.
        self.ranks = []
        self.data = []
        self.wait_for = start

    def put(self, rank, val):
        """Insert a value at its rank-sorted position."""
        pos = bisect.bisect(self.ranks, rank)
        self.ranks.insert(pos, rank)
        self.data.insert(pos, val)

    def has_next(self):
        """True when the next expected rank is ready to be consumed."""
        return bool(self.ranks) and self.ranks[0] == self.wait_for

    def get(self):
        """Pop and return (rank, value) for the next expected rank."""
        assert self.has_next()
        rank = self.ranks.pop(0)
        value = self.data.pop(0)
        self.wait_for += 1
        return rank, value
class OrderedResultGatherProc(multiprocessing.Process):
    """
    Gathers (index, data) pairs from a data queue and re-emits them on a
    result queue in the original index-based order.
    """

    def __init__(self, data_queue, nr_producer, start=0):
        """
        :param data_queue: a multiprocessing.Queue to produce input dp
        :param nr_producer: number of producer processes. Will terminate after receiving this many of DIE sentinel.
        :param start: the first task index
        """
        super(OrderedResultGatherProc, self).__init__()
        self.data_queue = data_queue
        self.ordered_container = OrderedContainer(start=start)
        self.result_queue = multiprocessing.Queue()
        self.nr_producer = nr_producer

    def run(self):
        finished_producers = 0
        try:
            while True:
                task_id, data = self.data_queue.get()
                if task_id != DIE:
                    # Buffer the item; flush everything that is now in order.
                    self.ordered_container.put(task_id, data)
                    while self.ordered_container.has_next():
                        self.result_queue.put(self.ordered_container.get())
                    continue
                # A producer signalled completion; forward the sentinel.
                self.result_queue.put((task_id, data))
                finished_producers += 1
                if finished_producers == self.nr_producer:
                    return
        except Exception as e:
            import traceback
            traceback.print_exc()
            raise e

    def get(self):
        """Block until the next in-order result is available and return it."""
        return self.result_queue.get()
|
class POI:
    """A point of interest; its display type and weight are resolved from the
    database when no explicit weight is supplied."""

    def __init__(self, google_places_id, name, latitude, longitude, weight_determining_type, weight=-1):
        self.google_places_id = google_places_id
        self.name = name
        self.latitude = latitude
        self.longitude = longitude
        self.type = weight_determining_type
        self.weight = weight
        # A sentinel weight of -1 means "look the weight up in the database".
        if weight == -1:
            self.get_weight_determining_type()

    def get_weight_determining_type(self):
        """Resolve self.type (a list of candidate types) to the single
        heaviest type found in the 'types' table and record its weight."""
        from Database import Database
        db = Database('transit_planner.db')
        heaviest_type = ''
        heaviest_weight = -1
        for candidate in self.type:
            db.cursor.execute('SELECT * FROM types WHERE type = ?', (candidate,))
            for row in db.cursor.fetchall():
                if row[1] > heaviest_weight:
                    heaviest_weight = row[1]
                    heaviest_type = row[0]
        self.type = heaviest_type
        self.weight = heaviest_weight
        db.close()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# MEASURE - Master Equation Automatic Solver for Unimolecular REactions
#
# Copyright (c) 2010 by Joshua W. Allen (jwallen@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This is the primary MEASURE module. To run MEASURE, invoke this script
via ::
$ python measure.py FILE
where ``FILE`` is the path to a valid MEASURE input file describing the job
to be run and providing the necessary information about the unimolecular
reaction network. Other command-line arguments control the level of
verbosity of information printed to the console.
"""
import argparse
import logging
import time
import os.path
import numpy
################################################################################
def parseCommandLineArguments():
    """
    Parse the command-line arguments being passed to MEASURE. These are
    described in the module docstring.
    """
    parser = argparse.ArgumentParser(description="""
    Master Equation Automatic Solver for Unimolecular REactions (MEASURE):
    A tool for estimating pressure-dependent phenomenological rate
    coefficients k(T,P) for unimolecular reaction networks of arbitrary
    size and complexity using the master equation. Multiple methods of
    varying accuracy, speed, and robustness are available for determining
    the k(T,P) values. The output is a set of k(T,P) functions suitable for
    use in chemical kinetics mechanisms.
    """)

    # The input file is the only required argument.
    parser.add_argument(
        'file', metavar='FILE', type=str, nargs=1,
        help='a file containing information about the network')

    # Drawing the PES and producing an output file are mutually exclusive.
    drawing_or_output = parser.add_mutually_exclusive_group()
    drawing_or_output.add_argument(
        '-d', '--draw', metavar='IMGFILE', type=str, nargs=1,
        help='draw potential energy surface and exit')
    drawing_or_output.add_argument(
        '-o', '--output', metavar='OUTFILE', type=str, nargs=1,
        help='specify location of output file')

    # Options for controlling the amount of information printed to the console
    # By default a moderate level of information is printed; you can either
    # ask for less (quiet), more (verbose), or much more (debug)
    verbosity = parser.add_mutually_exclusive_group()
    verbosity.add_argument('-q', '--quiet', action='store_true', help='only print warnings and errors')
    verbosity.add_argument('-v', '--verbose', action='store_true', help='print more verbose output')

    return parser.parse_args()
################################################################################
def initializeLogging(args):
    """
    Initialize the logging system. The level of information printed is
    determined from ``args.quiet`` and ``args.verbose`` (an object returned
    by the ``argparse`` module): quiet -> WARNING, verbose -> DEBUG,
    otherwise INFO. All output is routed to stdout.
    """
    import sys

    # Map the command-line verbosity flags to a logging level.
    if args.quiet:
        level = logging.WARNING
    elif args.verbose:
        level = logging.DEBUG
    else:
        level = logging.INFO

    # Reassign the level names so that they look better on printing.
    for lvl, label in ((logging.CRITICAL, 'CRITICAL: '),
                       (logging.ERROR, 'ERROR: '),
                       (logging.WARNING, 'Warning: '),
                       (logging.INFO, ''),
                       (logging.DEBUG, '')):
        logging.addLevelName(lvl, label)

    root = logging.getLogger()
    root.setLevel(level)

    # Console handler sending everything to stdout rather than stderr.
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter('%(levelname)s%(message)s'))

    # Remove any old handlers that might exist, then install ours.
    while root.handlers:
        root.removeHandler(root.handlers[0])
    root.addHandler(handler)
################################################################################
def logHeader(level=logging.INFO):
    """
    Output a header containing identifying information about RMG to the log.
    """
    banner = (
        '###############################################################',
        '# Master Equation Automatic Solver for Unimolecular REactions #',
        '# (MEASURE) #',
        '# Release: 0.1.0 (7 July 2010) #',
        '# Author: Joshua W. Allen (jwallen@mit.edu) #',
        '# Website: http://jwallen.github.com/MEASURE #',
        '###############################################################\n',
    )
    for line in banner:
        logging.log(level, line)
################################################################################
if __name__ == '__main__':
    # Parse the command-line arguments
    args = parseCommandLineArguments()
    # Initialize the logging system
    initializeLogging(args)
    # Log start timestamp
    logging.info('MEASURE execution initiated at ' + time.asctime() + '\n')
    # Log header
    logHeader()
    # Load input file
    from measure.input import readInput
    params = readInput(args.file[0])
    # Only proceed if the input network is valid
    if params is not None:
        network, Tlist, Plist, Elist, method, model, Tmin, Tmax, Pmin, Pmax = params
        Nisom = len(network.isomers)
        Nreac = len(network.reactants)
        Nprod = len(network.products)
        # We will save our output files to the directory containing the input file,
        # NOT the current working directory
        outputDirectory = os.path.dirname(os.path.abspath(args.file[0]))
        # Draw potential energy surface
        if args.draw:
            logging.info('Drawing potential energy surface...')
            network.drawPotentialEnergySurface(args.draw[0])
        else:
            # Automatically choose a suitable set of energy grains if they were not
            # explicitly specified in the input file
            if len(Elist) == 2:
                logging.info('Automatically determining energy grains...')
                grainSize, Ngrains = Elist
                Elist = network.autoGenerateEnergyGrains(Tmax=Tmax, grainSize=grainSize, Ngrains=Ngrains)
                logging.debug('Using %i energy grains from %g to %g kJ/mol in steps of %g kJ/mol' % (len(Elist), Elist[0] / 1000, Elist[-1] / 1000, (Elist[1] - Elist[0]) / 1000))
                logging.debug('')
            # Calculate the rate coefficients
            K, p0 = network.calculateRateCoefficients(Tlist, Plist, Elist, method)
            # Fit interpolation model
            from chempy.reaction import Reaction
            from measure.reaction import fitInterpolationModel
            if model[0] != '':
                logging.info('Fitting %s interpolation models...' % model[0])
            # Enumerate every configuration (isomers, reactant channels,
            # product channels) in a single indexable list.
            configurations = []
            configurations.extend([[isom] for isom in network.isomers])
            configurations.extend([reactants for reactants in network.reactants])
            configurations.extend([products for products in network.products])
            for i in range(Nisom+Nreac+Nprod):
                for j in range(min(i, Nisom+Nreac)):
                    # Check that we have nonzero k(T,P) values
                    if (numpy.any(K[:,:,i,j]) and not numpy.all(K[:,:,i,j])):
                        # NOTE(review): NetworkError is never imported or defined in
                        # this module -- this raise would itself fail with NameError.
                        raise NetworkError('Zero rate coefficient encountered while updating network %s.' % network)
                    # Make a new net reaction
                    # NOTE(review): forward is always True here, so the
                    # K[:,:,j,i] branch below is dead code as written.
                    forward = True
                    netReaction = Reaction(
                        reactants=configurations[j],
                        products=configurations[i],
                        kinetics=None,
                        reversible=(i<Nisom+Nreac),
                    )
                    network.netReactions.append(netReaction)
                    # Set/update the net reaction kinetics using interpolation model
                    netReaction.kinetics = fitInterpolationModel(netReaction, Tlist, Plist,
                        K[:,:,i,j] if forward else K[:,:,j,i],
                        model, Tmin, Tmax, Pmin, Pmax, errorCheck=True)
            # Save results to file
            from measure.output import writeOutput
            if args.output:
                out = os.path.abspath(args.output[0])
            else:
                out = os.path.join(outputDirectory, 'output.py')
            writeOutput(out, network, Tlist, Plist, Elist, method, model)
    # Log end timestamp
    logging.info('')
    logging.info('MEASURE execution terminated at ' + time.asctime())
|
#
# Copyright (c) 2019 by Delphix. All rights reserved.
#
# flake8: noqa
from dlpx.virtualization.platform import Plugin
# Module-level plugin object that the Delphix platform discovers on import.
plugin = Plugin()
class ArbitraryError(Exception):
    """Exception fixture representing an arbitrary non-platform failure."""

    def __init__(self, message):
        super(ArbitraryError, self).__init__(message)

    @property
    def message(self):
        # The first positional constructor argument is the message text.
        return self.args[0]
# Deliberately raise at import time: tests use this module to verify how the
# platform surfaces a non-platform error escaping from plugin code.
raise ArbitraryError('Got an arbitrary non-platforms error for testing.')
|
# -*- coding: utf-8 -*-
# Functions for tracing the contour are provided in this file
import cv2
import numpy as np
# This is the search pattern of the neighbor points: the 4-connected
# neighbours first (up, right, down, left), then the four diagonals,
# expressed as (row, col) offsets.
neighbors = np.array([[-1, 0], [0, 1], [1, 0], [0, -1],
                      [-1, -1], [-1, 1], [1, -1], [1, 1]], dtype=np.int8)
# Check if the point has been visited
def existPoint(Visited, x, y):
    """Return True when point (x, y) already appears in *Visited*.

    *Visited* is any iterable of 2-element (row, col) points.
    """
    # any() short-circuits exactly like the original explicit loop did.
    return any(p[0] == x and p[1] == y for p in Visited)
# Find the point next to the current point based on the search pattern
def findNextPoint(Image, x, y, Points):
    """Return the first unvisited edge pixel adjacent to (x, y), following
    the module-level `neighbors` search pattern, or None when every
    neighbour is off-image, background, or already in *Points*."""
    for n in neighbors:
        x1 = x + n[0]
        y1 = y + n[1]
        # Bounds check: indices must be strictly less than the shape.
        # The original used `<=`, which admitted x1 == Image.shape[0]
        # (resp. y1 == Image.shape[1]) and raised IndexError whenever the
        # contour touched the bottom or right image border.
        if 0 <= x1 < Image.shape[0] and 0 <= y1 < Image.shape[1]:
            if Image[x1][y1]:
                if not existPoint(Points, x1, y1):
                    return x1, y1
    return None
# This function searches for the nearest point based on a Breadth first search
def searchForNearestPoint(Image, StartingPoint, Points, Range):
    """Breadth-first search for the nearest edge pixel around StartingPoint.

    Returns the (row, col) of the nearest unvisited edge pixel, the first
    contour point if the search loops back to it, or None once the search
    front moves more than *Range* rows away from StartingPoint.
    """
    print('Searching at:', StartingPoint)
    # Create a list for visited pixels, a list for the queue
    visited = Points[1:].copy()
    queue = visited[-2:].copy()
    # Search in the queue
    while len(queue):
        # Get and remove the first element of the queue
        s = queue[0]
        queue = np.delete(queue, 0, 0)
        # Check the neighbor pixels of the current pixel
        for n in neighbors:
            x1 = s[0] + n[0]
            y1 = s[1] + n[1]
            # Strict `<` bounds: the original `<=` allowed an index equal to
            # the image shape and raised IndexError at the image border.
            if 0 <= x1 < Image.shape[0] and 0 <= y1 < Image.shape[1]:
                if not existPoint(visited, x1, y1):
                    # If the pixel has not been visited
                    queue = np.vstack((queue, [x1, y1]))
                    visited = np.vstack((visited, [x1, y1]))
                    # Check if the pixel is an edge
                    if Image[x1][y1]:
                        return x1, y1
                else:
                    # If the pixel has been visited
                    # Check if the pixel is the first point of the contour
                    if x1 == Points[0][0] and y1 == Points[0][1]:
                        return x1, y1
        # If the searching range exceeded the range, end the search.
        # NOTE(review): queue[-1] raises IndexError when the queue drains
        # completely within this iteration -- confirm inputs guarantee a
        # non-empty queue here.
        if abs(queue[-1][-2] - StartingPoint[0]) > Range:
            return None
# This function draws the contour on a given or a black background
def drawTheContour(Size, Points, Color=(0, 0, 255), LineThickness=1, Background=None, CloseLoop=False):
    """Render the contour as connected line segments on *Background* (or on
    a fresh black canvas of the given Size) and return the image."""
    # Create a black background if the background is not given.
    canvas = np.zeros([Size[0], Size[1], 3], dtype=np.uint8) if Background is None else Background.copy()
    # Connect consecutive points; note the (row, col) -> (x, y) swap that
    # OpenCV's drawing API expects.
    previous = Points[0]
    first = Points[0]
    for current in Points:
        cv2.line(canvas, (previous[1], previous[0]), (current[1], current[0]), Color, LineThickness)
        previous = current
    # For a closed loop, join the last point back to the first.
    if CloseLoop:
        cv2.line(canvas, (previous[1], previous[0]), (first[1], first[0]), Color, LineThickness)
    return canvas
# This function trace a contour from a given starting point
def traceTheContour(Image, StartingPoint, Points=None, Range=20):
    """Trace a contour of edge pixels starting at StartingPoint.

    Returns (Points, isClosedLoop): Points is a numpy array of (row, col)
    coordinates and isClosedLoop reports whether the trace returned to its
    first point. Recurses after a breadth-first jump across a gap.
    """
    # Create a list to store the points
    if Points is None:
        Points = np.array([(StartingPoint[0], StartingPoint[1])], dtype=np.uint32)
    isClosedLoop = False
    # Tracing points
    nextPoint = list(Points[-1])
    print('Tracing contour...')
    while True:
        # Try using the search pattern to find the neighbor point first
        nextPoint = findNextPoint(Image, nextPoint[0], nextPoint[1], Points)
        # If a neighbor point is found, store the point and continue tracing
        if nextPoint is not None:
            # print('return: ' + str(nextPoint[0]) + ', ' + str(nextPoint[1]))
            Points = np.vstack((Points, [nextPoint[0], nextPoint[1]]))
            # print('Points\' shape: ' + str(Points.shape))
        # If a neighbor point can not be found
        else:
            # Check if the current point is next to the first point of the contour.
            # NOTE(review): rows of Points have length 2, so [-1][-2] is the
            # row and [-1][-1] the column of the last traced point.
            for n in neighbors:
                x1 = Points[-1][-2] + n[0]
                y1 = Points[-1][-1] + n[1]
                if x1 == Points[0][0] and y1 == Points[0][1]:
                    # If True, the contour is a closed loop, end tracing and return the contour
                    isClosedLoop = True
                    print('Closed')
                    return Points, isClosedLoop
            # If the last point is not next to the first point
            # Try to search for the nearest edge in the given range by Breadth first search
            if not isClosedLoop:
                print('Starting searching...')
                nearestPoint = searchForNearestPoint(Image, (Points[-1][-2], Points[-1][-1]), Points, Range)
                if nearestPoint is not None:
                    # If a nearest edge can be found, store the point
                    print('Nearest point found: ' + str(nearestPoint[0]) + ', ' + str(nearestPoint[1]))
                    Points = np.vstack((Points, [nearestPoint[0], nearestPoint[1]]))
                    # Check if the point is the first point, if true, end the tracing and return the contour
                    if nearestPoint[0] == Points[0][0] and nearestPoint[1] == Points[0][1]:
                        isClosedLoop = True
                        print('Closed')
                        return Points, isClosedLoop
                    # If not, back to tracing again
                    return traceTheContour(Image, (nearestPoint[0], nearestPoint[1]), Points, Range)
                else:
                    # If a nearest edge can not be found, end the tracing and return the contour
                    return Points, isClosedLoop
if __name__ == '__main__':
    # Demo driver: show the input image, trace one contour, show the result.
    fileName = 'dots.jpg'
    # fileName = 'complexSample.jpg'
    image = cv2.imread(fileName, 0)  # 0 flag: load as grayscale
    cv2.imshow('aaa', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # Hand-picked (row, col) seed on an edge of the demo image.
    startingPoint = (250, 103)
    # startingPoint = (13, 312)
    # Binarize so every pixel is either background (0) or edge (255).
    ret, image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY)
    contour, closedLoop = traceTheContour(image, startingPoint)
    print('isClosedContour: ' + str(closedLoop))
    size = image.shape[:2]
    res = drawTheContour(size, contour)
    fileName = fileName.split('.')[0] + '_contour.jpg'
    # cv2.imwrite(fileName, res)
    cv2.imshow(fileName, res)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
|
# Transposition Cipher Hacker
# https://www.nostarch.com/crackingcodes/ (BSD Licensed)
import pyperclip
import detectEnglish
import transpositionDecrypt
def main():
    """Hack the hard-coded transposition ciphertext and copy the plaintext
    to the clipboard (or report failure)."""
    # You might want to copy & paste this text from the source code at
    # https://www.nostarch.com/crackingcodes/:
    myMessage = """AaKoosoeDe5 b5sn ma reno ora'lhlrrceey e enlh na indeit n uhoretrm au ieu v er Ne2 gmanw,forwnlbsya apor tE.no euarisfatt e mealefedhsppmgAnlnoe(c -or)alat r lw o eb nglom,Ain one dtes ilhetcdba. t tg eturmudg,tfl1e1 v nitiaicynhrCsaemie-sp ncgHt nie cetrgmnoa yc r,ieaa toesa- e a0m82e1w shcnth ekh gaecnpeutaaieetgn iodhso d ro hAe snrsfcegrt NCsLc b17m8aEheideikfr aBercaeu thllnrshicwsg etriebruaisss d iorr."""

    hackedMessage = hackTransposition(myMessage)

    # `is None` is the idiomatic identity test; the original `== None`
    # can be fooled by objects that override __eq__.
    if hackedMessage is None:
        print('Failed to hack encryption.')
    else:
        print('Copying hacked message to clipboard:')
        print(hackedMessage)
        pyperclip.copy(hackedMessage)
def hackTransposition(message):
    """Brute-force every transposition key for *message*.

    Returns the first decryption that looks like English AND is confirmed
    by the user, or None when no key produces recognizable English.
    """
    print('Hacking...')

    # Python programs can be stopped at any time by pressing
    # Ctrl-C (on Windows) or Ctrl-D (on macOS and Linux):
    # (Fixed: the original message was missing the closing parenthesis and
    # carried stray indentation from a backslash line continuation.)
    print('Press Ctrl-C (on Windows) or Ctrl-D (on macOS and Linux) to '
          'quit at any time.')

    # Brute-force by looping through every possible key:
    for key in range(1, len(message)):
        print('Trying key #%s...' % (key))

        decryptedText = transpositionDecrypt.decryptMessage(key, message)

        if detectEnglish.isEnglish(decryptedText):
            # Ask user if this is the correct decryption:
            print()
            print('Possible encryption hack:')
            print('Key %s: %s' % (key, decryptedText[:100]))
            print()
            print('Enter D if done, anything else to continue hacking:')
            response = input('> ')

            if response.strip().upper().startswith('D'):
                return decryptedText

    # No key produced user-confirmed English.
    return None
# Run main() only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
# Copyright 2012 Loris Corazza, Sakis Christakidis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from p2ner.abstract.interface import Interface
from messages.messages import GetNeighboursMessage,AskNeighboursMessage
class VizNetInterface(Interface):
    # Interface used by the network visualizer: asks every peer of this
    # stream for its neighbour list, then hands the aggregated result
    # (keyed by (ip, port) with smoothed RTTs) to a caller-supplied callback.

    def initInterface(self):
        # No setup needed beyond what Interface provides.
        pass

    def getNeighbours(self,func):
        # Kick off one AskNeighbours request per known peer; `func` is the
        # callback invoked once every peer has answered (see gotNeighs).
        self.func=func
        self.neighs={}
        for peer in self.parent.getPeers():
            self.neighs[peer]={}
            self.neighs[peer]['response']=False
            self.neighs[peer]['neighs']={}
            # Keep the pending message object so the reply can be matched.
            self.neighs[peer]['msg']=GetNeighboursMessage(peer,self.gotNeighs)
            AskNeighboursMessage.send(peer,self.parent.stream.id,self.parent.controlPipe)

    def gotNeighs(self,peer,neighs):
        # Reply handler for a single peer's neighbour list.
        if self.neighs[peer]['response']:
            # Duplicate reply: log loudly but still process it.
            print 'already got the neighs from that peer'
            print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
        for p in neighs:
            # Average the recent RTT samples for plotting.
            # NOTE(review): under Python 2 this is integer division when
            # lastRtt holds ints -- confirm the samples are floats.
            p.plotRtt=sum(p.lastRtt)/len(p.lastRtt)
        self.neighs[peer]['response']=True
        self.neighs[peer]['neighs']=neighs
        self.neighs[peer]['msg']=None
        # Wait until every peer has responded before building the summary.
        for p in self.neighs.values():
            if not p['response']:
                return
        self.constructNeighs()

    def constructNeighs(self):
        # Re-key the collected data as (ip, port) tuples and pass the
        # {(ip, port): [((ip, port), rtt), ...]} mapping to the callback.
        self.returnNeighs={}
        for k,v in self.neighs.items():
            peer=(k.getIP(),k.getPort())
            self.returnNeighs[peer]=[]
            for p in v['neighs']:
                self.returnNeighs[peer].append(((p.getIP(),p.getPort()),p.plotRtt))
        self.func(self.returnNeighs)
|
from django.conf.urls import include, url
from django.contrib import admin
# from rest_framework.routers import DefaultRouter
# from snippod_starter_demo_app_server.views import api_root
# from rest_framework_nested import routers
#
# from authentication.views import AccountViewSet, LoginView, LogoutView
# from postsold.views import AccountPostsViewSet, PostViewSet
# from snippod_starter_demo_app_server.views import IndexView
#
# router = routers.SimpleRouter()
# router.register(r'accounts', AccountViewSet)
# router.register(r'postsold', PostViewSet)
#
# accounts_router = routers.NestedSimpleRouter(
# router, r'accounts', lookup='authentication'
# )
# accounts_router.register(r'postsold', AccountPostsViewSet)
# router = DefaultRouter()
urlpatterns = [
    # Site pages served by the main app (empty prefix matches everything,
    # but unmatched paths fall through to the patterns below).
    url(r'', include('main.urls')),
    # Versioned REST API endpoints.
    url(r'^api/v1/', include('authentication.urls')),
    url(r'^api/v1/', include('posts.urls')),
    url(r'^admin/', include(admin.site.urls)),
    # Browsable-API login/logout views provided by Django REST framework.
    url(r'^api-auth/', include('rest_framework.urls',
                               namespace='rest_framework'))
]

# Redirect to webapp URL
# TODO Server-side rendering
urlpatterns += [
    # Catch-all: any path not handled above is routed back to the main app.
    url(r'^.*', include('main.urls')),
]
|
from iiits.config import templates |
# File Name : LFSR_Rand.py
# Description : implement a simple LFSR random number generator
# Author : Ganyuan Cao
def lfsr_gen(init, tap, n):
    """Extend the seed bits *init* with an LFSR defined by the *tap*
    coefficients until the sequence holds *n* bits; return the bit list.

    When n is already <= len(init) the seed is returned unchanged.
    """
    sequence = init
    num_taps = len(tap)
    length = len(sequence) + 1
    # Iteratively fill the LFSR list one feedback bit at a time.
    while length <= n:
        # Feedback bit = XOR (i.e. sum mod 2) of the tapped positions in
        # the trailing window of the sequence.
        feedback = sum(tap[i] * sequence[length - (num_taps + 1) + i]
                       for i in range(num_taps))
        sequence = sequence + [feedback % 2]
        length += 1
    return sequence
# Convert the sequence to the integer
def convRandom(lfsr_seq):
num = 0
for i in range(0, len(lfsr_seq)):
num = num + lfsr_seq[i] * (2 ** i)
return num
# convert the binary to list
def binaryConvert(num):
l = []
for i in range(0, len(num)):
l = l + [int(num[i])]
return l
def main():
    # NOTE(review): this entry point relies on Python 2 semantics --
    # print statements, and input() evaluating the typed text to an int.
    seed1 = input("Enter seed #1: ")
    seed2 = input("Enter seed #2: ")
    round = input("Enter a round number: ")
    # Render both seeds as zero-padded 8-bit binary strings.
    seed1 = '{0:08b}'.format(seed1)
    seed2 = '{0:08b}'.format(seed2)
    # seed1 provides the initial register state, seed2 the tap coefficients.
    init = binaryConvert(seed1)
    tap = binaryConvert(seed2)
    lfsr = lfsr_gen(init,tap, round)
    print "The generated LFSR sequence is: ", lfsr
    numRand = convRandom(lfsr)
    print "The random number derived is: ", numRand
# Run only when executed as a script.
if __name__ == "__main__":
    main()
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import absolute_import
from .add_em_managed_external_exadata_insight_members_details import AddEmManagedExternalExadataInsightMembersDetails
from .add_exadata_insight_members_details import AddExadataInsightMembersDetails
from .autonomous_database_configuration_summary import AutonomousDatabaseConfigurationSummary
from .autonomous_database_insight import AutonomousDatabaseInsight
from .autonomous_database_insight_summary import AutonomousDatabaseInsightSummary
from .awr_hub import AwrHub
from .awr_hub_summary import AwrHubSummary
from .awr_hub_summary_collection import AwrHubSummaryCollection
from .awr_hubs import AwrHubs
from .awr_report import AwrReport
from .awr_snapshot_collection import AwrSnapshotCollection
from .awr_snapshot_summary import AwrSnapshotSummary
from .awr_source_summary import AwrSourceSummary
from .change_database_insight_compartment_details import ChangeDatabaseInsightCompartmentDetails
from .change_enterprise_manager_bridge_compartment_details import ChangeEnterpriseManagerBridgeCompartmentDetails
from .change_exadata_insight_compartment_details import ChangeExadataInsightCompartmentDetails
from .change_host_insight_compartment_details import ChangeHostInsightCompartmentDetails
from .change_operations_insights_private_endpoint_compartment_details import ChangeOperationsInsightsPrivateEndpointCompartmentDetails
from .change_pe_comanaged_database_insight_details import ChangePeComanagedDatabaseInsightDetails
from .connection_details import ConnectionDetails
from .create_awr_hub_details import CreateAwrHubDetails
from .create_database_insight_details import CreateDatabaseInsightDetails
from .create_em_managed_external_database_insight_details import CreateEmManagedExternalDatabaseInsightDetails
from .create_em_managed_external_exadata_insight_details import CreateEmManagedExternalExadataInsightDetails
from .create_em_managed_external_exadata_member_entity_details import CreateEmManagedExternalExadataMemberEntityDetails
from .create_em_managed_external_host_insight_details import CreateEmManagedExternalHostInsightDetails
from .create_enterprise_manager_bridge_details import CreateEnterpriseManagerBridgeDetails
from .create_exadata_insight_details import CreateExadataInsightDetails
from .create_host_insight_details import CreateHostInsightDetails
from .create_macs_managed_external_host_insight_details import CreateMacsManagedExternalHostInsightDetails
from .create_operations_insights_private_endpoint_details import CreateOperationsInsightsPrivateEndpointDetails
from .create_operations_insights_warehouse_details import CreateOperationsInsightsWarehouseDetails
from .create_operations_insights_warehouse_user_details import CreateOperationsInsightsWarehouseUserDetails
from .create_pe_comanaged_database_insight_details import CreatePeComanagedDatabaseInsightDetails
from .credential_by_vault import CredentialByVault
from .credential_details import CredentialDetails
from .credentials_by_source import CredentialsBySource
from .db_external_instance import DBExternalInstance
from .db_external_properties import DBExternalProperties
from .dbos_config_instance import DBOSConfigInstance
from .database_configuration_collection import DatabaseConfigurationCollection
from .database_configuration_metric_group import DatabaseConfigurationMetricGroup
from .database_configuration_summary import DatabaseConfigurationSummary
from .database_details import DatabaseDetails
from .database_insight import DatabaseInsight
from .database_insight_summary import DatabaseInsightSummary
from .database_insights import DatabaseInsights
from .database_insights_collection import DatabaseInsightsCollection
from .disk_group_details import DiskGroupDetails
from .download_operations_insights_warehouse_wallet_details import DownloadOperationsInsightsWarehouseWalletDetails
from .em_managed_external_database_configuration_summary import EmManagedExternalDatabaseConfigurationSummary
from .em_managed_external_database_insight import EmManagedExternalDatabaseInsight
from .em_managed_external_database_insight_summary import EmManagedExternalDatabaseInsightSummary
from .em_managed_external_exadata_insight import EmManagedExternalExadataInsight
from .em_managed_external_exadata_insight_summary import EmManagedExternalExadataInsightSummary
from .em_managed_external_host_configuration_summary import EmManagedExternalHostConfigurationSummary
from .em_managed_external_host_insight import EmManagedExternalHostInsight
from .em_managed_external_host_insight_summary import EmManagedExternalHostInsightSummary
from .enable_database_insight_details import EnableDatabaseInsightDetails
from .enable_em_managed_external_database_insight_details import EnableEmManagedExternalDatabaseInsightDetails
from .enable_em_managed_external_exadata_insight_details import EnableEmManagedExternalExadataInsightDetails
from .enable_em_managed_external_host_insight_details import EnableEmManagedExternalHostInsightDetails
from .enable_exadata_insight_details import EnableExadataInsightDetails
from .enable_host_insight_details import EnableHostInsightDetails
from .enable_macs_managed_external_host_insight_details import EnableMacsManagedExternalHostInsightDetails
from .enable_pe_comanaged_database_insight_details import EnablePeComanagedDatabaseInsightDetails
from .enterprise_manager_bridge import EnterpriseManagerBridge
from .enterprise_manager_bridge_collection import EnterpriseManagerBridgeCollection
from .enterprise_manager_bridge_summary import EnterpriseManagerBridgeSummary
from .enterprise_manager_bridges import EnterpriseManagerBridges
from .exadata_configuration_collection import ExadataConfigurationCollection
from .exadata_configuration_summary import ExadataConfigurationSummary
from .exadata_database_machine_configuration_summary import ExadataDatabaseMachineConfigurationSummary
from .exadata_database_statistics_summary import ExadataDatabaseStatisticsSummary
from .exadata_details import ExadataDetails
from .exadata_diskgroup_statistics_summary import ExadataDiskgroupStatisticsSummary
from .exadata_host_statistics_summary import ExadataHostStatisticsSummary
from .exadata_insight import ExadataInsight
from .exadata_insight_resource_capacity_trend_aggregation import ExadataInsightResourceCapacityTrendAggregation
from .exadata_insight_resource_capacity_trend_summary import ExadataInsightResourceCapacityTrendSummary
from .exadata_insight_resource_forecast_trend_summary import ExadataInsightResourceForecastTrendSummary
from .exadata_insight_resource_insight_utilization_item import ExadataInsightResourceInsightUtilizationItem
from .exadata_insight_resource_statistics import ExadataInsightResourceStatistics
from .exadata_insight_resource_statistics_aggregation import ExadataInsightResourceStatisticsAggregation
from .exadata_insight_summary import ExadataInsightSummary
from .exadata_insight_summary_collection import ExadataInsightSummaryCollection
from .exadata_insights import ExadataInsights
from .exadata_member_collection import ExadataMemberCollection
from .exadata_member_summary import ExadataMemberSummary
from .exadata_storage_server_statistics_summary import ExadataStorageServerStatisticsSummary
from .historical_data_item import HistoricalDataItem
from .host_configuration_collection import HostConfigurationCollection
from .host_configuration_metric_group import HostConfigurationMetricGroup
from .host_configuration_summary import HostConfigurationSummary
from .host_cpu_hardware_configuration import HostCpuHardwareConfiguration
from .host_cpu_statistics import HostCpuStatistics
from .host_cpu_usage import HostCpuUsage
from .host_details import HostDetails
from .host_entities import HostEntities
from .host_hardware_configuration import HostHardwareConfiguration
from .host_importable_agent_entity_summary import HostImportableAgentEntitySummary
from .host_insight import HostInsight
from .host_insight_resource_statistics_aggregation import HostInsightResourceStatisticsAggregation
from .host_insight_summary import HostInsightSummary
from .host_insight_summary_collection import HostInsightSummaryCollection
from .host_insights import HostInsights
from .host_instance_map import HostInstanceMap
from .host_memory_configuration import HostMemoryConfiguration
from .host_memory_statistics import HostMemoryStatistics
from .host_memory_usage import HostMemoryUsage
from .host_network_activity_summary import HostNetworkActivitySummary
from .host_network_configuration import HostNetworkConfiguration
from .host_performance_metric_group import HostPerformanceMetricGroup
from .host_product import HostProduct
from .host_resource_allocation import HostResourceAllocation
from .host_resource_capacity_trend_aggregation import HostResourceCapacityTrendAggregation
from .host_resource_statistics import HostResourceStatistics
from .host_top_processes import HostTopProcesses
from .hosted_entity_collection import HostedEntityCollection
from .hosted_entity_summary import HostedEntitySummary
from .importable_agent_entity_summary import ImportableAgentEntitySummary
from .importable_agent_entity_summary_collection import ImportableAgentEntitySummaryCollection
from .importable_enterprise_manager_entity import ImportableEnterpriseManagerEntity
from .importable_enterprise_manager_entity_collection import ImportableEnterpriseManagerEntityCollection
from .ingest_database_configuration_details import IngestDatabaseConfigurationDetails
from .ingest_database_configuration_response_details import IngestDatabaseConfigurationResponseDetails
from .ingest_host_configuration_details import IngestHostConfigurationDetails
from .ingest_host_configuration_response_details import IngestHostConfigurationResponseDetails
from .ingest_host_metrics_details import IngestHostMetricsDetails
from .ingest_host_metrics_response_details import IngestHostMetricsResponseDetails
from .ingest_sql_bucket_details import IngestSqlBucketDetails
from .ingest_sql_bucket_response_details import IngestSqlBucketResponseDetails
from .ingest_sql_plan_lines_details import IngestSqlPlanLinesDetails
from .ingest_sql_plan_lines_response_details import IngestSqlPlanLinesResponseDetails
from .ingest_sql_stats_details import IngestSqlStatsDetails
from .ingest_sql_stats_response_details import IngestSqlStatsResponseDetails
from .ingest_sql_text_details import IngestSqlTextDetails
from .ingest_sql_text_response_details import IngestSqlTextResponseDetails
from .instance_metrics import InstanceMetrics
from .macs_managed_external_database_configuration_summary import MacsManagedExternalDatabaseConfigurationSummary
from .macs_managed_external_database_insight import MacsManagedExternalDatabaseInsight
from .macs_managed_external_database_insight_summary import MacsManagedExternalDatabaseInsightSummary
from .macs_managed_external_host_configuration_summary import MacsManagedExternalHostConfigurationSummary
from .macs_managed_external_host_insight import MacsManagedExternalHostInsight
from .macs_managed_external_host_insight_summary import MacsManagedExternalHostInsightSummary
from .operations_insights_private_endpoint import OperationsInsightsPrivateEndpoint
from .operations_insights_private_endpoint_collection import OperationsInsightsPrivateEndpointCollection
from .operations_insights_private_endpoint_summary import OperationsInsightsPrivateEndpointSummary
from .operations_insights_warehouse import OperationsInsightsWarehouse
from .operations_insights_warehouse_summary import OperationsInsightsWarehouseSummary
from .operations_insights_warehouse_summary_collection import OperationsInsightsWarehouseSummaryCollection
from .operations_insights_warehouse_user import OperationsInsightsWarehouseUser
from .operations_insights_warehouse_user_summary import OperationsInsightsWarehouseUserSummary
from .operations_insights_warehouse_user_summary_collection import OperationsInsightsWarehouseUserSummaryCollection
from .operations_insights_warehouse_users import OperationsInsightsWarehouseUsers
from .operations_insights_warehouses import OperationsInsightsWarehouses
from .pe_comanaged_database_connection_details import PeComanagedDatabaseConnectionDetails
from .pe_comanaged_database_host_details import PeComanagedDatabaseHostDetails
from .pe_comanaged_database_insight import PeComanagedDatabaseInsight
from .pe_comanaged_database_insight_summary import PeComanagedDatabaseInsightSummary
from .pe_comanaged_managed_external_database_configuration_summary import PeComanagedManagedExternalDatabaseConfigurationSummary
from .projected_data_item import ProjectedDataItem
from .resource_capacity_trend_aggregation import ResourceCapacityTrendAggregation
from .resource_insight_current_utilization import ResourceInsightCurrentUtilization
from .resource_insight_projected_utilization import ResourceInsightProjectedUtilization
from .resource_insight_projected_utilization_item import ResourceInsightProjectedUtilizationItem
from .resource_statistics import ResourceStatistics
from .resource_statistics_aggregation import ResourceStatisticsAggregation
from .resource_usage_summary import ResourceUsageSummary
from .resource_usage_trend_aggregation import ResourceUsageTrendAggregation
from .sql_bucket import SqlBucket
from .sql_insight_aggregation import SqlInsightAggregation
from .sql_insight_aggregation_collection import SqlInsightAggregationCollection
from .sql_insight_thresholds import SqlInsightThresholds
from .sql_inventory import SqlInventory
from .sql_plan_collection import SqlPlanCollection
from .sql_plan_insight_aggregation import SqlPlanInsightAggregation
from .sql_plan_insight_aggregation_collection import SqlPlanInsightAggregationCollection
from .sql_plan_insights import SqlPlanInsights
from .sql_plan_line import SqlPlanLine
from .sql_plan_summary import SqlPlanSummary
from .sql_response_time_distribution_aggregation import SqlResponseTimeDistributionAggregation
from .sql_response_time_distribution_aggregation_collection import SqlResponseTimeDistributionAggregationCollection
from .sql_search_collection import SqlSearchCollection
from .sql_search_summary import SqlSearchSummary
from .sql_statistic_aggregation import SqlStatisticAggregation
from .sql_statistic_aggregation_collection import SqlStatisticAggregationCollection
from .sql_statistics import SqlStatistics
from .sql_statistics_time_series import SqlStatisticsTimeSeries
from .sql_statistics_time_series_aggregation import SqlStatisticsTimeSeriesAggregation
from .sql_statistics_time_series_aggregation_collection import SqlStatisticsTimeSeriesAggregationCollection
from .sql_statistics_time_series_by_plan_aggregation import SqlStatisticsTimeSeriesByPlanAggregation
from .sql_statistics_time_series_by_plan_aggregation_collection import SqlStatisticsTimeSeriesByPlanAggregationCollection
from .sql_stats import SqlStats
from .sql_text import SqlText
from .sql_text_collection import SqlTextCollection
from .sql_text_summary import SqlTextSummary
from .storage_server_details import StorageServerDetails
from .summarize_awr_sources_summaries_collection import SummarizeAwrSourcesSummariesCollection
from .summarize_database_insight_resource_capacity_trend_aggregation_collection import SummarizeDatabaseInsightResourceCapacityTrendAggregationCollection
from .summarize_database_insight_resource_forecast_trend_aggregation import SummarizeDatabaseInsightResourceForecastTrendAggregation
from .summarize_database_insight_resource_statistics_aggregation_collection import SummarizeDatabaseInsightResourceStatisticsAggregationCollection
from .summarize_database_insight_resource_usage_aggregation import SummarizeDatabaseInsightResourceUsageAggregation
from .summarize_database_insight_resource_usage_trend_aggregation_collection import SummarizeDatabaseInsightResourceUsageTrendAggregationCollection
from .summarize_database_insight_resource_utilization_insight_aggregation import SummarizeDatabaseInsightResourceUtilizationInsightAggregation
from .summarize_database_insight_tablespace_usage_trend_aggregation_collection import SummarizeDatabaseInsightTablespaceUsageTrendAggregationCollection
from .summarize_exadata_insight_resource_capacity_trend_aggregation import SummarizeExadataInsightResourceCapacityTrendAggregation
from .summarize_exadata_insight_resource_capacity_trend_collection import SummarizeExadataInsightResourceCapacityTrendCollection
from .summarize_exadata_insight_resource_forecast_trend_aggregation import SummarizeExadataInsightResourceForecastTrendAggregation
from .summarize_exadata_insight_resource_forecast_trend_collection import SummarizeExadataInsightResourceForecastTrendCollection
from .summarize_exadata_insight_resource_statistics_aggregation_collection import SummarizeExadataInsightResourceStatisticsAggregationCollection
from .summarize_exadata_insight_resource_usage_aggregation import SummarizeExadataInsightResourceUsageAggregation
from .summarize_exadata_insight_resource_usage_collection import SummarizeExadataInsightResourceUsageCollection
from .summarize_exadata_insight_resource_utilization_insight_aggregation import SummarizeExadataInsightResourceUtilizationInsightAggregation
from .summarize_host_insight_resource_capacity_trend_aggregation_collection import SummarizeHostInsightResourceCapacityTrendAggregationCollection
from .summarize_host_insight_resource_forecast_trend_aggregation import SummarizeHostInsightResourceForecastTrendAggregation
from .summarize_host_insight_resource_statistics_aggregation_collection import SummarizeHostInsightResourceStatisticsAggregationCollection
from .summarize_host_insight_resource_usage_aggregation import SummarizeHostInsightResourceUsageAggregation
from .summarize_host_insight_resource_usage_trend_aggregation_collection import SummarizeHostInsightResourceUsageTrendAggregationCollection
from .summarize_host_insight_resource_utilization_insight_aggregation import SummarizeHostInsightResourceUtilizationInsightAggregation
from .summarize_host_insights_top_processes_usage_trend_collection import SummarizeHostInsightsTopProcessesUsageTrendCollection
from .summarize_operations_insights_warehouse_resource_usage_aggregation import SummarizeOperationsInsightsWarehouseResourceUsageAggregation
from .summary_statistics import SummaryStatistics
from .tablespace_usage_trend import TablespaceUsageTrend
from .tablespace_usage_trend_aggregation import TablespaceUsageTrendAggregation
from .top_processes_usage_trend import TopProcessesUsageTrend
from .top_processes_usage_trend_aggregation import TopProcessesUsageTrendAggregation
from .update_autonomous_database_insight_details import UpdateAutonomousDatabaseInsightDetails
from .update_awr_hub_details import UpdateAwrHubDetails
from .update_database_insight_details import UpdateDatabaseInsightDetails
from .update_em_managed_external_database_insight_details import UpdateEmManagedExternalDatabaseInsightDetails
from .update_em_managed_external_exadata_insight_details import UpdateEmManagedExternalExadataInsightDetails
from .update_em_managed_external_host_insight_details import UpdateEmManagedExternalHostInsightDetails
from .update_enterprise_manager_bridge_details import UpdateEnterpriseManagerBridgeDetails
from .update_exadata_insight_details import UpdateExadataInsightDetails
from .update_host_insight_details import UpdateHostInsightDetails
from .update_macs_managed_external_database_insight_details import UpdateMacsManagedExternalDatabaseInsightDetails
from .update_macs_managed_external_host_insight_details import UpdateMacsManagedExternalHostInsightDetails
from .update_operations_insights_private_endpoint_details import UpdateOperationsInsightsPrivateEndpointDetails
from .update_operations_insights_warehouse_details import UpdateOperationsInsightsWarehouseDetails
from .update_operations_insights_warehouse_user_details import UpdateOperationsInsightsWarehouseUserDetails
from .update_pe_comanaged_database_insight_details import UpdatePeComanagedDatabaseInsightDetails
from .work_request import WorkRequest
from .work_request_collection import WorkRequestCollection
from .work_request_error import WorkRequestError
from .work_request_error_collection import WorkRequestErrorCollection
from .work_request_log_entry import WorkRequestLogEntry
from .work_request_log_entry_collection import WorkRequestLogEntryCollection
from .work_request_resource import WorkRequestResource
from .work_requests import WorkRequests
# Maps type names to classes for opsi services.
# Auto-generated registry used by the SDK deserializer: the API's "type"
# discriminator string is looked up here to pick the concrete model class
# to instantiate. Each key mirrors its class name exactly; do not edit by
# hand — regenerate alongside the imports above when models change.
opsi_type_mapping = {
    "AddEmManagedExternalExadataInsightMembersDetails": AddEmManagedExternalExadataInsightMembersDetails,
    "AddExadataInsightMembersDetails": AddExadataInsightMembersDetails,
    "AutonomousDatabaseConfigurationSummary": AutonomousDatabaseConfigurationSummary,
    "AutonomousDatabaseInsight": AutonomousDatabaseInsight,
    "AutonomousDatabaseInsightSummary": AutonomousDatabaseInsightSummary,
    "AwrHub": AwrHub,
    "AwrHubSummary": AwrHubSummary,
    "AwrHubSummaryCollection": AwrHubSummaryCollection,
    "AwrHubs": AwrHubs,
    "AwrReport": AwrReport,
    "AwrSnapshotCollection": AwrSnapshotCollection,
    "AwrSnapshotSummary": AwrSnapshotSummary,
    "AwrSourceSummary": AwrSourceSummary,
    "ChangeDatabaseInsightCompartmentDetails": ChangeDatabaseInsightCompartmentDetails,
    "ChangeEnterpriseManagerBridgeCompartmentDetails": ChangeEnterpriseManagerBridgeCompartmentDetails,
    "ChangeExadataInsightCompartmentDetails": ChangeExadataInsightCompartmentDetails,
    "ChangeHostInsightCompartmentDetails": ChangeHostInsightCompartmentDetails,
    "ChangeOperationsInsightsPrivateEndpointCompartmentDetails": ChangeOperationsInsightsPrivateEndpointCompartmentDetails,
    "ChangePeComanagedDatabaseInsightDetails": ChangePeComanagedDatabaseInsightDetails,
    "ConnectionDetails": ConnectionDetails,
    "CreateAwrHubDetails": CreateAwrHubDetails,
    "CreateDatabaseInsightDetails": CreateDatabaseInsightDetails,
    "CreateEmManagedExternalDatabaseInsightDetails": CreateEmManagedExternalDatabaseInsightDetails,
    "CreateEmManagedExternalExadataInsightDetails": CreateEmManagedExternalExadataInsightDetails,
    "CreateEmManagedExternalExadataMemberEntityDetails": CreateEmManagedExternalExadataMemberEntityDetails,
    "CreateEmManagedExternalHostInsightDetails": CreateEmManagedExternalHostInsightDetails,
    "CreateEnterpriseManagerBridgeDetails": CreateEnterpriseManagerBridgeDetails,
    "CreateExadataInsightDetails": CreateExadataInsightDetails,
    "CreateHostInsightDetails": CreateHostInsightDetails,
    "CreateMacsManagedExternalHostInsightDetails": CreateMacsManagedExternalHostInsightDetails,
    "CreateOperationsInsightsPrivateEndpointDetails": CreateOperationsInsightsPrivateEndpointDetails,
    "CreateOperationsInsightsWarehouseDetails": CreateOperationsInsightsWarehouseDetails,
    "CreateOperationsInsightsWarehouseUserDetails": CreateOperationsInsightsWarehouseUserDetails,
    "CreatePeComanagedDatabaseInsightDetails": CreatePeComanagedDatabaseInsightDetails,
    "CredentialByVault": CredentialByVault,
    "CredentialDetails": CredentialDetails,
    "CredentialsBySource": CredentialsBySource,
    "DBExternalInstance": DBExternalInstance,
    "DBExternalProperties": DBExternalProperties,
    "DBOSConfigInstance": DBOSConfigInstance,
    "DatabaseConfigurationCollection": DatabaseConfigurationCollection,
    "DatabaseConfigurationMetricGroup": DatabaseConfigurationMetricGroup,
    "DatabaseConfigurationSummary": DatabaseConfigurationSummary,
    "DatabaseDetails": DatabaseDetails,
    "DatabaseInsight": DatabaseInsight,
    "DatabaseInsightSummary": DatabaseInsightSummary,
    "DatabaseInsights": DatabaseInsights,
    "DatabaseInsightsCollection": DatabaseInsightsCollection,
    "DiskGroupDetails": DiskGroupDetails,
    "DownloadOperationsInsightsWarehouseWalletDetails": DownloadOperationsInsightsWarehouseWalletDetails,
    "EmManagedExternalDatabaseConfigurationSummary": EmManagedExternalDatabaseConfigurationSummary,
    "EmManagedExternalDatabaseInsight": EmManagedExternalDatabaseInsight,
    "EmManagedExternalDatabaseInsightSummary": EmManagedExternalDatabaseInsightSummary,
    "EmManagedExternalExadataInsight": EmManagedExternalExadataInsight,
    "EmManagedExternalExadataInsightSummary": EmManagedExternalExadataInsightSummary,
    "EmManagedExternalHostConfigurationSummary": EmManagedExternalHostConfigurationSummary,
    "EmManagedExternalHostInsight": EmManagedExternalHostInsight,
    "EmManagedExternalHostInsightSummary": EmManagedExternalHostInsightSummary,
    "EnableDatabaseInsightDetails": EnableDatabaseInsightDetails,
    "EnableEmManagedExternalDatabaseInsightDetails": EnableEmManagedExternalDatabaseInsightDetails,
    "EnableEmManagedExternalExadataInsightDetails": EnableEmManagedExternalExadataInsightDetails,
    "EnableEmManagedExternalHostInsightDetails": EnableEmManagedExternalHostInsightDetails,
    "EnableExadataInsightDetails": EnableExadataInsightDetails,
    "EnableHostInsightDetails": EnableHostInsightDetails,
    "EnableMacsManagedExternalHostInsightDetails": EnableMacsManagedExternalHostInsightDetails,
    "EnablePeComanagedDatabaseInsightDetails": EnablePeComanagedDatabaseInsightDetails,
    "EnterpriseManagerBridge": EnterpriseManagerBridge,
    "EnterpriseManagerBridgeCollection": EnterpriseManagerBridgeCollection,
    "EnterpriseManagerBridgeSummary": EnterpriseManagerBridgeSummary,
    "EnterpriseManagerBridges": EnterpriseManagerBridges,
    "ExadataConfigurationCollection": ExadataConfigurationCollection,
    "ExadataConfigurationSummary": ExadataConfigurationSummary,
    "ExadataDatabaseMachineConfigurationSummary": ExadataDatabaseMachineConfigurationSummary,
    "ExadataDatabaseStatisticsSummary": ExadataDatabaseStatisticsSummary,
    "ExadataDetails": ExadataDetails,
    "ExadataDiskgroupStatisticsSummary": ExadataDiskgroupStatisticsSummary,
    "ExadataHostStatisticsSummary": ExadataHostStatisticsSummary,
    "ExadataInsight": ExadataInsight,
    "ExadataInsightResourceCapacityTrendAggregation": ExadataInsightResourceCapacityTrendAggregation,
    "ExadataInsightResourceCapacityTrendSummary": ExadataInsightResourceCapacityTrendSummary,
    "ExadataInsightResourceForecastTrendSummary": ExadataInsightResourceForecastTrendSummary,
    "ExadataInsightResourceInsightUtilizationItem": ExadataInsightResourceInsightUtilizationItem,
    "ExadataInsightResourceStatistics": ExadataInsightResourceStatistics,
    "ExadataInsightResourceStatisticsAggregation": ExadataInsightResourceStatisticsAggregation,
    "ExadataInsightSummary": ExadataInsightSummary,
    "ExadataInsightSummaryCollection": ExadataInsightSummaryCollection,
    "ExadataInsights": ExadataInsights,
    "ExadataMemberCollection": ExadataMemberCollection,
    "ExadataMemberSummary": ExadataMemberSummary,
    "ExadataStorageServerStatisticsSummary": ExadataStorageServerStatisticsSummary,
    "HistoricalDataItem": HistoricalDataItem,
    "HostConfigurationCollection": HostConfigurationCollection,
    "HostConfigurationMetricGroup": HostConfigurationMetricGroup,
    "HostConfigurationSummary": HostConfigurationSummary,
    "HostCpuHardwareConfiguration": HostCpuHardwareConfiguration,
    "HostCpuStatistics": HostCpuStatistics,
    "HostCpuUsage": HostCpuUsage,
    "HostDetails": HostDetails,
    "HostEntities": HostEntities,
    "HostHardwareConfiguration": HostHardwareConfiguration,
    "HostImportableAgentEntitySummary": HostImportableAgentEntitySummary,
    "HostInsight": HostInsight,
    "HostInsightResourceStatisticsAggregation": HostInsightResourceStatisticsAggregation,
    "HostInsightSummary": HostInsightSummary,
    "HostInsightSummaryCollection": HostInsightSummaryCollection,
    "HostInsights": HostInsights,
    "HostInstanceMap": HostInstanceMap,
    "HostMemoryConfiguration": HostMemoryConfiguration,
    "HostMemoryStatistics": HostMemoryStatistics,
    "HostMemoryUsage": HostMemoryUsage,
    "HostNetworkActivitySummary": HostNetworkActivitySummary,
    "HostNetworkConfiguration": HostNetworkConfiguration,
    "HostPerformanceMetricGroup": HostPerformanceMetricGroup,
    "HostProduct": HostProduct,
    "HostResourceAllocation": HostResourceAllocation,
    "HostResourceCapacityTrendAggregation": HostResourceCapacityTrendAggregation,
    "HostResourceStatistics": HostResourceStatistics,
    "HostTopProcesses": HostTopProcesses,
    "HostedEntityCollection": HostedEntityCollection,
    "HostedEntitySummary": HostedEntitySummary,
    "ImportableAgentEntitySummary": ImportableAgentEntitySummary,
    "ImportableAgentEntitySummaryCollection": ImportableAgentEntitySummaryCollection,
    "ImportableEnterpriseManagerEntity": ImportableEnterpriseManagerEntity,
    "ImportableEnterpriseManagerEntityCollection": ImportableEnterpriseManagerEntityCollection,
    "IngestDatabaseConfigurationDetails": IngestDatabaseConfigurationDetails,
    "IngestDatabaseConfigurationResponseDetails": IngestDatabaseConfigurationResponseDetails,
    "IngestHostConfigurationDetails": IngestHostConfigurationDetails,
    "IngestHostConfigurationResponseDetails": IngestHostConfigurationResponseDetails,
    "IngestHostMetricsDetails": IngestHostMetricsDetails,
    "IngestHostMetricsResponseDetails": IngestHostMetricsResponseDetails,
    "IngestSqlBucketDetails": IngestSqlBucketDetails,
    "IngestSqlBucketResponseDetails": IngestSqlBucketResponseDetails,
    "IngestSqlPlanLinesDetails": IngestSqlPlanLinesDetails,
    "IngestSqlPlanLinesResponseDetails": IngestSqlPlanLinesResponseDetails,
    "IngestSqlStatsDetails": IngestSqlStatsDetails,
    "IngestSqlStatsResponseDetails": IngestSqlStatsResponseDetails,
    "IngestSqlTextDetails": IngestSqlTextDetails,
    "IngestSqlTextResponseDetails": IngestSqlTextResponseDetails,
    "InstanceMetrics": InstanceMetrics,
    "MacsManagedExternalDatabaseConfigurationSummary": MacsManagedExternalDatabaseConfigurationSummary,
    "MacsManagedExternalDatabaseInsight": MacsManagedExternalDatabaseInsight,
    "MacsManagedExternalDatabaseInsightSummary": MacsManagedExternalDatabaseInsightSummary,
    "MacsManagedExternalHostConfigurationSummary": MacsManagedExternalHostConfigurationSummary,
    "MacsManagedExternalHostInsight": MacsManagedExternalHostInsight,
    "MacsManagedExternalHostInsightSummary": MacsManagedExternalHostInsightSummary,
    "OperationsInsightsPrivateEndpoint": OperationsInsightsPrivateEndpoint,
    "OperationsInsightsPrivateEndpointCollection": OperationsInsightsPrivateEndpointCollection,
    "OperationsInsightsPrivateEndpointSummary": OperationsInsightsPrivateEndpointSummary,
    "OperationsInsightsWarehouse": OperationsInsightsWarehouse,
    "OperationsInsightsWarehouseSummary": OperationsInsightsWarehouseSummary,
    "OperationsInsightsWarehouseSummaryCollection": OperationsInsightsWarehouseSummaryCollection,
    "OperationsInsightsWarehouseUser": OperationsInsightsWarehouseUser,
    "OperationsInsightsWarehouseUserSummary": OperationsInsightsWarehouseUserSummary,
    "OperationsInsightsWarehouseUserSummaryCollection": OperationsInsightsWarehouseUserSummaryCollection,
    "OperationsInsightsWarehouseUsers": OperationsInsightsWarehouseUsers,
    "OperationsInsightsWarehouses": OperationsInsightsWarehouses,
    "PeComanagedDatabaseConnectionDetails": PeComanagedDatabaseConnectionDetails,
    "PeComanagedDatabaseHostDetails": PeComanagedDatabaseHostDetails,
    "PeComanagedDatabaseInsight": PeComanagedDatabaseInsight,
    "PeComanagedDatabaseInsightSummary": PeComanagedDatabaseInsightSummary,
    "PeComanagedManagedExternalDatabaseConfigurationSummary": PeComanagedManagedExternalDatabaseConfigurationSummary,
    "ProjectedDataItem": ProjectedDataItem,
    "ResourceCapacityTrendAggregation": ResourceCapacityTrendAggregation,
    "ResourceInsightCurrentUtilization": ResourceInsightCurrentUtilization,
    "ResourceInsightProjectedUtilization": ResourceInsightProjectedUtilization,
    "ResourceInsightProjectedUtilizationItem": ResourceInsightProjectedUtilizationItem,
    "ResourceStatistics": ResourceStatistics,
    "ResourceStatisticsAggregation": ResourceStatisticsAggregation,
    "ResourceUsageSummary": ResourceUsageSummary,
    "ResourceUsageTrendAggregation": ResourceUsageTrendAggregation,
    "SqlBucket": SqlBucket,
    "SqlInsightAggregation": SqlInsightAggregation,
    "SqlInsightAggregationCollection": SqlInsightAggregationCollection,
    "SqlInsightThresholds": SqlInsightThresholds,
    "SqlInventory": SqlInventory,
    "SqlPlanCollection": SqlPlanCollection,
    "SqlPlanInsightAggregation": SqlPlanInsightAggregation,
    "SqlPlanInsightAggregationCollection": SqlPlanInsightAggregationCollection,
    "SqlPlanInsights": SqlPlanInsights,
    "SqlPlanLine": SqlPlanLine,
    "SqlPlanSummary": SqlPlanSummary,
    "SqlResponseTimeDistributionAggregation": SqlResponseTimeDistributionAggregation,
    "SqlResponseTimeDistributionAggregationCollection": SqlResponseTimeDistributionAggregationCollection,
    "SqlSearchCollection": SqlSearchCollection,
    "SqlSearchSummary": SqlSearchSummary,
    "SqlStatisticAggregation": SqlStatisticAggregation,
    "SqlStatisticAggregationCollection": SqlStatisticAggregationCollection,
    "SqlStatistics": SqlStatistics,
    "SqlStatisticsTimeSeries": SqlStatisticsTimeSeries,
    "SqlStatisticsTimeSeriesAggregation": SqlStatisticsTimeSeriesAggregation,
    "SqlStatisticsTimeSeriesAggregationCollection": SqlStatisticsTimeSeriesAggregationCollection,
    "SqlStatisticsTimeSeriesByPlanAggregation": SqlStatisticsTimeSeriesByPlanAggregation,
    "SqlStatisticsTimeSeriesByPlanAggregationCollection": SqlStatisticsTimeSeriesByPlanAggregationCollection,
    "SqlStats": SqlStats,
    "SqlText": SqlText,
    "SqlTextCollection": SqlTextCollection,
    "SqlTextSummary": SqlTextSummary,
    "StorageServerDetails": StorageServerDetails,
    "SummarizeAwrSourcesSummariesCollection": SummarizeAwrSourcesSummariesCollection,
    "SummarizeDatabaseInsightResourceCapacityTrendAggregationCollection": SummarizeDatabaseInsightResourceCapacityTrendAggregationCollection,
    "SummarizeDatabaseInsightResourceForecastTrendAggregation": SummarizeDatabaseInsightResourceForecastTrendAggregation,
    "SummarizeDatabaseInsightResourceStatisticsAggregationCollection": SummarizeDatabaseInsightResourceStatisticsAggregationCollection,
    "SummarizeDatabaseInsightResourceUsageAggregation": SummarizeDatabaseInsightResourceUsageAggregation,
    "SummarizeDatabaseInsightResourceUsageTrendAggregationCollection": SummarizeDatabaseInsightResourceUsageTrendAggregationCollection,
    "SummarizeDatabaseInsightResourceUtilizationInsightAggregation": SummarizeDatabaseInsightResourceUtilizationInsightAggregation,
    "SummarizeDatabaseInsightTablespaceUsageTrendAggregationCollection": SummarizeDatabaseInsightTablespaceUsageTrendAggregationCollection,
    "SummarizeExadataInsightResourceCapacityTrendAggregation": SummarizeExadataInsightResourceCapacityTrendAggregation,
    "SummarizeExadataInsightResourceCapacityTrendCollection": SummarizeExadataInsightResourceCapacityTrendCollection,
    "SummarizeExadataInsightResourceForecastTrendAggregation": SummarizeExadataInsightResourceForecastTrendAggregation,
    "SummarizeExadataInsightResourceForecastTrendCollection": SummarizeExadataInsightResourceForecastTrendCollection,
    "SummarizeExadataInsightResourceStatisticsAggregationCollection": SummarizeExadataInsightResourceStatisticsAggregationCollection,
    "SummarizeExadataInsightResourceUsageAggregation": SummarizeExadataInsightResourceUsageAggregation,
    "SummarizeExadataInsightResourceUsageCollection": SummarizeExadataInsightResourceUsageCollection,
    "SummarizeExadataInsightResourceUtilizationInsightAggregation": SummarizeExadataInsightResourceUtilizationInsightAggregation,
    "SummarizeHostInsightResourceCapacityTrendAggregationCollection": SummarizeHostInsightResourceCapacityTrendAggregationCollection,
    "SummarizeHostInsightResourceForecastTrendAggregation": SummarizeHostInsightResourceForecastTrendAggregation,
    "SummarizeHostInsightResourceStatisticsAggregationCollection": SummarizeHostInsightResourceStatisticsAggregationCollection,
    "SummarizeHostInsightResourceUsageAggregation": SummarizeHostInsightResourceUsageAggregation,
    "SummarizeHostInsightResourceUsageTrendAggregationCollection": SummarizeHostInsightResourceUsageTrendAggregationCollection,
    "SummarizeHostInsightResourceUtilizationInsightAggregation": SummarizeHostInsightResourceUtilizationInsightAggregation,
    "SummarizeHostInsightsTopProcessesUsageTrendCollection": SummarizeHostInsightsTopProcessesUsageTrendCollection,
    "SummarizeOperationsInsightsWarehouseResourceUsageAggregation": SummarizeOperationsInsightsWarehouseResourceUsageAggregation,
    "SummaryStatistics": SummaryStatistics,
    "TablespaceUsageTrend": TablespaceUsageTrend,
    "TablespaceUsageTrendAggregation": TablespaceUsageTrendAggregation,
    "TopProcessesUsageTrend": TopProcessesUsageTrend,
    "TopProcessesUsageTrendAggregation": TopProcessesUsageTrendAggregation,
    "UpdateAutonomousDatabaseInsightDetails": UpdateAutonomousDatabaseInsightDetails,
    "UpdateAwrHubDetails": UpdateAwrHubDetails,
    "UpdateDatabaseInsightDetails": UpdateDatabaseInsightDetails,
    "UpdateEmManagedExternalDatabaseInsightDetails": UpdateEmManagedExternalDatabaseInsightDetails,
    "UpdateEmManagedExternalExadataInsightDetails": UpdateEmManagedExternalExadataInsightDetails,
    "UpdateEmManagedExternalHostInsightDetails": UpdateEmManagedExternalHostInsightDetails,
    "UpdateEnterpriseManagerBridgeDetails": UpdateEnterpriseManagerBridgeDetails,
    "UpdateExadataInsightDetails": UpdateExadataInsightDetails,
    "UpdateHostInsightDetails": UpdateHostInsightDetails,
    "UpdateMacsManagedExternalDatabaseInsightDetails": UpdateMacsManagedExternalDatabaseInsightDetails,
    "UpdateMacsManagedExternalHostInsightDetails": UpdateMacsManagedExternalHostInsightDetails,
    "UpdateOperationsInsightsPrivateEndpointDetails": UpdateOperationsInsightsPrivateEndpointDetails,
    "UpdateOperationsInsightsWarehouseDetails": UpdateOperationsInsightsWarehouseDetails,
    "UpdateOperationsInsightsWarehouseUserDetails": UpdateOperationsInsightsWarehouseUserDetails,
    "UpdatePeComanagedDatabaseInsightDetails": UpdatePeComanagedDatabaseInsightDetails,
    "WorkRequest": WorkRequest,
    "WorkRequestCollection": WorkRequestCollection,
    "WorkRequestError": WorkRequestError,
    "WorkRequestErrorCollection": WorkRequestErrorCollection,
    "WorkRequestLogEntry": WorkRequestLogEntry,
    "WorkRequestLogEntryCollection": WorkRequestLogEntryCollection,
    "WorkRequestResource": WorkRequestResource,
    "WorkRequests": WorkRequests
}
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPathos(PythonPackage):
    """Parallel graph management and execution in heterogeneous computing """
    # upstream project page and the PyPI source archive template
    homepage = "https://github.com/uqfoundation/pathos"
    url = "https://pypi.io/packages/source/p/pathos/pathos-0.2.0.zip"
    # known release with its sha256 checksum of the source archive
    version('0.2.0', sha256='2f4e67e7914c95fb0cce766bab173eb2c5860ee420108fa183099557ac2e50e9')
    # supports Python 2.6-2.8 and 3.1+
    depends_on('python@2.6:2.8,3.1:')
    # build-time only dependency
    depends_on('py-setuptools@0.6:', type='build')
    # needed both to build and at run time
    depends_on('py-multiprocess@0.70.4:', type=('build', 'run'))
    depends_on('py-pox@0.2.2:', type=('build', 'run'))
    depends_on('py-ppft@1.6.4.5:', type=('build', 'run'))
    depends_on('py-dill@0.2.5:', type=('build', 'run'))
|
from django.db import models
from django.utils.translation import gettext as _
# Create your models here.
class Article(models.Model):
    """A simple article with a translatable title and body text."""
    # headline, capped at 150 characters
    title = models.CharField(_("Title"), max_length=150)
    # unbounded article body
    content = models.TextField(_("Content"))
    def __str__(self):
        # shown in the Django admin and shell representations
        return self.title
|
from __future__ import annotations
from typing import Any, Dict, List, Optional, Union
from pydantic import BaseModel
from enum import Enum
class RoleEnum(str, Enum):
    """String-valued roles recognized by the permission system."""
    SUPER = 'super'  # super administrator
    TEST = 'test'    # tester
    USER = 'user'    # regular user
class RouteMeta(BaseModel):
    """Per-route metadata consumed by the frontend router / menu / tab system."""
    # menu / page title
    title: str
    # whether to skip the permission check for this route
    ignoreAuth: Optional[bool]
    # roles allowed to access this route
    roles: Optional[List[RoleEnum]]
    # whether to exclude the page from keep-alive caching
    ignoreKeepAlive: Optional[bool]
    # whether the tab is pinned (fixed) on the tab bar
    affix: Optional[bool]
    # icon shown on the tab
    icon: Optional[str]
    # presumably the src URL of an embedded frame — confirm with frontend usage
    frameSrc: Optional[str]
    # name of the page transition animation
    transitionName: Optional[str]
    # hide this route in the breadcrumb trail
    hideBreadcrumb: Optional[bool]
    # hide the submenu entries of this route
    hideChildrenInMenu: Optional[bool]
    # whether the route carries parameters
    carryParam: Optional[bool]
    # used internally to mark single-level menus
    single: Optional[bool]
    # menu entry to highlight while this route is active
    currentActiveMenu: Optional[str]
    # never show in the tab bar
    hideTab: Optional[bool]
    # never show in the menu
    hideMenu: Optional[bool]
    # NOTE(review): looks like this flags an external link — verify against caller
    isLink: Optional[bool]
class Recordable(BaseModel):
    """Generic string-to-string mapping (pydantic v1 custom root type)."""
    __root__: Dict[str, str]
class RouteItem(BaseModel):
    """A single entry of the frontend routing table.

    `component` is typed `Any` because its concrete form (component reference
    or lazy-import string) is decided by the frontend consumer.
    """
    path: str
    component: Any
    meta: RouteMeta
    name: Optional[str]
    # fixed: dropped a stray trailing semicolon (valid but un-Pythonic)
    props: Optional[Recordable]
    alias: Optional[Union[str, List[str]]]
    redirect: Optional[str]
    caseSensitive: Optional[bool]
    # recursive children; resolved by update_forward_refs() below
    children: Optional[List[RouteItem]]
# resolve the self-referential `children` annotation (pydantic v1)
RouteItem.update_forward_refs()
class Solution:
    def maxProfit(self, prices: List[int], fee: int) -> int:
        """Maximum profit from stock trades with a per-transaction fee.

        Classic two-state DP: `cash` is the best profit while holding no
        stock, `hold` is the best profit while holding one share.

        :param prices: daily stock prices
        :param fee: fee charged on each sell
        :return: maximum achievable profit (0 for empty or single-day input)
        """
        if not prices:  # fixed: original raised IndexError on an empty list
            return 0
        cash = 0
        hold = -prices[0]  # bought on day 0
        for price in prices[1:]:
            # either sell today (paying the fee) or keep yesterday's cash state
            cash = max(cash, hold + price - fee)
            # either buy today out of realized profit or keep holding
            hold = max(hold, cash - price)
        return cash
"""
Soft-Encoding Utils
+ Convert ab channels to categorical data as Zhang et al. paper
References:
+ https://github.com/foamliu/Colorful-Image-Colorization
"""
import numpy as np
import sklearn.neighbors as nn
class ColorizedSoftEncoding(object):
    """Convert the ab channels of a Lab image into categorical (soft-encoded)
    targets, following Zhang et al.'s colorization paper.

    pts_in_hull.npy --> array of pts in colorspaces ab for categorical data
    (shape: (??, 2))

    Usage:
        soft_encoding = ColorizedSoftEncoding(pts_in_hull_path="pts_in_hull.npy",
                                              nb_neighbors=5, sigma_neighbor=5)
        image_Lab = read_image(...)["org_image_Lab"]
        y = soft_encoding(image_Lab)
    """
    def __init__(self, pts_in_hull_path, nb_neighbors = 5, sigma_neighbor = 5):
        """Load the quantized ab bins and fit the nearest-neighbour searcher.

        :param pts_in_hull_path: path to the .npy file of quantized ab points
        :param nb_neighbors: number of nearest bins each pixel is spread over
        :param sigma_neighbor: Gaussian sigma used to weight the neighbours
        """
        self.pts_in_hull_path = pts_in_hull_path
        self.nb_neighbors = nb_neighbors
        self.sigma_neighbor = sigma_neighbor
        self.q_ab, self.nn_finder = load_nn_finder(self.pts_in_hull_path, self.nb_neighbors)
        # number of quantized color bins (one output channel per bin)
        self.nb_q = self.q_ab.shape[0]
        # fixed: removed a stray no-op `pass` at the end of __init__
    # __init__
    def __call__(self, image_Lab):
        """Soft-encode *image_Lab*; the last input/output are kept on the instance."""
        self.input = image_Lab
        self.output = get_soft_encoding(self.input, self.nn_finder, self.nb_q, self.sigma_neighbor)
        return self.output
    # __call__
# ColorizedSoftEncoding
def load_nn_finder(pts_in_hull_path, nb_neighbors = 5):
    """Load the quantized ab bins and fit a nearest-neighbour searcher on them.

    Returns (q_ab, nn_finder): the array of quantized ab points and a fitted
    sklearn NearestNeighbors instance backed by a ball tree.
    """
    quantized_ab = np.load(pts_in_hull_path)
    finder = nn.NearestNeighbors(
        n_neighbors=nb_neighbors, algorithm='ball_tree').fit(quantized_ab)
    return quantized_ab, finder
# load_nn_finder
def get_soft_encoding(image_Lab, nn_finder, nb_q, sigma_neighbor = 5):
    """Soft-encode the ab channels of a Lab image over nb_q color bins.

    Each pixel's ab value is spread over its nearest quantized bins with
    Gaussian weights, normalized to sum to 1 per pixel.

    image_Lab = read_image("...")["res_image_Lab"]
    q_ab, nn_finder = load_nn_finder("pts_in_hull.npy", nb_neighbors = 5)
    y = get_soft_encoding(image_Lab, nn_finder, nb_q = q_ab.shape[0], sigma_neighbor = 5)
    """
    # ab channels, shifted back to the signed range (preprocessing added 128)
    ab_channels = image_Lab[:, :, 1:].astype(np.int32) - 128
    height, width = ab_channels.shape[:2]
    # flatten to an (h*w, 2) list of ab pairs
    ab_pairs = np.column_stack((ab_channels[:, :, 0].ravel(),
                                ab_channels[:, :, 1].ravel()))
    # distances to, and indices of, the nearest quantized bins
    distances, neighbor_idx = nn_finder.kneighbors(ab_pairs)
    # Gaussian-smoothed weights, normalized per pixel
    weights = np.exp(-distances ** 2 / (2 * sigma_neighbor ** 2))
    weights /= weights.sum(axis=1, keepdims=True)
    # scatter the weights into the categorical target
    encoded = np.zeros((ab_pairs.shape[0], nb_q))
    encoded[np.arange(ab_pairs.shape[0])[:, np.newaxis], neighbor_idx] = weights
    return encoded.reshape(height, width, nb_q)
# get_soft_encoding
def valid_number(phonenumber):
    """Return True if *phonenumber* is exactly 12 characters long.

    NOTE(review): only the length is checked (e.g. country code + 10 digits);
    the characters are not verified to be digits.
    """
    # idiom fix: return the comparison directly instead of if/return True/False
    return len(phonenumber) == 12
# demo: a 12-character number string is reported as valid (prints True)
k = valid_number("919492331133")
print(k)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import tempfile
import math
import numpy as _np
import mxnet as mx
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.append(os.path.join(curr_path, '../python/unittest/'))
from mxnet.test_utils import rand_ndarray, assert_almost_equal, rand_coord_2d, default_context, check_symbolic_forward, create_2d_tensor, use_np
from mxnet import gluon, np, npx
from common import with_seed
import pytest
# dimension constants used to size the test tensors below
MEDIUM_X = 10000      # medium axis length
LARGE_X = 100000000   # large axis length (stress dimension)
SMALL_X = 100         # small axis length
SMALL_Y = 50          # small axis length (second dim)
@use_np
def test_gluon_embedding():
    """Embedding layer on a medium-sized table: check output shape and size."""
    layer = gluon.nn.Embedding(SMALL_Y, MEDIUM_X)
    layer.initialize()
    indices = np.zeros((MEDIUM_X, SMALL_Y))
    out = layer(indices)
    assert out.shape == (MEDIUM_X, SMALL_Y, MEDIUM_X)
    assert out.asnumpy().size == MEDIUM_X * SMALL_Y * MEDIUM_X
@use_np
def test_fully_connected():
    """FullyConnected over a large input, with and without a bias term."""
    data = np.ones(shape=(LARGE_X, SMALL_Y))
    weight = np.ones(shape=(SMALL_Y, SMALL_Y))
    bias = np.ones(shape=(weight.shape[0],))
    # without bias: every output element equals the reduced dim (sum of ones)
    out = mx.npx.fully_connected(data, weight, num_hidden=weight.shape[0], no_bias=True)
    assert np.sum(out[-1] == data.shape[1]) == weight.shape[0]
    # with a ones-bias: every output element is one larger
    out = mx.npx.fully_connected(data, weight, bias, num_hidden=weight.shape[0], no_bias=False)
    assert np.sum(out[-1] == data.shape[1] + 1) == weight.shape[0]
@use_np
def test_dense():
    """gluon Dense layer over a large batch: output shape check."""
    inputs = np.ones(shape=(LARGE_X, SMALL_X))
    layer = gluon.nn.Dense(SMALL_Y)
    layer.initialize()
    out = layer(inputs)
    assert out.shape == (LARGE_X, SMALL_Y)
@use_np
def test_softmax():
    """softmax of a uniform matrix is 1/axis_len everywhere, along both axes."""
    data = np.ones((SMALL_Y, LARGE_X))
    for ax in [0, 1]:
        expected = np.full((SMALL_Y, LARGE_X), (1 / data.shape[ax]))
        got = npx.softmax(data, axis=ax)
        assert_almost_equal(got.asnumpy(), expected, rtol=1e-5, atol=1e-5)
|
# Demo: sum the elements of `num`, stopping after the first two.
num = [1, 2, 3]
num_sum = 0
count = 0
for x in num:
    num_sum += x
    count += 1
    if count == 2:
        # stop once two elements have been consumed
        break
print(num_sum)
print(count)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.