from django.contrib import admin
from ..models import DocumentImage
class DocumentImageAdmin(admin.StackedInline):
"""
Admin Interface to for the DocumentImage module.
Inheriting from `admin.StackedInline`.
"""
model = DocumentImage
search_fields = ["name"]
fields = ["name", "image", "image_tag"]
superuser_fields = ["confirmed"]
readonly_fields = ["image_tag"]
autocomplete_fields = ["document"]
insert_after = "autocomplete_fields"
extra = 0
def get_fields(self, request, obj=None):
"""Override djangos get_fields function
to add custom superuser fields to the
admin interface if the user has the corresponding
access rights.
:param request: current request
:type request: django.http.request
:param obj: [description], defaults to None
:type obj: django.db.models, optional
:return: custom fields list
:rtype: list[str]
"""
if request.user.is_superuser and self.superuser_fields:
return (self.fields or tuple()) + self.superuser_fields
return super(DocumentImageAdmin, self).get_fields(request, obj)
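# Usage sketch (not part of the original file): a StackedInline is normally
# attached to a parent ModelAdmin rather than registered on its own.
# `Document` and `DocumentAdmin` below are assumed names for the related
# model and its admin class.
#
# from ..models import Document
#
# @admin.register(Document)
# class DocumentAdmin(admin.ModelAdmin):
#     inlines = [DocumentImageAdmin]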
|
import six
import pytz
import datetime
from django.utils.encoding import smart_text
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import serializers
class RecursiveField(serializers.Serializer):
"""
Field for recursive tree scaling with set depth.
many=True should be passed
:param max_depth: int - depth of tree should be passed, None causes to full tree scaling
"""
def __init__(self, *args, **kwargs):
self._max_depth = kwargs.pop('max_depth', None)
self._recurse_lvl = 1
self._recurse_many = kwargs.pop('many', False)
super().__init__(**kwargs)
def to_representation(self, value):
parent = self.parent # parent is parent serializer instance
if isinstance(parent, serializers.ListSerializer):
parent = parent.parent
lvl = getattr(parent, '_recurse_lvl', 1)
max_lvl = self._max_depth or getattr(parent, '_recurse_max', None)
serializer_class = parent.__class__
if not max_lvl or lvl <= max_lvl:
serializer = serializer_class(
value, many=self._recurse_many, context=self.context)
serializer._recurse_lvl = lvl + 1
serializer._max_depth = max_lvl
return serializer.data
else:
return value.id
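# Usage sketch (assumption, not taken from this file): RecursiveField is meant
# to be declared on a serializer for a self-referential model, e.g. a category
# tree exposing its children under a `children` related name.
#
# class CategorySerializer(serializers.ModelSerializer):
#     children = RecursiveField(many=True, max_depth=3)
#
#     class Meta:
#         model = Category  # hypothetical self-referential model
#         fields = ['id', 'name', 'children']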
class TimestampField(serializers.Field):
"""
Convert a django datetime to/from timestamp.
"""
def to_representation(self, value):
"""
Convert the field to its internal representation (aka timestamp)
:param value: the DateTime value
:return: a UTC timestamp integer
"""
request = self.context.get('request')
if not request:
return value.timestamp()
try:
user_timezone_text = request.user.timezone
except AttributeError:
user_timezone_text = 'UTC'
user_timezone = pytz.timezone(user_timezone_text)
ts = value.astimezone(user_timezone).timestamp()
return ts
def to_internal_value(self, value):
"""
deserialize a timestamp to a DateTime value
:param value: the timestamp value
:return: a django DateTime value
"""
converted = datetime.datetime.fromtimestamp(float('%s' % value))
return converted
class TimestampFromDateField(TimestampField):
"""
Convert a django date to/from timestamp.
"""
def to_representation(self, value):
"""
Convert the field to its internal representation (aka timestamp)
:param value: the Date value
:return: a UTC timestamp integer
"""
_datetime = datetime.datetime.combine(value, datetime.time.min)
ts = _datetime.timestamp()
return ts
class NaturalChoiceField(serializers.ChoiceField):
"""
Choice field that really get receives and validates
values for external representation that defined in choices
and convert it for internal values defined in choices.
And do the opposite action too.
Other behavior is totally the same as in ChoiceField.
Usage:
CHOICES = (("internal", "external"), )
field = NaturalChoiceField(choices=CHOICES)
field.to_representation("internal")
>> "external"
field.to_representation("external")
>> "external"
field.to_internal_value("external")
>> "internal"
field.to_internal_value("internal")
>> ValidationError: [ErrorDetail(...)]
"""
def to_internal_value(self, data):
"""
Overwritten .to_internal_value() of ChoiceField.
Changed behaviour in try: ... block of code. Originally
it returns `self.choice_strings_to_values[six.text_type(data)]`.
:param data:
:return: internal representation of external value from choices
or raise ValidationError
"""
if data == '' and self.allow_blank:
return ''
try:
for internal, external in self.choice_strings_to_values.items():
if external == data:
return internal
else:
raise KeyError
except KeyError:
self.fail('invalid_choice', input=data)
def _get_choices(self):
return self._choices
@property
def choices(self):
return self._choices
@choices.setter
def choices(self, choices):
# Map the string representation of choices to the underlying value.
# Allows us to deal with eg. integer choices while supporting either
# integer or string input, but still get the correct datatype out.
super()._set_choices(choices)
self.choice_strings_to_values = {
key: val for key, val in self.choices.items()
}
def to_representation(self, value):
if value in ('', None):
return value
return self.choice_strings_to_values.get(value, value)
class GetOrCreateSlugRelatedField(serializers.SlugRelatedField):
"""
A SlugRelatedField that make possible either to create relations
between objects that has already exists and which should be created.
"""
def __init__(self, slug_field=None, get_or_create=True, **kwargs):
self.get_or_create = get_or_create
super().__init__(slug_field, **kwargs)
def do_action(self, data):
queryset = self.get_queryset()
action = 'get_or_create' if self.get_or_create else 'get'
attr = getattr(queryset, action)
if action == 'get':
return attr(**{self.slug_field: data})
else:
return attr(**{self.slug_field: data})[0]
def to_internal_value(self, data):
"""
Overwritten parent method in `try` block to make possible perform
`get` or `get_or_create` action.
"""
try:
return self.do_action(data)
except ObjectDoesNotExist:
self.fail('does_not_exist', slug_name=self.slug_field, value=smart_text(data))
except (TypeError, ValueError):
self.fail('invalid')
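# Usage sketch (assumption): GetOrCreateSlugRelatedField reads and writes the
# related object by its slug, creating it on write when it does not exist yet.
# `Tag` and `ArticleSerializer` are hypothetical names.
#
# class ArticleSerializer(serializers.ModelSerializer):
#     tags = GetOrCreateSlugRelatedField(
#         slug_field='name', many=True, queryset=Tag.objects.all())
#
#     class Meta:
#         model = Article
#         fields = ['id', 'title', 'tags']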
|
from django.views.generic import TemplateView
from django.views.decorators.cache import never_cache
from rest_framework import viewsets
from rest_framework import generics
from django.shortcuts import render
from .models import Message, MessageSerializer, Todo, TodoSerializer
# Serve Vue Application
index_view = never_cache(TemplateView.as_view(template_name='index.html'))
class MessageViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows messages to be viewed or edited.
"""
queryset = Message.objects.all()
serializer_class = MessageSerializer
class ListTodo(generics.ListCreateAPIView):
queryset = Todo.objects.all()
serializer_class = TodoSerializer
class DetailTodo(generics.RetrieveUpdateDestroyAPIView):
queryset = Todo.objects.all()
serializer_class = TodoSerializer
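# Possible URL wiring for these views (assumption; the project's actual
# urls.py is not shown): a DRF router for the viewset plus plain paths for
# the generic Todo views and the Vue index.
#
# from django.urls import path, include
# from rest_framework.routers import DefaultRouter
#
# router = DefaultRouter()
# router.register(r'messages', MessageViewSet)
#
# urlpatterns = [
#     path('api/', include(router.urls)),
#     path('api/todos/', ListTodo.as_view()),
#     path('api/todos/<int:pk>/', DetailTodo.as_view()),
#     path('', index_view, name='index'),
# ]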
|
import os
import imageio
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import load_mnist as mnist
# See http://lyy1994.github.io/machine-learning/2017/04/17/RBM-tensorflow-implementation.html
def weight(shape, name='weights'):
return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name=name)
def bias(shape, name='biases'):
return tf.Variable(tf.constant(0.1, shape=shape), name=name)
class RBM:
i = 0 # flipping index for computing pseudo likelihood
def __init__(self, n_visible=784, n_hidden=500, k=30, momentum=False):
self.n_visible = n_visible
self.n_hidden = n_hidden
self.k = k
self.lr = tf.placeholder(tf.float32)
if momentum:
self.momentum = tf.placeholder(tf.float32)
else:
self.momentum = 0.0
self.w = weight([n_visible, n_hidden], 'w')
self.hb = bias([n_hidden], 'hb')
self.vb = bias([n_visible], 'vb')
self.w_v = tf.Variable(tf.zeros([n_visible, n_hidden]), dtype=tf.float32)
self.hb_v = tf.Variable(tf.zeros([n_hidden]), dtype=tf.float32)
self.vb_v = tf.Variable(tf.zeros([n_visible]), dtype=tf.float32)
def propup(self, visible):
pre_sigmoid_activation = tf.matmul(visible, self.w) + self.hb
return tf.nn.sigmoid(pre_sigmoid_activation)
def propdown(self, hidden):
pre_sigmoid_activation = tf.matmul(hidden, tf.transpose(self.w)) + self.vb
return tf.nn.sigmoid(pre_sigmoid_activation)
def sample_h_given_v(self, v_sample):
h_props = self.propup(v_sample)
h_sample = tf.nn.relu(tf.sign(h_props - tf.random_uniform(tf.shape(h_props))))
return h_sample
def sample_v_given_h(self, h_sample):
v_props = self.propdown(h_sample)
v_sample = tf.nn.relu(tf.sign(v_props - tf.random_uniform(tf.shape(v_props))))
return v_sample
def CD_k(self, visibles):
# k steps gibbs sampling
v_samples = visibles
h_samples = self.sample_h_given_v(v_samples)
for _ in range(self.k):
v_samples = self.sample_v_given_h(h_samples)
h_samples = self.sample_h_given_v(v_samples)
h0_props = self.propup(visibles)
w_positive_grad = tf.matmul(tf.transpose(visibles), h0_props)
w_negative_grad = tf.matmul(tf.transpose(v_samples), h_samples)
w_grad = (w_positive_grad - w_negative_grad) / tf.to_float(tf.shape(visibles)[0])
hb_grad = tf.reduce_mean(h0_props - h_samples, 0)
vb_grad = tf.reduce_mean(visibles - v_samples, 0)
return w_grad, hb_grad, vb_grad
def learn(self, visibles):
w_grad, hb_grad, vb_grad = self.CD_k(visibles)
# compute new velocities
new_w_v = self.momentum * self.w_v + self.lr * w_grad
new_hb_v = self.momentum * self.hb_v + self.lr * hb_grad
new_vb_v = self.momentum * self.vb_v + self.lr * vb_grad
# update parameters
update_w = tf.assign(self.w, self.w + new_w_v)
update_hb = tf.assign(self.hb, self.hb + new_hb_v)
update_vb = tf.assign(self.vb, self.vb + new_vb_v)
# update velocities
update_w_v = tf.assign(self.w_v, new_w_v)
update_hb_v = tf.assign(self.hb_v, new_hb_v)
update_vb_v = tf.assign(self.vb_v, new_vb_v)
return [update_w, update_hb, update_vb, update_w_v, update_hb_v, update_vb_v]
def sampler(self, visibles, steps=5000):
v_samples = visibles
for _ in range(steps):
v_samples = self.sample_v_given_h(self.sample_h_given_v(v_samples))
return v_samples
def free_energy(self, visibles):
first_term = tf.matmul(visibles, tf.reshape(self.vb, [tf.shape(self.vb)[0], 1]))
second_term = tf.reduce_sum(
tf.log(1 + tf.exp(self.hb + tf.matmul(visibles, self.w))), axis=1)
return - first_term - second_term
def pseudo_likelihood(self, visibles):
x = tf.round(visibles)
x_fe = self.free_energy(x)
split0, split1, split2 = tf.split(x, [self.i, 1, tf.shape(x)[1] - self.i - 1], 1)
xi = tf.concat([split0, 1 - split1, split2], 1)
self.i = (self.i + 1) % self.n_visible
xi_fe = self.free_energy(xi)
return tf.reduce_mean(self.n_visible * tf.log(tf.nn.sigmoid(xi_fe - x_fe)), axis=0)
def save_images(images, size, path):
"""
Save the samples images.
The best size number is
int(max(sqrt(image.shape[0]),sqrt(image.shape[1]))) + 1
example:
The batch_size is 64, then the size is recommended [8, 8]
The batch_size is 32, then the size is recommended [6, 6]
"""
img = (images + 1.0) / 2.0
h, w = img.shape[1], img.shape[2]
merge_img = np.zeros((h * size[0], w * size[1]))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
merge_img[(j * h) : (j * h + h), (i * w) : (i * w + w)] = image
return imageio.imwrite(path, merge_img)
def train(train_data, epoches):
logs_dir = "./logs"
samples_dir = "./samples"
x = tf.placeholder(tf.float32, shape=[None, 784])
noise_x, _ = train_data.sample_batch()
# noise_x = tf.random_normal([train_data.batch_size, 784])
rbm = RBM()
step = rbm.learn(x)
sampler = rbm.sampler(x, 200) # TODO
pl = rbm.pseudo_likelihood(x)
saver = tf.train.Saver()
print("Initialization finished!")
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
mean_cost = []
epoch = 1
for i in range(epoches * train_data.batch_num):
# Draw samples (10 images)
if i % (epoches * train_data.batch_num // 10) == 0:
samples = sess.run(sampler, feed_dict = {x: noise_x})
samples = samples.reshape([train_data.batch_size, 28, 28])
save_images(samples, [8, 8],
os.path.join(samples_dir,
'iteration_%d.png' % (i / train_data.batch_num)))
print('Saved samples.')
batch_x, _ = train_data.next_batch()
sess.run(step, feed_dict = {x: batch_x, rbm.lr: 0.1})
cost = sess.run(pl, feed_dict = {x: batch_x})
mean_cost.append(cost)
# Save model
if i != 0 and train_data.batch_index == 0:
checkpoint_path = os.path.join(logs_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step = epoch + 1)
print('Saved Model.')
# Print pseudo likelihood
if i != 0 and train_data.batch_index == 0:
print('Epoch %d Cost %g' % (epoch, np.mean(mean_cost)))
mean_cost = []
epoch += 1
print('Test')
samples = sess.run(sampler, feed_dict = {x: noise_x})
samples = samples.reshape([train_data.batch_size, 28, 28])
save_images(samples, [8, 8], os.path.join(samples_dir, 'test.png'))
print('Saved samples.')
data_path = "../../machine-learning/data/mnist/"
train_data = mnist.MNIST("train", data_path, data_size=256, batch_size=64)
# test_data = mnist.MNIST("test", data_path)
train(train_data, 10)
|
'''
Author: Lázaro Martínez Abraham Josué
Title: leergramatica.py
Version: 1.0
Date: December 6, 2020
'''
def informacion(nombre):
'''Reads the grammar information from the file
Parameters
nombre: name of the file that contains the grammar information
return: dictionary with the grammar information'''
import sys
if sys.platform != "linux":
sec = "->"
else:
sec = "→"
archivo = open(nombre,"r")
datos = archivo.readlines()
archivo.close()
info={}
for linea in datos:
# if the line contains the non-terminals
if "No terminales:" in linea:
N = linea.split("No terminales:")[1].strip().split(" ")
info["N"]=N
# if the line contains the terminals
elif "Terminales:" in linea:
M = linea.split("Terminales:")[1].strip().split(" ")
info["M"]=M
if "First" not in info:
info["First"]={}
for i in M:
info["First"][i]=[i]
# if the line contains the word First, it is the specification of the First sets
elif "First" in linea:
primSep = linea.split(":")
cabecera = primSep[0].strip().split(" ")[1]
elem = primSep[1].strip().split(" ")
elemp =[]
for i in elem:
if i == "eps":
elemp.append("")
else:
elemp.append(i)
if "First" not in info:
info["First"]={}
info["First"][cabecera]=elemp
# if this line comes after the line that says "Producciones"
elif datos.index(linea)>datos.index("Producciones\n"):
if "Gramatica" not in info:
info["Gramatica"]={}
if "Producciones" not in info:
info["Producciones"]=[]
separacion = linea.split(sec)
cabecera = separacion[0].strip()
produccion = separacion[1].strip().split(" ")
produccionp = []
for i in produccion:
if i == "eps":
produccionp.append("")
else:
produccionp.append(i)
produccion = produccionp
if cabecera not in info["Gramatica"]:
info["Gramatica"][cabecera]=[produccion]
else:
info["Gramatica"][cabecera].append(produccion)
info["Producciones"].append([cabecera,produccion])
elif "Inicial" in linea:
sep = linea.split("Inicial:")[1].strip()
info["Inicial"]=sep
return info
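# Illustrative input format, reconstructed from the parsing logic above
# (no sample file is included here; the production separator is "→" on
# Linux and "->" elsewhere):
#
#   No terminales: S A
#   Terminales: a b
#   First S: a eps
#   First A: b
#   Inicial: S
#   Producciones
#   S -> a A
#   S -> eps
#   A -> b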
def imprimirGramatica(info):
for key in info:
print(key,info[key])
print("*"*70)
if __name__ == '__main__':
nombre = "a"
info=informacion(nombre)
for key in info:
print(key,info[key])
|
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from .tool import ToolsSupported
from .workspace import Workspace
from .settings import ProjectSettings
import build
help = 'Flash a project'
def run(args):
#first build a project then flash it
build.run(args)
# time to flash
if args.file:
# known project from records
workspace = Workspace(args.file, os.getcwd())
if args.project:
workspace.flash_project(args.project, args.tool)
else:
workspace.flash_projects(args.tool)
else:
# project not known by pgen
project_settings = ProjectSettings()
project_files = [os.path.join(args.directory, args.project)]
flasher = ToolsSupported().get_value(args.tool, 'flasher')
build(flasher, args.project, project_files, args.tool, project_settings)
def setup(subparser):
subparser.add_argument(
"-f", "--file", help="YAML projects file")
subparser.add_argument("-p", "--project", help="Name of the project to flash")
subparser.add_argument(
"-t", "--tool", help="Flash a project files for provided tool")
subparser.add_argument(
"-dir", "--directory", help="The projects directory")
subparser.add_argument(
"-defdir", "--defdirectory",
help="Path to the definitions, otherwise default (~/.pg/definitions) is used")
|
"""
Day 18
"""
from typing import List, Union
import operator
def convert(day_input: List[str]) -> List[List[str]]:
"""Return each input token in a separate position"""
return [line.replace('(', '( ').replace(')', ' )').split() for line in day_input]
def eval_infix(expr: List[str]) -> str:
"""Evals expr sequentially, just respecting precedence of *(* and *)*.
Consumes the evaluated tokens in *expr*, including the final *)* if its the
last token of the expression
Note: expects *expr* to be in infix notation but reversed, as it uses
pop/append to manipulate *expr*"""
ops = {'+': operator.add, '*': operator.mul}
while len(expr) >= 1:
arg1 = expr.pop()
if arg1 == '(': arg1 = eval_infix(expr)
if len(expr) == 0: return arg1
op = expr.pop()
if op == ')': return arg1
arg2 = expr.pop()
if arg2 == '(': arg2 = eval_infix(expr)
expr.append(str(ops[op](int(arg1), int(arg2))))
return expr[0]
list_reverse = lambda l: list(reversed(l))
def solve_part_one(day_input: List[str]) -> int:
res = [int(eval_infix(list_reverse(line))) for line in convert(day_input)]
return sum(res)
def solve_part_two(day_input: List[str]) -> int:
def find_expr_boundary(line: List[str], start_idx: int, step: int, lvl_up: str, lvl_down: str) -> int:
"""Finds the boundary of an expression, starting at *start_idx*, in direction *step*,
considering that *lvl_up* and *lvl_down* delimit sub-expressions. This makes it usable
to find both boundaries to the left or right of a position"""
lvl, idx = 0, start_idx + step
while lvl > 0 or line[idx] == lvl_up:
# Increase or decrease level depending on this position
lvl = lvl + (line[idx] == lvl_up) - (line[idx] == lvl_down)
# If reach the boundary break to avoid adding step one more time
if lvl == 0: break
idx += step
return idx
# Strategy is to add '(' and ')' around all the '+' operators found, so that
# the eval function can work sequentially, just giving priority to expressions
# surrounded by '(' ')'
res = []
for line in convert(day_input):
idx = 0
while idx < len(line):
if line[idx] == '+':
at = find_expr_boundary(line, idx, -1, ')', '(')
line.insert(at, '(')
at = find_expr_boundary(line, idx + 1, +1, '(', ')')
line.insert(at + 1, ')')
idx += 2 # Inserted 2, so advance
idx += 1
res.append(int(eval_infix(list_reverse(line))))
return sum(res)
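# Quick sanity check with the example expression from the puzzle statement
# (assumed): left-to-right evaluation gives 71, while giving '+' precedence
# over '*' gives 231.
assert solve_part_one(['1 + 2 * 3 + 4 * 5 + 6']) == 71
assert solve_part_two(['1 + 2 * 3 + 4 * 5 + 6']) == 231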
|
# Generated by Django 2.0.3 on 2018-07-26 21:26
import apps.core.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reddit', '0004_auto_20180726_1519'),
]
operations = [
migrations.AddField(
model_name='post',
name='media_link',
field=apps.core.fields.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='post',
name='media_type',
field=models.CharField(choices=[('link', 'link'), ('text', 'text'), ('photo', 'photo'), ('video', 'video')], default='link', max_length=200),
),
migrations.AddField(
model_name='post',
name='nsfw',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='post',
name='score',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='post',
name='text',
field=models.TextField(blank=True, null=True),
),
]
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from nova.api.openstack import extensions
from nova import compute
from nova import exception
ALIAS = "os-server-diagnostics"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
class ServerDiagnosticsController(object):
@extensions.expected_errors(404)
def index(self, req, server_id):
context = req.environ["nova.context"]
authorize(context)
compute_api = compute.API()
try:
instance = compute_api.get(context, server_id)
except exception.InstanceNotFound as e:
raise webob.exc.HTTPNotFound(e.format_message())
return compute_api.get_diagnostics(context, instance)
class ServerDiagnostics(extensions.V3APIExtensionBase):
"""Allow Admins to view server diagnostics through server action."""
name = "ServerDiagnostics"
alias = ALIAS
version = 1
def get_resources(self):
parent_def = {'member_name': 'server', 'collection_name': 'servers'}
resources = [
extensions.ResourceExtension(ALIAS,
ServerDiagnosticsController(),
parent=parent_def)]
return resources
def get_controller_extensions(self):
return []
|
import uuid
import json
import health_inspector
g_client = None
CATEGORY_WORKER = 4
HEALTH_INSPECTOR_MODULE_ID = uuid.UUID('4e5f74d0-4705-11ec-abd0-e12370ec4fc6')
def init(client, **kwargs):
"""
:param client:
:param kwargs:
:return:
"""
global g_client
g_client = client
return True
def run(message, **kwargs):
"""
:param bytes message:
:param kwargs:
:return bytes or None: None if post will happen asynchronously
"""
#message_dict = json.loads(message.decode('utf-8'))
result = health_inspector.main()
result = "\n".join(result)
result = ''.join([c for c in result if ord(c) > 31 or ord(c) == 9])
# Turn into bytes
message = result.encode('utf-8')
#return message
return message
def getinfo():
"""
:return:
"""
return { "type": CATEGORY_WORKER, "version" : {"major": 2, "minor": 0}, "id" : HEALTH_INSPECTOR_MODULE_ID}
def deinit(**kwargs):
"""
:param kwargs:
:return:
"""
return True
|
import hashlib
import logging
import random
import time
import uuid
from website.db.subclassing import SubfieldBase
from website.views.fields import MultiSelectFormField
from django.core import exceptions
from django.db import models
from django.db.models import SlugField, ManyToManyField
from django.utils.text import capfirst
class OneToManyField(ManyToManyField):
"""A forgein key field that behaves just like djangos ManyToMany field,
the only difference is that an instance of the other side can only be
related to one instance of your side. Also see the test cases.
"""
def contribute_to_class(self, cls, name):
# Check if the intermediate model will be auto created.
# The intermediate m2m model is not auto created if:
# 1) There is a manually specified intermediate, or
# 2) The class owning the m2m field is abstract.
# 3) The class owning the m2m field has been swapped out.
auto_intermediate = False
if not self.rel.through and not cls._meta.abstract and not cls._meta.swapped:
auto_intermediate = True
# First call super().contribute_to_class and have Django create the intermediate model.
super(OneToManyField, self).contribute_to_class(cls, name)
if auto_intermediate:
# Set unique_together to the 'to' relationship, this ensures a OneToMany relationship.
self.rel.through._meta.unique_together = ((self.rel.through._meta.unique_together[0][1],),)
class AutoMD5SlugField(SlugField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('blank', True)
populate_from = kwargs.pop('populate_from', None)
if populate_from is None:
logging.warning("missing 'populate_from' argument")
self._populate_from = ''
else:
self._populate_from = populate_from
self.hash_key = kwargs.pop('hash_key', time.time)
super(AutoMD5SlugField, self).__init__(*args, **kwargs)
def get_new_slug(self, model_instance, extra=''):
slug_field = model_instance._meta.get_field(self.attname)
if callable(self.hash_key):
hash_key = self.hash_key()
else:
hash_key = self.hash_key
# when the selected field has no value, use a uuid instead as the hash input
hash_field_val = getattr(model_instance, self._populate_from) or str(uuid.uuid1())
_data = '%s%s%s' % (hash_key, hash_field_val, extra)
slug = hashlib.md5(_data.encode('utf-8')).hexdigest()
slug_len = slug_field.max_length
if slug_len:
slug = slug[:slug_len]
return slug
def create_slug(self, model_instance, add):
# get fields to populate from and slug field to set
slug = getattr(model_instance, self.attname)
if slug:
# a slug was already set on the instance; keep it as-is
return slug
slug = self.get_new_slug(model_instance)
# exclude the current model instance from the queryset used in finding
# the next valid slug
if hasattr(model_instance, 'gen_slug_queryset'):
queryset = model_instance.gen_slug_queryset()
else:
queryset = model_instance.__class__._default_manager.all()
if model_instance.pk:
queryset = queryset.exclude(pk=model_instance.pk)
kwargs = {}
kwargs[self.attname] = slug
while queryset.filter(**kwargs).count() > 0:
slug = self.get_new_slug(model_instance, random.random())
kwargs[self.attname] = slug
return slug
def pre_save(self, model_instance, add):
value = str(self.create_slug(model_instance, add))
setattr(model_instance, self.attname, value)
return value
def get_internal_type(self):
return "SlugField"
class MultiSelectField(models.Field, metaclass=SubfieldBase):
def get_internal_type(self):
return "CharField"
def get_choices_default(self):
return self.get_choices(include_blank=False)
def formfield(self, **kwargs):
# don't call super, as that overrides default widget if it has choices
defaults = {'required': not self.blank, 'label': capfirst(self.verbose_name),
'help_text': self.help_text, 'choices': self.choices}
if self.has_default():
defaults['initial'] = self.get_default()
defaults.update(kwargs)
return MultiSelectFormField(**defaults)
def get_prep_value(self, value):
return value
def get_db_prep_value(self, value, connection=None, prepared=False):
if isinstance(value, str):
return value
elif isinstance(value, list):
return ",".join(value)
def to_python(self, value):
if value is not None:
return value if isinstance(value, list) else value.split(',')
return ''
def contribute_to_class(self, cls, name):
super(MultiSelectField, self).contribute_to_class(cls, name)
if self.choices:
func = lambda self, fieldname=name, choicedict=dict(self.choices): ",".join(
[choicedict.get(value, value) for value in getattr(self, fieldname)])
setattr(cls, 'get_%s_display' % self.name, func)
def validate(self, value, model_instance):
arr_choices = self.get_choices_selected(self.get_choices_default())
for opt_select in value:
if (int(opt_select) not in arr_choices): # the int() here is for comparing with integer choices
raise exceptions.ValidationError(self.error_messages['invalid_choice'] % value)
return
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return self.get_db_prep_value(value)
class BetterCharField(models.Field):
'''
my_field = BetterCharField(25)
'''
def __init__(self, length, *args, **kwargs):
self.length = length
super(BetterCharField, self).__init__(*args, **kwargs)
def db_type(self, connection):
return 'char(%s)' % self.length
class CloudImageField(models.ImageField):
def __init__(self, verbose_name=None, name=None, width_field=None,
height_field=None, **kwargs):
from website.views.utils import QiniuStorage
kwargs['storage'] = QiniuStorage()
super(CloudImageField, self).__init__(verbose_name, name, width_field, height_field, **kwargs)
# Fix south problems
try:
from south.modelsinspector import add_introspection_rules
except ImportError:
pass
else:
add_introspection_rules([], ["^website\.model_fields\.CloudImageField"])
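# Illustrative model using the custom fields above (assumption; no models are
# defined in this file). `Article` and the choices are hypothetical.
#
# class Article(models.Model):
#     title = models.CharField(max_length=200)
#     slug = AutoMD5SlugField(populate_from='title', max_length=32, unique=True)
#     tags = MultiSelectField(max_length=200, choices=((1, 'news'), (2, 'tech')))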
|
from problems.vrp.problem_vrp import VRPDataset
from problems.vrp.problem_vrp import CVRP
import time
import torch
data = VRPDataset( size=10)
problem = CVRP()
start = time.time()
distance_matrix = torch.cdist(data[1]["loc"], data[1]["loc"], p=2.0, compute_mode='use_mm_for_euclid_dist_if_necessary')
neighbors = torch.topk(distance_matrix, 5, largest = False)
print(time.time()-start)
print(neighbors.indices)
|
from taichi._lib import core as ti_core
# ========================================
# real types
# ----------------------------------------
float16 = ti_core.DataType_f16
"""16-bit precision floating point data type.
"""
# ----------------------------------------
f16 = float16
"""Alias for :const:`~taichi.types.primitive_types.float16`
"""
# ----------------------------------------
float32 = ti_core.DataType_f32
"""32-bit single precision floating point data type.
"""
# ----------------------------------------
f32 = float32
"""Alias for :const:`~taichi.types.primitive_types.float32`
"""
# ----------------------------------------
float64 = ti_core.DataType_f64
"""64-bit double precision floating point data type.
"""
# ----------------------------------------
f64 = float64
"""Alias for :const:`~taichi.types.primitive_types.float64`
"""
# ----------------------------------------
# ========================================
# Integer types
# ----------------------------------------
int8 = ti_core.DataType_i8
"""8-bit signed integer data type.
"""
# ----------------------------------------
i8 = int8
"""Alias for :const:`~taichi.types.primitive_types.int8`
"""
# ----------------------------------------
int16 = ti_core.DataType_i16
"""16-bit signed integer data type.
"""
# ----------------------------------------
i16 = int16
"""Alias for :const:`~taichi.types.primitive_types.int16`
"""
# ----------------------------------------
int32 = ti_core.DataType_i32
"""32-bit signed integer data type.
"""
# ----------------------------------------
i32 = int32
"""Alias for :const:`~taichi.types.primitive_types.int32`
"""
# ----------------------------------------
int64 = ti_core.DataType_i64
"""64-bit signed integer data type.
"""
# ----------------------------------------
i64 = int64
"""Alias for :const:`~taichi.types.primitive_types.int64`
"""
# ----------------------------------------
uint8 = ti_core.DataType_u8
"""8-bit unsigned integer data type.
"""
# ----------------------------------------
u8 = uint8
"""Alias for :const:`~taichi.types.primitive_types.uint8`
"""
# ----------------------------------------
uint16 = ti_core.DataType_u16
"""16-bit unsigned integer data type.
"""
# ----------------------------------------
u16 = uint16
"""Alias for :const:`~taichi.types.primitive_types.uint16`
"""
# ----------------------------------------
uint32 = ti_core.DataType_u32
"""32-bit unsigned integer data type.
"""
# ----------------------------------------
u32 = uint32
"""Alias for :const:`~taichi.types.primitive_types.uint32`
"""
# ----------------------------------------
uint64 = ti_core.DataType_u64
"""64-bit unsigned integer data type.
"""
# ----------------------------------------
u64 = uint64
"""Alias for :const:`~taichi.types.primitive_types.uint64`
"""
# ----------------------------------------
real_types = [f16, f32, f64, float]
real_type_ids = [id(t) for t in real_types]
integer_types = [i8, i16, i32, i64, u8, u16, u32, u64, int]
integer_type_ids = [id(t) for t in integer_types]
types = real_types + integer_types
type_ids = [id(t) for t in types]
__all__ = [
'float32',
'f32',
'float64',
'f64',
'float16',
'f16',
'int8',
'i8',
'int16',
'i16',
'int32',
'i32',
'int64',
'i64',
'uint8',
'u8',
'uint16',
'u16',
'uint32',
'u32',
'uint64',
'u64',
]
|
__version__ = "v2022.05.26"
|
import unittest
from rts.core.thr import Thread
from rts.core.pts import ParaTaskSet
from rts.sched import bcl_mod
class BCLTestCase(unittest.TestCase):
""" Tests for `bcl.py`."""
def test_two_thread(self):
thread_param1 = {
'id': 11,
'exec_time': 4,
'deadline': 10,
'period': 10,
}
t1 = Thread(**thread_param1)
thread_param2 = {
'id': 12,
'exec_time': 2,
'deadline': 10,
'period': 10,
}
t2 = Thread(**thread_param2)
thread_param3 = {
'id': 21,
'exec_time': 2,
'deadline': 10,
'period': 10,
}
t3 = Thread(**thread_param3)
pts = ParaTaskSet()
pts.append(t1, 0)
pts.append(t2, 0)
pts.append(t3, 1)
bcl_mod_param = {
'num_core': 2,
}
self.assertTrue(bcl_mod.is_schedulable(pts, **bcl_mod_param))
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
import unittest
import eduid_userdb.element
import eduid_userdb.exceptions
from eduid_userdb.orcid import OidcAuthorization, OidcIdToken, Orcid
__author__ = 'lundberg'
token_response = {
"access_token": "b8b8ca5d-b233-4d49-830a-ede934c626d3",
"expires_in": 631138518,
"id_token": {
"at_hash": "hVBHwPjPNgJH5f87ez8h0w",
"aud": ["APP_ID"],
"auth_time": 1526389879,
"exp": 1526392540,
"family_name": "Testsson",
"given_name": "Testarn",
"iat": 1526391940,
"iss": "https://op.example.org",
"jti": "4a721a4b-301a-492b-950a-1b4a83d30149",
"sub": "subject_identifier",
"nonce": "a_nonce_token",
},
"name": "Testarn Testsson",
"orcid": "user_orcid",
"refresh_token": "a110e7d2-4968-42d4-a91d-f379b55a0e60",
"scope": "openid",
"token_type": "bearer",
}
class TestOrcid(unittest.TestCase):
maxDiff = None
def test_id_token(self):
id_token_data = token_response['id_token']
id_token_data['created_by'] = 'test'
id_token_1 = OidcIdToken.from_dict(id_token_data)
id_token_2 = OidcIdToken(
iss=id_token_data['iss'],
sub=id_token_data['sub'],
aud=id_token_data['aud'],
exp=id_token_data['exp'],
iat=id_token_data['iat'],
nonce=id_token_data['nonce'],
auth_time=id_token_data['auth_time'],
created_by='test',
)
self.assertIsInstance(id_token_1, OidcIdToken)
self.assertIsInstance(id_token_1.to_dict(), dict)
self.assertEqual(id_token_1.key, id_token_2.key)
dict_1 = id_token_1.to_dict()
dict_2 = id_token_2.to_dict()
del dict_2['created_ts']
del dict_2['modified_ts']
assert dict_1 == dict_2
with self.assertRaises(eduid_userdb.exceptions.UserDBValueError):
OidcIdToken.from_dict(None)
def test_oidc_authz(self):
id_token_data = token_response['id_token']
id_token_data['created_by'] = 'test'
id_token = OidcIdToken.from_dict(token_response['id_token'])
token_response['created_by'] = 'test'
oidc_authz_1 = OidcAuthorization.from_dict(token_response)
oidc_authz_2 = OidcAuthorization(
access_token=token_response['access_token'],
token_type=token_response['token_type'],
id_token=id_token,
expires_in=token_response['expires_in'],
refresh_token=token_response['refresh_token'],
created_by='test',
)
self.assertIsInstance(oidc_authz_1, OidcAuthorization)
self.assertIsInstance(oidc_authz_1.to_dict(), dict)
self.assertEqual(oidc_authz_1.key, oidc_authz_2.key)
dict_1 = oidc_authz_1.to_dict()
dict_2 = oidc_authz_2.to_dict()
del dict_2['created_ts']
del dict_2['modified_ts']
assert dict_1 == dict_2
with self.assertRaises(eduid_userdb.exceptions.UserDBValueError):
OidcAuthorization.from_dict(None)
def test_orcid(self):
token_response['id_token']['created_by'] = 'test'
token_response['created_by'] = 'test'
oidc_authz = OidcAuthorization.from_dict(token_response)
orcid_1 = Orcid(
id='https://op.example.org/user_orcid', oidc_authz=oidc_authz, created_by='test', is_verified=True
)
orcid_2 = Orcid.from_dict(data=orcid_1.to_dict())
self.assertIsInstance(orcid_1, Orcid)
self.assertIsInstance(orcid_1.to_dict(), dict)
self.assertEqual(orcid_1.key, orcid_2.key)
self.assertEqual(orcid_1.id, orcid_2.id)
self.assertEqual(orcid_1.id, orcid_2.key)
self.assertEqual(orcid_1.oidc_authz.key, orcid_2.oidc_authz.key)
self.assertEqual(orcid_1.oidc_authz.id_token.key, orcid_2.oidc_authz.id_token.key)
dict_1 = orcid_1.to_dict()
dict_2 = orcid_2.to_dict()
assert dict_1 == dict_2
with self.assertRaises(TypeError):
data = orcid_1.to_dict()
data['unknown_key'] = 'test'
Orcid.from_dict(data)
with self.assertRaises(eduid_userdb.exceptions.UserDBValueError):
Orcid.from_dict(None)
|
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse
from pytz import timezone  # used by created_korean_time; assumes a pytz-style timezone lookup
# Create your models here.
class Sns(models.Model):
author = models.ForeignKey(
User, on_delete=models.CASCADE, related_name='user')
text = models.TextField(blank=True)
image = models.ImageField(upload_to='timeline_photo/%Y/%m/%d')
created = models.DateTimeField(auto_now_add=True)
update = models.DateTimeField(auto_now=True)
like = models.ManyToManyField(User, related_name='like_post', blank=True)
favorite = models.ManyToManyField(
User, related_name='favorite_post', blank=True)
def __str__(self):
return "text : "+self.text
class Meta:
ordering = ['-created']
def get_absolute_url(self):
return reverse('sns:detail', args=[self.id])
@property
def created_korean_time(self):
korean_timezone = timezone(settings.TIME_ZONE)
return self.created.astimezone(korean_timezone)
|
import pandas as pd
def generate_countryDB(country_name:str):
''' Generates a csv database with all cities of a defined country.
Args:
country_name (str): Country name in english, not abbreviated.
Returns:
df_col (pandas.DataFrame):
Writes: A csv file with the cities of the specified country with the name 'country_name.csv'.
'''
df = pd.read_csv('../db/worldcities.csv')
df_country = df[df['country'] == country_name].reset_index(drop = True)
df_col = df_country[['city', 'lat', 'lng', 'admin_name', 'population']]
df_col.to_csv('../db/{}.csv'.format(country_name.lower()))
return df_col
def filter_cities_by_population(df, pop_filter:int):
''' Filter cities above a population threshold (in thousands).
Args:
df (pandas.DataFrame): Country DF generated by the function generate_countryDB
pop_filter (int): Number of inhabitants in thousands that will serve as a threshold to select only the bigger cities.
Returns:
filtered_cities (pandas.DataFrame): A df with the cities whose population is bigger than the declared threshold
'''
### I don't want to see all of the cities in NL
print(df.head())
filtered_cities = df[df['population'] >= pop_filter*(1e3)].reset_index(drop = True)
return filtered_cities
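# Example workflow (assumes ../db/worldcities.csv is present): build the
# per-country CSV once, then keep only cities above 100k inhabitants.
#
# nl_cities = generate_countryDB('Netherlands')
# big_cities = filter_cities_by_population(nl_cities, 100)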
|
from flask import render_template,redirect,url_for,request,abort
from . import main
from flask_login import login_required,current_user
from ..models import User,Comment,Pitch,Upvote,Downvote
from .form import Updateprofile,Topicform,CommentForm
from .. import db
@main.route('/')
def index():
return render_template('index.html')
@main.route('/create_new',methods=['GET','POST'])
@login_required
def new_pitch():
form=Topicform()
if form.validate_on_submit():
title=form.title.data
post=form.post.data
category=form.category.data
user_id=current_user
new_pitch_object=Pitch(post=post,user_id=current_user._get_current_object().id,category=category,title=title)
new_pitch_object.save_pitchez()
return redirect(url_for('main.index'))
return render_template('pitch.html',form=form)
@main.route('/comment/<int:pitch_id>',methods=['GET','POST'])
@login_required
def comment(pitch_id):
form=CommentForm()
pitch=Pitch.query.get(pitch_id)
all_comment=Comment.query.filter_by(pitch_id=pitch_id).all()
if form.validate_on_submit():
comment= form.comment.data
pitch_id= pitch_id
user_id=current_user._get_current_object().id
new_comment = Comment(comment=comment,pitch_id=pitch_id,user_id=user_id)
new_comment.save_comment()
return redirect(url_for('.comment',pitch_id=pitch_id))
return render_template('comment.html',pitch=pitch,form=form,all_comment=all_comment)
@main.route('/user/<name>')
def profile(name):
user=User.query.filter_by(username=name).first()
user_id = current_user._get_current_object().id
posts=Pitch.query.filter_by(user_id=user_id).all()
if user is None:
abort(404)
return render_template("profile/profile.html",user= user,posts=posts)
@main.route('/user/<name>/updateprofile',methods=['GET','POST'])
@login_required
def updateprofile(name):
form = Updateprofile()
user= User.query.filter_by(username = name).first()
if user is None:
abort(404)
if form.validate_on_submit():
user.bio = form.bio.data
user.save_user()
return redirect(url_for('.profile',name=name))
return render_template('profile/update.html' ,form=form)
@main.route('/like/<int:id>',methods=['GET','POST'])
@login_required
def like(id):
get_pitches = Upvote.get_upvotes(id)
valid_string = f'{current_user.id}:{id}'
for pit in get_pitches:
to_str = f'{pit}'
if valid_string == to_str:
return redirect(url_for('main.index',id=id))
else:
continue
new_vote = Upvote(user=current_user,pitch_id=id)
new_vote.save()
return redirect(url_for('main.index',id=id))
@main.route('/dislike/<int:id>',methods=['GET','POST'])
@login_required
def dislike(id):
pitch = Downvote.get_downvotes(id)
valid_string = f'{current_user.id}:{id}'
for p in pitch:
to_str = f'{p}'
if valid_string == to_str:
return redirect(url_for('main.index',id=id))
else:
continue
new_downvote = Downvote(user=current_user,pitch_id=id)
new_downvote.save()
return redirect(url_for('main.index',id=id))
|
from datasets import load_dataset
def print_dict(d: dict, name: str = None):
if name is not None:
s = f'===== {name} ====='
print(s)
for k, v in d.items():
print(f'{k} = {v}')
if name is not None:
print(''.join(['=' for _ in range(len(s))]))
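# Small usage example: prints the dict between a framed header and footer.
#
# print_dict({'split': 'train', 'num_rows': 1000}, name='dataset info')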
|
from flask import Flask, render_template, jsonify, request, session, redirect, flash
import crud, more_crud
import model
import json
import api
import os
def make_dict_books_a_shelf(user, shelf):
current_user = crud.get_user_by_username(user)
if shelf == "all":
books_on_shelf = crud.return_all_books_on_shelves_by_user(current_user.user_id)
else:
books_on_shelf = crud.return_books_on_shelf_by_nickname(shelf, current_user.user_id)
# print("RETREIVED BOOKS ON SEHFL", books_on_shelf)
shelf_books = []
for book in books_on_shelf:
#get shelved book info : shelf id, book id, reading and owned statuses
#print("SHEVLEDBOOK ID",current_user, current_user.user_id, book.book_id)
shelf_st = crud.get_shelvedbook(current_user.user_id, book.book_id)
#print("SHELF", shelf_st, shelf_st.bookshelf.nickname)
own_st = crud.get_owned_status(shelf_st.owned_status)
reading_st = crud.get_reading_status(shelf_st.reading_status)
# print("TRYING TO GET SHELF NAME", book, book.book_id, shelf_st, shelf_st.bookshelf.nickname, shelf_st.owned_status, shelf_st.reading_status)
# print(reading_st, own_st)
if book.cover_img_source.startswith('http'):
image_url = 'https'+ str(book.cover_img_source)[4:]
shelf_books.append({'book_id': book.book_id, "title":book.title,
'author': book.author, 'publisher': book.publisher,
'description':book.description, "img":image_url,
'shelf_name': shelf_st.bookshelf.nickname,
'shelf_id':shelf_st.bookshelf.shelf_id,
'reading_stat':reading_st,
'owned_stat':own_st
})
return shelf_books
def update_reading_status(user, book_id, reading_status=None):
if not reading_status:
reading_status = crud.get_reading_status(1)
current_user = crud.get_user_by_username(user)
shelved_book = crud.update_shelvedbook_reading_st(current_user.user_id, \
book_id, reading_status)
return shelved_book
def update_owned_status(user, book_id, owned_status=None):
if not owned_status:
owned_status = crud.get_owned_status(1)
current_user = crud.get_user_by_username(user)
shelved_book = crud.update_shelvedbook_owned_st(current_user.user_id, \
book_id, owned_status)
return shelved_book
|
#!/usr/bin/python
# coding:utf-8
#Uses an infrared break-beam laser
#When started, prints the results to the console.
#Exit with Ctrl + C
#
import RPi.GPIO as GPIO
from time import sleep
#pin number
pin = 10
#GPIO.setmode(GPIO.BCM)
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin, GPIO.IN)
try:
while True:
if GPIO.input(pin) == GPIO.HIGH:
print("1:not blocked")
else:
print("0:blocked")
sleep(0.5)
except KeyboardInterrupt:
pass
GPIO.cleanup(pin)
print("proc is end.")
|
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
import numpy as np
def bar_charts():
plt.style.use('seaborn')
# can be further fine-tuned; either 0.5 from start or I include baseline model
labels = ['DSC', 'D2V', 'RASC']
all_means = [0.244, 0.120, 0.486, ]
low_means = [0.340, 0.155, 0.595, ]
high_means = [-77.980/100, -57.523/100, -25.470/100,]
all_stds = [0.036, 0.009, 0.067]
low_stds = [0.039, 0.010, 0.075]
high_stds = [13.631/100, 2.771/100, 5.717/100]
x = np.arange(len(all_means)) # the label locations
width = 0.2 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(x - width, all_means, width, yerr=all_stds, label='AllPSI')
rects2 = ax.bar(x, low_means, width, yerr=low_stds, label='LowPSI')
rects3 = ax.bar(x + width, high_means, width, yerr=high_stds, label='HighPSI')
# ax.set_title('AUC and inverse number of weights by model')
ax.set_xticks(np.arange(len(labels)))
# ax.set_ylim(0.5, 1)
ax.set_ylabel('R2')
ax.set_xticklabels(labels)
ax.legend()
def autolabel(rects, labels=None, stds=None):
"""Attach a text label above each bar in *rects*, displaying its height."""
for i, rect in enumerate(rects):
label = labels[i] if labels else rect.get_height()
height = rect.get_height()
y_label = height + 0.02 if not stds else height + stds[i]*0.7 + 0.002
ax.annotate(f'{label}',
xy=(rect.get_x() + rect.get_width() / 2, y_label),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom')
autolabel(rects1, stds=all_stds)
autolabel(rects2, stds=low_stds)
autolabel(rects3, stds=high_stds)
fig.tight_layout()
plt.savefig('regression_barcharts.png', dpi=300, bbox_inches='tight')
plt.show()
if __name__ == '__main__':
bar_charts()
|
#!python3
import openpyxl, string, sys
if __name__ == "__main__":
start = sys.argv[1]
blank = sys.argv[2]
wb = openpyxl.load_workbook(sys.argv[3])
sheet = wb.get_sheet_by_name('Sheet')
alphabet = string.ascii_uppercase
newwb = openpyxl.Workbook()
newsheet = newwb.get_active_sheet()
for i in range(0, sheet.max_column):
for j in range(1, int(start)):
cell = "%s%s" % (alphabet[i], j)
newsheet[cell] = sheet[cell].value
for j in range(int(start), int(start) + int(blank)):
pass
for j in range(int(start) + int(blank), sheet.max_row):
cell = "%s%s" % (alphabet[i], j)
newcell = "%s%s" % (alphabet[i], j)
newsheet[newcell] = sheet[cell].value
newwb.save("new-%s" % sys.argv[3])
|
"""
This file lists all the global variables that are used throughout the project.
The two major components of this file are the list of the datasets and the list of the models.
"""
"""
This is where we keep a reference to all the dataset classes in the project.
"""
import medical_ood.datasets.MNIST as MNIST
import medical_ood.datasets.FashionMNIST as FMNIST
import medical_ood.datasets.notMNIST as NMNIST
import medical_ood.datasets.CIFAR as CIFAR
import medical_ood.datasets.noise as noise
import medical_ood.datasets.STL as STL
import medical_ood.datasets.TinyImagenet as TI
import medical_ood.datasets.NIH_Chest as NC
import medical_ood.datasets.MURA as MU
import medical_ood.datasets.PADChest as PC
import medical_ood.datasets.malaria as mal
import medical_ood.datasets.ANHIR as ANH
import medical_ood.datasets.DRD as DRD
import medical_ood.datasets.DRIMDB as DRM
import medical_ood.datasets.IDC as IDC
import medical_ood.datasets.PCAM as PCAM
import medical_ood.datasets.RIGA as RIGA
all_dataset_classes = [
MNIST.MNIST,
FMNIST.FashionMNIST,
NMNIST.NotMNIST,
CIFAR.CIFAR10,
CIFAR.CIFAR100,
STL.STL10,
TI.TinyImagenet,
noise.UniformNoise,
noise.NormalNoise,
STL.STL10d32,
TI.TinyImagenetd32,
NC.NIHChest,
NC.NIHChestBinary,
NC.NIHChestBinaryTest,
NC.NIHChestBinaryTrainSplit,
NC.NIHChestBinaryValSplit,
NC.NIHChestBinaryTestSplit,
MU.MURA,
MU.MURAHAND,
MU.MURAELBOW,
MU.MURAFINGER,
MU.MURAFOREARM,
MU.MURAHUMERUS,
MU.MURASHOULDER,
MU.MURAWRIST,
PC.PADChest,
PC.PADChestAP,
PC.PADChestPA,
PC.PADChestL,
PC.PADChestAPHorizontal,
PC.PADChestPED,
mal.Malaria,
ANH.ANHIR,
DRD.DRD,
DRM.DRIMDB,
IDC.IDC,
PCAM.PCAM,
PCAM.PCAMGray,
RIGA.RIGA,
]
"""
Not every dataset can be used as the Dv, Dt (aka D2) for every other dataset.
The list below specifies which datasets can be used as the D2 for the other datasets.
For instance, STL10 and CIFAR10 cannot face each other because they have 9 out of 10 classes
in common.
d2_compatiblity = {
# This can be used as d2 for # this
"MNIST": [
"FashionMNIST",
"CIFAR10",
"CIFAR100",
"STL10",
"TinyImagenet",
"STL10d32",
"TinyImagenetd32",
"NIHCC",
"NIHChestBinaryTest",
"NIHChestBinaryTrainSplit",
"PADChest",
"DRD",
"PCAM",
],
"NotMNIST": [
"MNIST",
"FashionMNIST",
"CIFAR10",
"CIFAR100",
"STL10",
"TinyImagenet",
"STL10d32",
"TinyImagenetd32",
"NIHCC",
"NIHChestBinaryTest",
"NIHChestBinaryTrainSplit",
"PADChest",
"DRD",
"PCAM",
],
"FashionMNIST": [
"MNIST",
"CIFAR10",
"CIFAR100",
"STL10",
"TinyImagenet",
"STL10d32",
"TinyImagenetd32",
"NIHCC",
"NIHChestBinaryTest",
"NIHChestBinaryTrainSplit",
"PADChest",
"DRD",
"PCAM",
],
"CIFAR10": [
"MNIST",
"FashionMNIST",
"CIFAR100",
"TinyImagenet",
"TinyImagenetd32",
"NIHCC",
"NIHChestBinaryTest",
"NIHChestBinaryTrainSplit",
"PADChest",
"DRD",
"PCAM",
],
"CIFAR100": [
"MNIST",
"FashionMNIST",
"CIFAR10",
"STL10",
"TinyImagenet",
"STL10d32",
"TinyImagenetd32",
"NIHCC",
"NIHChestBinaryTest",
"NIHChestBinaryTrainSplit",
"PADChest",
"DRD",
"PCAM",
],
"STL10": [
"MNIST",
"FashionMNIST",
"CIFAR100",
"TinyImagenet",
"TinyImagenetd32",
"NIHCC",
"NIHCC",
"NIHChestBinaryTrainSplit",
"PADChest",
"DRD",
"PCAM",
],
"TinyImagenet": [
"MNIST",
"FashionMNIST",
"CIFAR10",
"CIFAR100",
"STL10",
"STL10d32",
"NIHCC",
"NIHChestBinaryTest",
"NIHChestBinaryTrainSplit",
"PADChest",
"DRD",
"PCAM",
],
"NIHChestBinary": [
"MNIST",
"FashionMNIST",
"CIFAR10",
"CIFAR100",
"STL10",
"TinyImagenet",
"STL10d32",
"TinyImagenetd32",
"DRD",
"PCAM",
],
"NIHCC": [
"FashionMNIST",
"CIFAR10",
"CIFAR100",
"STL10",
"TinyImagenet",
"STL10d32",
"TinyImagenetd32",
"NIHCC",
"DRD",
"PCAM",
],
"NIHChestBinaryValSplit": [
"FashionMNIST",
"CIFAR10",
"CIFAR100",
"STL10",
"TinyImagenet",
"STL10d32",
"TinyImagenetd32",
"NIHChestBinaryTrainSplit",
"DRD",
"PCAM",
],
"MURA": ["NIHCC", "PADChest"],
"MURAHAND": ["NIHCC", "PADChest"],
"MURAWRIST": ["NIHCC", "PADChest"],
"MURAELBOW": ["NIHCC", "PADChest"],
"MURAFINGER": ["NIHCC", "PADChest"],
"MURAFOREARM": ["NIHCC", "PADChest"],
"MURAHUMERUS": ["NIHCC", "PADChest"],
"MURASHOULDER": ["NIHCC", "PADChest"],
"PADChest": ["NIHCC", "PADChest"],
"PADChestPA": ["NIHCC", "PADChest"],
"PADChestAP": ["NIHCC", "PADChest"],
"PADChestL": ["NIHCC", "PADChest"],
"PADChestAPHorizontal": ["NIHCC", "PADChest"],
"PADChestPED": ["NIHCC", "PADChest"],
"Malaria": [
"PCAM",
],
"ANHIR": [
"PCAM",
],
"IDC": [
"PCAM",
],
"DRIMDB": [
"DRD",
],
"RIGA": [
"DRD",
],
# STL10 is not compatible with CIFAR10 because of the 9-overlapping classes.
# Erring on the side of caution.
}
# We can augment the following training data with mirroring.
# We make sure there's no information leak in-between tasks.
mirror_augment = {
"FashionMNIST",
"CIFAR10",
"CIFAR100",
"STL10",
"TinyImagenet",
"STL10d32",
"TinyImagenetd32",
}
##################################################################
# Do not change anything below, unless you know what you are doing.
"""
all_datasets is automatically generated
all_datasets = {
'MNIST' : MNIST,
...
}
"""
all_datasets = {}
for dscls in all_dataset_classes:
all_datasets[dscls.__name__] = dscls
def get_ref_classifier(dataset):
if dataset in dataset_reference_classifiers:
return dataset_reference_classifiers[dataset]
raise NotImplementedError()
def get_ref_autoencoder(dataset):
if dataset in dataset_reference_autoencoders:
return dataset_reference_autoencoders[dataset]
raise NotImplementedError()
def get_ref_vae(dataset):
if dataset in dataset_reference_vaes:
return dataset_reference_vaes[dataset]
raise NotImplementedError()
def get_ref_ali(dataset):
if dataset in dataset_reference_ALI:
return dataset_reference_ALI[dataset]
raise NotImplementedError()
def get_ref_pixelcnn(dataset):
if dataset in dataset_reference_pcnns:
return dataset_reference_pcnns[dataset]
raise NotImplementedError()
def get_method(name, args):
elements = name.split("/")
instance = all_methods[elements[0]](args)
if len(elements) > 1:
instance.default_model = int(elements[1])
return instance
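# Usage sketch (assumption): a method name may carry a default model index
# after a slash; get_method() splits on "/" and looks the prefix up in
# `all_methods`, which is defined elsewhere in the project.
#
# method = get_method('score_svm/1', args)  # 'score_svm' is a hypothetical key
# assert method.default_model == 1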
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
from setuptools import setup
import re
def requirements(filename):
with open(filename) as f:
lines = f.read().splitlines()
c = re.compile(r'\s*#.*')
return filter(bool, map(lambda y: c.sub('', y).strip(), lines))
setup(
name='cfgm_common',
version='0.1dev',
packages=['cfgm_common',
'cfgm_common.ifmap',
'cfgm_common.uve',
'cfgm_common.uve.acl',
'cfgm_common.uve.service_instance',
'cfgm_common.uve.vnc_api',
'cfgm_common.uve.virtual_machine',
'cfgm_common.uve.virtual_network',
'cfgm_common.uve.cfgm_cpuinfo',
'cfgm_common.uve.cfgm_cpuinfo.cpuinfo',
'cfgm_common.uve.cfgm_cpuinfo.process_info'
],
package_data={'': ['*.html', '*.css', '*.xml']},
zip_safe=False,
long_description="VNC Configuration Common Utils",
install_requires=requirements('requirements.txt'),
test_suite='tests',
)
|
"""
This file contains the Models that represent tables in the system's database
and helper functions to interact with the tables.
"""
from capstoneproject.models.fields import *
from capstoneproject.models.models import *
from capstoneproject.models.querysets import *
from capstoneproject.models.db_queries import *
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
import models
# Register your models here.
admin.site.register(models.Student)
admin.site.register(models.Course)
admin.site.register(models.SC)
|
#!/usr/bin/env python
import sys
import threading
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SocketServer import ThreadingMixIn
from cgi import urlparse
#enable threaded server
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
pass
#custom handler for getting routes
class PrimeHandler(BaseHTTPRequestHandler):
def __init__(self, *args):
BaseHTTPRequestHandler.__init__(self, *args)
#parse the request because we don't get this for free!
def handle_request(self):
#split the query from the path
try:
split = urlparse.urlsplit(self.path)
except:
raise Exception('Try a url with 2 components: is_prime?possible_prime=SOME_NUMBER')
if split.path != '/is_prime':
raise Exception('Try a valid path: is_prime')
#get a dict and unexplode non-list entries
try:
params = urlparse.parse_qs(split.query)
if len(params['possible_prime']) > 1:
raise Exception('Only one possible_prime at a time')
prime = int(params['possible_prime'][0])
except Exception as e:
raise e
#prime computation
divisor = 2;
high = prime;
while divisor < high:
if prime % divisor == 0:
break
high = prime / divisor
divisor += 1
if divisor < high:
prime = 2
#hand it back
return str(prime)
#send a success
def succeed(self, response):
self.send_response(200)
#set some basic info
self.send_header('Access-Control-Allow-Origin','*')
self.send_header('Content-type', 'text/plain;charset=utf-8')
self.send_header('Content-length', len(response))
self.end_headers()
#hand it back
self.wfile.write(response)
#send a fail
def fail(self, error):
self.send_response(400)
#set some basic info
self.send_header('Access-Control-Allow-Origin','*')
self.send_header('Content-type', 'text/plain;charset=utf-8')
self.send_header('Content-length', len(error))
self.end_headers()
#hand it back
self.wfile.write(str(error))
#handle the request
def do_GET(self):
#get out the bits we care about
try:
response = self.handle_request()
self.succeed(response)
except Exception as e:
self.fail(str(e))
#go off and wait for connections
if __name__ == '__main__':
#setup the server
server = ('0.0.0.0', 8002)
PrimeHandler.protocol_version = 'HTTP/1.0'
httpd = ThreadedHTTPServer(server, PrimeHandler)
try:
httpd.serve_forever()
except KeyboardInterrupt:
httpd.server_close()
|
from django.core.exceptions import ObjectDoesNotExist
from metrics.models import Metric, MetricValue
from metrics import handlers
from metrics.jira import request_jira_api
import celery
from datetime import timedelta
from django.conf import settings
import logging
log = logging.getLogger(__name__)
@celery.task()
def run_metric_calculation(metric_id):
log.info('Getting metric {} from DB'.format(metric_id))
try:
metric = Metric.objects.get(pk=metric_id)
except ObjectDoesNotExist:
log.error('Metric with id {} was not found in DB'.format(metric_id))
raise ObjectDoesNotExist
expand = None
fields = ['created', 'resolutiondate']
if metric.handler in ['cycletime']:
expand = ['changelog']
method = getattr(handlers, metric.handler)
if settings.JIRA_INTEGRATION:
try:
data = request_jira_api(metric.query, fields=fields, expand=expand)
except Exception as e:
metric.error = e
metric.save()
raise e
log.info('Handling selected data started')
try:
value = method(data) # handler value
log.info('Handling selected data finished')
except Exception as e:
log.error('Selected data "{}" cannot be handled by {}-handler: {}'.
format(data, metric.handler, e))
metric.error = e
metric.save()
raise e
metric.error = ''
metric.save()
log.info('Creating metric value for metric {}'.format(metric.name))
MetricValue.objects.create(metric=metric, value=value)
log.info('Metric created')
else:
log.info('Jira integration is off. '
'If you want to use this feature, turn it on.')
@celery.task()
def restore_metric_values(metric_id, query, step, handler, handler_field):
expand = None
if handler in ['cycletime', 'leadtime']:
expand = ['changelog']
if settings.JIRA_INTEGRATION:
try:
method = getattr(handlers, handler)
data = request_jira_api(
query=query,
fields=['created', 'resolutiondate'],
expand=expand)
results = group_issues_by_step(data, int(step), handler_field)
except Exception as e:
raise e
for date, group in results.items():
log.info('Handling selected data started')
try:
value = method(group) # handler value
log.info('Handling selected data finished')
except Exception as e:
log.error(
                    'Selected data "{}" cannot be handled by {}-handler: {}'.
format(data, handler, e))
raise e
MetricValue.objects.create(metric_id=metric_id, value=value,
created=date)
else:
log.info('Jira integration is off. '
'If you want to use this feature, turn it on.')
def group_issues_by_step(issues, step_in_days, field):
    sorted_issues = sorted(
        issues, key=lambda item: item.get_datetime(field))
    if not sorted_issues:
        return {}
    start_date = sorted_issues[-1].get_datetime(field)
current_interval = start_date - timedelta(days=step_in_days)
group_issues = {}
tmp = []
while len(sorted_issues) != 0:
issue = sorted_issues.pop()
if issue.get_datetime(field) >= current_interval:
tmp.append(issue)
else:
group_issues[(current_interval + timedelta(days=step_in_days))
.strftime('%Y-%m-%d')] = tmp
current_interval = current_interval - timedelta(days=step_in_days)
sorted_issues.append(issue)
tmp = []
if len(tmp) != 0:
group_issues[(current_interval + timedelta(days=step_in_days))
.strftime('%Y-%m-%d')] = tmp
return group_issues
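# Illustrative sketch (not part of the tasks above): how group_issues_by_step
# buckets issues, assuming each issue exposes get_datetime(field) returning a
# datetime. _FakeIssue is a hypothetical stub that only exists for this example.
def _example_grouping():
    from datetime import datetime

    class _FakeIssue(object):
        def __init__(self, created):
            self._created = created

        def get_datetime(self, field):
            return self._created

    issues = [_FakeIssue(datetime(2020, 1, day)) for day in (1, 3, 8, 9)]
    # With a 7-day step the issues from Jan 3, 8 and 9 land in the
    # '2020-01-09' bucket and the Jan 1 issue in the '2020-01-02' bucket.
    return group_issues_by_step(issues, step_in_days=7, field='created')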
|
import numpy as np
from gym.spaces import Box
import warnings
import gym_electric_motor as gem
from ..random_component import RandomComponent
from ..core import PhysicalSystem
from ..utils import set_state_array
class SCMLSystem(PhysicalSystem, RandomComponent):
"""
The SCML(Supply-Converter-Motor-Load)-System is used for the simulation of
a technical setting consisting of these components as well as a noise
generator and a solver for the electrical ODE of the motor and mechanical
ODE of the load.
"""
OMEGA_IDX = 0
TORQUE_IDX = 1
CURRENTS_IDX = []
VOLTAGES_IDX = []
U_SUP_IDX = -1
@property
def limits(self):
return self._limits
@property
def nominal_state(self):
return self._nominal_state
@property
def supply(self):
"""The voltage supply instance in the physical system"""
return self._supply
@property
def converter(self):
"""The power electronic converter instance in the system"""
return self._converter
@property
def electrical_motor(self):
"""The electrical motor instance of the system"""
return self._electrical_motor
@property
def mechanical_load(self):
"""The mechanical load instance in the system"""
return self._mechanical_load
def __init__(self, converter, motor, load, supply, ode_solver, noise_generator=None, tau=1e-4, calc_jacobian=None):
"""
Args:
converter(PowerElectronicConverter): Converter for the physical system
motor(ElectricMotor): Motor of the system
load(MechanicalLoad): Mechanical Load of the System
supply(VoltageSupply): Voltage Supply
ode_solver(OdeSolver): Ode Solver to use in this setting
noise_generator(NoiseGenerator): Noise generator
tau(float): discrete time step of the system
calc_jacobian(bool): If True, the jacobian matrices will be taken into account for the ode-solvers.
Default: The jacobians are used, if available
"""
RandomComponent.__init__(self)
self._converter = converter
self._electrical_motor = motor
self._mechanical_load = load
self._supply = supply
self._noise_generator = noise_generator
state_names = self._build_state_names()
self._noise_generator.set_state_names(state_names)
self._ode_solver = ode_solver
if calc_jacobian is None:
calc_jacobian = self._electrical_motor.HAS_JACOBIAN and self._mechanical_load.HAS_JACOBIAN
if calc_jacobian and self._electrical_motor.HAS_JACOBIAN and self._mechanical_load.HAS_JACOBIAN:
jac = self._system_jacobian
else:
jac = None
if calc_jacobian and jac is None:
warnings.warn('Jacobian Matrix is not provided for either the motor or the load Model')
self._ode_solver.set_system_equation(self._system_equation, jac)
self._mechanical_load.set_j_rotor(self._electrical_motor.motor_parameter['j_rotor'])
self._t = 0
self._set_indices()
state_space = self._build_state_space(state_names)
super().__init__(self._converter.action_space, state_space, state_names, tau)
self._limits = np.zeros_like(state_names, dtype=float)
self._nominal_state = np.zeros_like(state_names, dtype=float)
self._set_limits()
self._set_nominal_state()
self._noise_generator.set_signal_power_level(self._nominal_state)
self.system_state = np.zeros_like(state_names, dtype=float)
self._system_eq_placeholder = None
self._motor_deriv_size = None
self._load_deriv_size = None
self._components = [
self._supply, self._converter, self._electrical_motor, self._mechanical_load, self._ode_solver,
self._noise_generator
]
def _set_limits(self):
"""
Method to set the physical limits from the modules.
"""
for ind, state in enumerate(self._state_names):
motor_lim = self._electrical_motor.limits.get(state, np.inf)
mechanical_lim = self._mechanical_load.limits.get(state, np.inf)
self._limits[ind] = min(motor_lim, mechanical_lim)
self._limits[self._state_positions['u_sup']] = self.supply.u_nominal
def _set_nominal_state(self):
"""
Method to set the nominal values from the modules.
"""
for ind, state in enumerate(self._state_names):
motor_nom = self._electrical_motor.nominal_values.get(state, np.inf)
mechanical_nom = self._mechanical_load.nominal_values.get(state, np.inf)
self._nominal_state[ind] = min(motor_nom, mechanical_nom)
self._nominal_state[self._state_positions['u_sup']] = self.supply.u_nominal
def _build_state_space(self, state_names):
"""
Method to build the normalized state space (i.e. the maximum and minimum possible values for each state variable
normalized by the limits).
Args:
state_names(list(str)): list of the names of each state.
"""
raise NotImplementedError
def _build_state_names(self):
"""
Setting of the state names in the physical system.
"""
raise NotImplementedError
def _set_indices(self):
"""
        Set the indices for faster access to the arrays during integration.
"""
self._omega_ode_idx = self._mechanical_load.OMEGA_IDX
self._load_ode_idx = list(range(len(self._mechanical_load.state_names)))
self._ode_currents_idx = list(range(
self._load_ode_idx[-1] + 1, self._load_ode_idx[-1] + 1 + len(self._electrical_motor.CURRENTS)
))
self._motor_ode_idx = self._ode_currents_idx
self.OMEGA_IDX = self.mechanical_load.OMEGA_IDX
self.TORQUE_IDX = len(self.mechanical_load.state_names)
currents_lower = self.TORQUE_IDX + 1
currents_upper = currents_lower + len(self._electrical_motor.CURRENTS)
self.CURRENTS_IDX = list(range(currents_lower, currents_upper))
voltages_lower = currents_upper
voltages_upper = voltages_lower + len(self._electrical_motor.VOLTAGES)
self.VOLTAGES_IDX = list(range(voltages_lower, voltages_upper))
self.U_SUP_IDX = list(range(voltages_upper, voltages_upper + self._supply.voltage_len))
def seed(self, seed=None):
RandomComponent.seed(self, seed)
sub_seeds = self.seed_sequence.spawn(len(self._components))
for component, sub_seed in zip(self._components, sub_seeds):
if isinstance(component, gem.RandomComponent):
component.seed(sub_seed)
def simulate(self, action, *_, **__):
# Docstring of superclass
ode_state = self._ode_solver.y
i_in = self._electrical_motor.i_in(ode_state[self._ode_currents_idx])
switching_times = self._converter.set_action(action, self._t)
for t in switching_times[:-1]:
i_sup = self._converter.i_sup(i_in)
u_sup = self._supply.get_voltage(self._t, i_sup)
u_in = self._converter.convert(i_in, self._ode_solver.t)
u_in = [u * u_s for u in u_in for u_s in u_sup]
self._ode_solver.set_f_params(u_in)
ode_state = self._ode_solver.integrate(t)
i_in = self._electrical_motor.i_in(ode_state[self._ode_currents_idx])
i_sup = self._converter.i_sup(i_in)
u_sup = self._supply.get_voltage(self._t, i_sup)
u_in = self._converter.convert(i_in, self._ode_solver.t)
u_in = [u * u_s for u in u_in for u_s in u_sup]
self._ode_solver.set_f_params(u_in)
ode_state = self._ode_solver.integrate(self._t + self._tau)
self._t = self._ode_solver.t
self._k += 1
torque = self._electrical_motor.torque(ode_state[self._motor_ode_idx])
noise = self._noise_generator.noise()
n_mech_states = len(self.mechanical_load.state_names)
motor_state = ode_state[n_mech_states:]
self.system_state[:n_mech_states] = ode_state[:n_mech_states]
self.system_state[self.TORQUE_IDX] = torque
self.system_state[self.CURRENTS_IDX] = \
motor_state[self._electrical_motor.CURRENTS_IDX]
self.system_state[self.VOLTAGES_IDX] = u_in
self.system_state[self.U_SUP_IDX] = u_sup
return (self.system_state + noise) / self._limits
def _system_equation(self, t, state, u_in, **__):
"""
        The system's differential equation.
        It is the concatenation of the motor's electrical ODE and the mechanical ODE of the load.
        Args:
            t(float): Current system time
            state(ndarray(float)): Current ODE state of the system
            u_in(list(float)): Input voltages from the converter
Returns:
ndarray(float): The derivatives of the ODE-State. Based on this, the Ode Solver calculates the next state.
"""
if self._system_eq_placeholder is None:
motor_derivative = self._electrical_motor.electrical_ode(
state[self._motor_ode_idx], u_in, state[self._omega_ode_idx]
)
torque = self._electrical_motor.torque(state[self._motor_ode_idx])
load_derivative = self._mechanical_load.mechanical_ode(t, state[
self._load_ode_idx], torque)
self._system_eq_placeholder = np.concatenate((load_derivative,
motor_derivative))
self._motor_deriv_size = motor_derivative.size
self._load_deriv_size = load_derivative.size
else:
self._system_eq_placeholder[:self._load_deriv_size] = \
self._mechanical_load.mechanical_ode(
t, state[self._load_ode_idx],
self._electrical_motor.torque(state[self._motor_ode_idx])
).ravel()
self._system_eq_placeholder[self._load_deriv_size:] = \
self._electrical_motor.electrical_ode(
state[self._motor_ode_idx], u_in, state[self._omega_ode_idx]
).ravel()
return self._system_eq_placeholder
def _system_jacobian(self, t, state, u_in, **__):
motor_jac, el_state_over_omega, torque_over_el_state = self._electrical_motor.electrical_jacobian(
state[self._motor_ode_idx], u_in, state[self._omega_ode_idx]
)
torque = self._electrical_motor.torque(state[self._motor_ode_idx])
load_jac, load_over_torque = self._mechanical_load.mechanical_jacobian(
t, state[self._load_ode_idx], torque
)
system_jac = np.zeros((state.shape[0], state.shape[0]))
system_jac[:load_jac.shape[0], :load_jac.shape[1]] = load_jac
system_jac[-motor_jac.shape[0]:, -motor_jac.shape[1]:] = motor_jac
system_jac[-motor_jac.shape[0]:, [self._omega_ode_idx]] = el_state_over_omega.reshape((-1, 1))
system_jac[:load_jac.shape[0], load_jac.shape[1]:] = np.matmul(
load_over_torque.reshape(-1, 1), torque_over_el_state.reshape(1, -1)
)
return system_jac
def reset(self, *_):
"""
Reset all the systems modules to an initial state.
Returns:
The new state of the system.
"""
self.next_generator()
motor_state = self._electrical_motor.reset(
state_space=self.state_space,
state_positions=self.state_positions)
mechanical_state = self._mechanical_load.reset(
state_space=self.state_space,
state_positions=self.state_positions,
nominal_state=self.nominal_state)
ode_state = np.concatenate((mechanical_state, motor_state))
u_sup = self.supply.reset()
u_in = self.converter.reset()
u_in = [u * u_s for u in u_in for u_s in u_sup]
torque = self.electrical_motor.torque(motor_state)
noise = self._noise_generator.reset()
self._t = 0
self._k = 0
self._ode_solver.set_initial_value(ode_state, self._t)
system_state = np.concatenate((
ode_state[:len(self._mechanical_load.state_names)],
[torque],
motor_state[self._electrical_motor.CURRENTS_IDX],
u_in,
u_sup
))
return (system_state + noise) / self._limits
class DcMotorSystem(SCMLSystem):
"""
SCML-System that can be used for all DC Motors.
"""
def _build_state_names(self):
# Docstring of superclass
return (
self._mechanical_load.state_names
+ ['torque']
+ self._electrical_motor.CURRENTS
+ self._electrical_motor.VOLTAGES
+ ['u_sup']
)
def _build_state_space(self, state_names):
# Docstring of superclass
low, high = self._electrical_motor.get_state_space(self._converter.currents, self._converter.voltages)
low_mechanical, high_mechanical = self._mechanical_load.get_state_space((low['omega'], high['omega']))
low.update(low_mechanical)
high.update(high_mechanical)
high['u_sup'] = self._supply.supply_range[1] / self._supply.u_nominal
if self._supply.supply_range[0] != self._supply.supply_range[1]:
low['u_sup'] = self._supply.supply_range[0] / self._supply.u_nominal
else:
low['u_sup'] = 0
low = set_state_array(low, state_names)
high = set_state_array(high, state_names)
return Box(low, high, dtype=np.float64)
class ThreePhaseMotorSystem(SCMLSystem):
"""
SCML-System that implements the basic transformations needed for three phase drives.
"""
def abc_to_alphabeta_space(self, abc_quantities):
"""
Transformation from abc to alphabeta space
Args:
abc_quantities: Three quantities in abc-space (e.g. (u_a, u_b, u_c) or (i_a, i_b, i_c))
Returns:
(quantity_alpha, quantity_beta): The quantities in the alphabeta-space
"""
alphabeta_quantity = self._electrical_motor.t_23(abc_quantities)
return alphabeta_quantity
def alphabeta_to_abc_space(self, alphabeta_quantities):
"""
        Transformation from alphabeta to abc space
Args:
alphabeta_quantities: Two quantities in alphabeta-space (e.g. (u_alpha, u_beta) or (i_alpha, i_beta))
Returns:
(quantity_a, quantity_b, quantity_c): The quantities in the abc-space
"""
return self._electrical_motor.t_32(alphabeta_quantities)
def abc_to_dq_space(self, abc_quantities, epsilon_el, normed_epsilon=False):
"""
Transformation from abc to dq space
Args:
abc_quantities: Three quantities in abc-space (e.g. (u_a, u_b, u_c) or (i_a, i_b, i_c))
epsilon_el: Electrical angle of the motor
normed_epsilon(bool): True, if epsilon is normed to [-1,1] else in [-pi, pi] (default)
Returns:
(quantity_d, quantity_q): The quantities in the dq-space
"""
if normed_epsilon:
epsilon_el *= np.pi
dq_quantity = self._electrical_motor.q_inv(self._electrical_motor.t_23(abc_quantities), epsilon_el)
return dq_quantity
def dq_to_abc_space(self, dq_quantities, epsilon_el, normed_epsilon=False):
"""
Transformation from dq to abc space
Args:
            dq_quantities: Two quantities in dq-space (e.g. (u_d, u_q) or (i_d, i_q))
epsilon_el: Electrical angle of the motor
normed_epsilon(bool): True, if epsilon is normed to [-1,1] else in [-pi, pi] (default)
Returns:
(quantity_a, quantity_b, quantity_c): The quantities in the abc-space
"""
if normed_epsilon:
epsilon_el *= np.pi
return self._electrical_motor.t_32(self._electrical_motor.q(dq_quantities, epsilon_el))
def alphabeta_to_dq_space(self, alphabeta_quantities, epsilon_el, normed_epsilon=False):
"""
Transformation from alphabeta to dq space
Args:
alphabeta_quantities: Two quantities in alphabeta-space (e.g. (u_alpha, u_beta) or (i_alpha, i_beta))
epsilon_el: Electrical angle of the motor
normed_epsilon(bool): True, if epsilon is normed to [-1,1] else in [-pi, pi] (default)
Returns:
(quantity_d, quantity_q): The quantities in the dq-space
"""
if normed_epsilon:
epsilon_el *= np.pi
dq_quantity = self._electrical_motor.q_inv(alphabeta_quantities, epsilon_el)
return dq_quantity
def dq_to_alphabeta_space(self, dq_quantities, epsilon_el, normed_epsilon=False):
"""
Transformation from dq to alphabeta space
Args:
dq_quantities: Two quantities in dq-space (e.g. (u_d, u_q) or (i_d, i_q))
epsilon_el: Electrical angle of the motor
normed_epsilon(bool): True, if epsilon is normed to [-1,1] else in [-pi, pi] (default)
Returns:
(quantity_alpha, quantity_beta): The quantities in the alphabeta-space
"""
if normed_epsilon:
epsilon_el *= np.pi
return self._electrical_motor.q(dq_quantities, epsilon_el)
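# Illustrative sketch (module level, not used by the classes here): a plain NumPy
# version of the amplitude-invariant Clarke and Park transformations that
# ThreePhaseMotorSystem delegates to the motor model (t_23, q_inv, ...). The
# scaling convention of a concrete motor implementation may differ; this is only
# the textbook form for reference.
def _example_abc_to_dq(abc_quantities, epsilon_el):
    t_23 = 2 / 3 * np.array([
        [1, -0.5, -0.5],
        [0, np.sqrt(3) / 2, -np.sqrt(3) / 2],
    ])
    alpha, beta = t_23 @ np.asarray(abc_quantities)
    # Rotate the stator-fixed alphabeta frame into the field-oriented dq frame.
    d = np.cos(epsilon_el) * alpha + np.sin(epsilon_el) * beta
    q = -np.sin(epsilon_el) * alpha + np.cos(epsilon_el) * beta
    return np.array([d, q])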
class SynchronousMotorSystem(ThreePhaseMotorSystem):
"""
SCML-System that can be used with all Synchronous Motors
"""
def __init__(self, control_space='abc', **kwargs):
"""
Args:
            control_space(str): ('abc' or 'dq') Choose whether the action space is in dq or abc coordinates
            kwargs: Further arguments to pass to SCMLSystem
"""
super().__init__(**kwargs)
self.control_space = control_space
if control_space == 'dq':
assert type(self._converter.action_space) == Box, \
'dq-control space is only available for Continuous Controlled Converters'
self._action_space = Box(-1, 1, shape=(2,), dtype=np.float64)
def _build_state_space(self, state_names):
# Docstring of superclass
low = -1 * np.ones_like(state_names, dtype=float)
low[self.U_SUP_IDX] = 0.0
high = np.ones_like(state_names, dtype=float)
return Box(low, high, dtype=np.float64)
def _build_state_names(self):
# Docstring of superclass
return (
self._mechanical_load.state_names +['torque',
'i_a', 'i_b', 'i_c', 'i_sd', 'i_sq',
'u_a', 'u_b', 'u_c', 'u_sd', 'u_sq',
'epsilon', 'u_sup',
]
)
def _set_indices(self):
# Docstring of superclass
self._omega_ode_idx = self._mechanical_load.OMEGA_IDX
self._load_ode_idx = list(range(len(self._mechanical_load.state_names)))
self._ode_currents_idx = list(range(
self._load_ode_idx[-1] + 1, self._load_ode_idx[-1] + 1 + len(self._electrical_motor.CURRENTS)
))
self._motor_ode_idx = self._ode_currents_idx
self._motor_ode_idx += [self._motor_ode_idx[-1] + 1]
self._ode_currents_idx = self._motor_ode_idx[:-1]
self.OMEGA_IDX = self.mechanical_load.OMEGA_IDX
self.TORQUE_IDX = len(self.mechanical_load.state_names)
currents_lower = self.TORQUE_IDX + 1
currents_upper = currents_lower + 5
self.CURRENTS_IDX = list(range(currents_lower, currents_upper))
voltages_lower = currents_upper
voltages_upper = voltages_lower + 5
self.VOLTAGES_IDX = list(range(voltages_lower, voltages_upper))
self.EPSILON_IDX = voltages_upper
self.U_SUP_IDX = list(range(self.EPSILON_IDX + 1, self.EPSILON_IDX + 1 + self._supply.voltage_len))
self._ode_epsilon_idx = self._motor_ode_idx[-1]
def simulate(self, action, *_, **__):
# Docstring of superclass
ode_state = self._ode_solver.y
eps = ode_state[self._ode_epsilon_idx]
if self.control_space == 'dq':
action = self.dq_to_abc_space(action, eps)
i_in = self.dq_to_abc_space(self._electrical_motor.i_in(ode_state[self._ode_currents_idx]), eps)
switching_times = self._converter.set_action(action, self._t)
for t in switching_times[:-1]:
i_sup = self._converter.i_sup(i_in)
u_sup = self._supply.get_voltage(self._t, i_sup)
u_in = self._converter.convert(i_in, self._ode_solver.t)
u_in = [u * u_s for u in u_in for u_s in u_sup]
u_dq = self.abc_to_dq_space(u_in, eps)
self._ode_solver.set_f_params(u_dq)
ode_state = self._ode_solver.integrate(t)
eps = ode_state[self._ode_epsilon_idx]
i_in = self.dq_to_abc_space(self._electrical_motor.i_in(ode_state[self._ode_currents_idx]), eps)
i_sup = self._converter.i_sup(i_in)
u_sup = self._supply.get_voltage(self._t, i_sup)
u_in = self._converter.convert(i_in, self._ode_solver.t)
u_in = [u * u_s for u in u_in for u_s in u_sup]
u_dq = self.abc_to_dq_space(u_in, eps)
self._ode_solver.set_f_params(u_dq)
ode_state = self._ode_solver.integrate(self._t + self._tau)
self._t = self._ode_solver.t
self._k += 1
torque = self._electrical_motor.torque(ode_state[self._motor_ode_idx])
noise = self._noise_generator.noise()
mechanical_state = ode_state[self._load_ode_idx]
i_dq = ode_state[self._ode_currents_idx]
i_abc = list(
self.dq_to_abc_space(i_dq, eps)
)
eps = ode_state[self._ode_epsilon_idx] % (2 * np.pi)
if eps > np.pi:
eps -= 2 * np.pi
system_state = np.concatenate((
mechanical_state,
[torque],
i_abc, i_dq,
u_in, u_dq,
[eps],
u_sup
))
return (system_state + noise) / self._limits
def reset(self, *_):
# Docstring of superclass
motor_state = self._electrical_motor.reset(
state_space=self.state_space,
state_positions=self.state_positions)
mechanical_state = self._mechanical_load.reset(
state_positions=self.state_positions,
state_space=self.state_space,
nominal_state=self.nominal_state)
ode_state = np.concatenate((mechanical_state, motor_state))
u_sup = self.supply.reset()
eps = ode_state[self._ode_epsilon_idx]
if eps > np.pi:
eps -= 2 * np.pi
u_abc = self.converter.reset()
u_abc = [u * u_s for u in u_abc for u_s in u_sup]
u_dq = self.abc_to_dq_space(u_abc, eps)
i_dq = ode_state[self._ode_currents_idx]
i_abc = self.dq_to_abc_space(i_dq, eps)
torque = self.electrical_motor.torque(motor_state)
noise = self._noise_generator.reset()
self._t = 0
self._k = 0
self._ode_solver.set_initial_value(ode_state, self._t)
system_state = np.concatenate((
mechanical_state,
[torque],
i_abc, i_dq,
u_abc, u_dq,
[eps],
u_sup,
))
return (system_state + noise) / self._limits
class SquirrelCageInductionMotorSystem(ThreePhaseMotorSystem):
"""
SCML-System for the Squirrel Cage Induction Motor
"""
def __init__(self, control_space='abc', ode_solver='scipy.ode', **kwargs):
"""
Args:
            control_space(str): ('abc' or 'dq') Choose whether the action space is in dq or abc coordinates
            kwargs: Further arguments to pass to SCMLSystem
"""
super().__init__(ode_solver=ode_solver, **kwargs)
self.control_space = control_space
if control_space == 'dq':
self._action_space = Box(-1, 1, shape=(2,), dtype=np.float64)
def _build_state_space(self, state_names):
# Docstring of superclass
low = -1 * np.ones_like(state_names, dtype=float)
low[self.U_SUP_IDX] = 0.0
high = np.ones_like(state_names, dtype=float)
return Box(low, high, dtype=np.float64)
def _build_state_names(self):
# Docstring of superclass
return (
self._mechanical_load.state_names + ['torque',
'i_sa', 'i_sb', 'i_sc', 'i_sd', 'i_sq',
'u_sa', 'u_sb', 'u_sc', 'u_sd', 'u_sq',
'epsilon', 'u_sup',
]
)
def _set_indices(self):
# Docstring of superclass
super()._set_indices()
self._motor_ode_idx += range(self._motor_ode_idx[-1] + 1, self._motor_ode_idx[-1] + 1 + len(self._electrical_motor.FLUXES))
self._motor_ode_idx += [self._motor_ode_idx[-1] + 1]
self._ode_currents_idx = self._motor_ode_idx[self._electrical_motor.I_SALPHA_IDX:self._electrical_motor.I_SBETA_IDX + 1]
self._ode_flux_idx = self._motor_ode_idx[self._electrical_motor.PSI_RALPHA_IDX:self._electrical_motor.PSI_RBETA_IDX + 1]
self.OMEGA_IDX = self.mechanical_load.OMEGA_IDX
self.TORQUE_IDX = len(self.mechanical_load.state_names)
currents_lower = self.TORQUE_IDX + 1
currents_upper = currents_lower + 5
self.CURRENTS_IDX = list(range(currents_lower, currents_upper))
voltages_lower = currents_upper
voltages_upper = voltages_lower + 5
self.VOLTAGES_IDX = list(range(voltages_lower, voltages_upper))
self.EPSILON_IDX = voltages_upper
self.U_SUP_IDX = list(range(self.EPSILON_IDX + 1, self.EPSILON_IDX + 1 + self._supply.voltage_len))
self._ode_epsilon_idx = self._motor_ode_idx[-1]
def calculate_field_angle(self, state):
psi_ralpha = state[self._ode_flux_idx[0]]
psi_rbeta = state[self._ode_flux_idx[1]]
eps_fs = np.arctan2(psi_rbeta, psi_ralpha)
return eps_fs
def simulate(self, action, *_, **__):
# Docstring of superclass
ode_state = self._ode_solver.y
eps_fs = self.calculate_field_angle(ode_state)
if self.control_space == 'dq':
action = self.dq_to_abc_space(action, eps_fs)
i_in = self.alphabeta_to_abc_space(self._electrical_motor.i_in(ode_state[self._ode_currents_idx]))
switching_times = self._converter.set_action(action, self._t)
for t in switching_times[:-1]:
i_sup = self._converter.i_sup(i_in)
u_sup = self._supply.get_voltage(self._t, i_sup)
u_in = self._converter.convert(i_in, self._ode_solver.t)
u_in = [u * u_s for u in u_in for u_s in u_sup]
u_alphabeta = self.abc_to_alphabeta_space(u_in)
self._ode_solver.set_f_params(u_alphabeta)
ode_state = self._ode_solver.integrate(t)
eps_fs = self.calculate_field_angle(ode_state)
i_in = self.alphabeta_to_abc_space(self._electrical_motor.i_in(ode_state[self._ode_currents_idx]))
i_sup = self._converter.i_sup(i_in)
u_sup = self._supply.get_voltage(self._t, i_sup)
u_in = self._converter.convert(i_in, self._ode_solver.t)
u_in = [u * u_s for u in u_in for u_s in u_sup]
u_dq = self.abc_to_dq_space(u_in, eps_fs)
u_alphabeta = self.abc_to_alphabeta_space(u_in)
self._ode_solver.set_f_params(u_alphabeta)
ode_state = self._ode_solver.integrate(self._t + self._tau)
self._t = self._ode_solver.t
self._k += 1
torque = self._electrical_motor.torque(ode_state[self._motor_ode_idx])
noise = self._noise_generator.noise()
mechanical_state = ode_state[self._load_ode_idx]
i_dq = self.alphabeta_to_dq_space(ode_state[self._ode_currents_idx], eps_fs)
i_abc = list(self.dq_to_abc_space(i_dq, eps_fs))
eps = ode_state[self._ode_epsilon_idx] % (2 * np.pi)
if eps > np.pi:
eps -= 2 * np.pi
system_state = np.concatenate((
mechanical_state, [torque],
i_abc, i_dq,
u_in, u_dq,
[eps],
u_sup
))
return (system_state + noise) / self._limits
def reset(self, *_):
# Docstring of superclass
mechanical_state = self._mechanical_load.reset(
state_positions=self.state_positions,
state_space=self.state_space,
nominal_state=self.nominal_state)
motor_state = self._electrical_motor.reset(
state_space=self.state_space,
state_positions=self.state_positions,
omega=mechanical_state)
ode_state = np.concatenate((mechanical_state, motor_state))
u_sup = self.supply.reset()
eps = ode_state[self._ode_epsilon_idx]
eps_fs = self.calculate_field_angle(ode_state)
if eps > np.pi:
eps -= 2 * np.pi
u_abc = self.converter.reset()
u_abc = [u * u_s for u in u_abc for u_s in u_sup]
u_dq = self.abc_to_dq_space(u_abc, eps_fs)
i_dq = self.alphabeta_to_dq_space(ode_state[self._ode_currents_idx], eps_fs)
i_abc = self.dq_to_abc_space(i_dq, eps_fs)
torque = self.electrical_motor.torque(motor_state)
noise = self._noise_generator.reset()
self._t = 0
self._k = 0
self._ode_solver.set_initial_value(ode_state, self._t)
system_state = np.concatenate([
mechanical_state, [torque],
i_abc, i_dq,
u_abc, u_dq,
[eps],
u_sup
])
return (system_state + noise) / self._limits
class DoublyFedInductionMotorSystem(ThreePhaseMotorSystem):
"""
SCML-System for the Doubly Fed Induction Motor
"""
def __init__(self, control_space='abc', ode_solver='scipy.ode', **kwargs):
"""
Args:
            control_space(str): ('abc' or 'dq') Choose whether the action space is in dq or abc coordinates
            kwargs: Further arguments to pass to SCMLSystem
"""
super().__init__(ode_solver=ode_solver, **kwargs)
self.control_space = control_space
if control_space == 'dq':
self._action_space = Box(-1, 1, shape=(4,), dtype=np.float64)
self.stator_voltage_space_idx = 0
self.stator_voltage_low_idx = 0
self.stator_voltage_high_idx = \
self.stator_voltage_low_idx \
+ self._converter.subsignal_voltage_space_dims[self.stator_voltage_space_idx]
self.rotor_voltage_space_idx = 1
self.rotor_voltage_low_idx = self.stator_voltage_high_idx
self.rotor_voltage_high_idx = \
self.rotor_voltage_low_idx \
+ self._converter.subsignal_voltage_space_dims[self.rotor_voltage_space_idx]
def _set_limits(self):
"""
Method to set the physical limits from the modules.
"""
for ind, state in enumerate(self._state_names):
motor_lim = self._electrical_motor.limits.get(state, np.inf)
mechanical_lim = self._mechanical_load.limits.get(state, np.inf)
self._limits[ind] = min(motor_lim, mechanical_lim)
self._limits[self._state_positions['u_sup']] = self.supply.u_nominal
def _build_state_space(self, state_names):
# Docstring of superclass
low = -1 * np.ones_like(state_names, dtype=float)
low[self.U_SUP_IDX] = 0.0
high = np.ones_like(state_names, dtype=float)
return Box(low, high, dtype=np.float64)
def _build_state_names(self):
# Docstring of superclass
names_l = \
self._mechanical_load.state_names \
+ [
'torque',
'i_sa', 'i_sb', 'i_sc', 'i_sd', 'i_sq',
'i_ra', 'i_rb', 'i_rc', 'i_rd', 'i_rq',
'u_sa', 'u_sb', 'u_sc', 'u_sd', 'u_sq',
'u_ra', 'u_rb', 'u_rc', 'u_rd', 'u_rq',
'epsilon', 'u_sup',
]
return names_l
def _set_indices(self):
# Docstring of superclass
super()._set_indices()
self._motor_ode_idx += range(self._motor_ode_idx[-1] + 1, self._motor_ode_idx[-1] + 1 + len(self._electrical_motor.FLUXES))
self._motor_ode_idx += [self._motor_ode_idx[-1] + 1]
self._ode_currents_idx = self._motor_ode_idx[self._electrical_motor.I_SALPHA_IDX:self._electrical_motor.I_SBETA_IDX + 1]
self._ode_flux_idx = self._motor_ode_idx[self._electrical_motor.PSI_RALPHA_IDX:self._electrical_motor.PSI_RBETA_IDX + 1]
self.OMEGA_IDX = self.mechanical_load.OMEGA_IDX
self.TORQUE_IDX = len(self.mechanical_load.state_names)
currents_lower = self.TORQUE_IDX + 1
currents_upper = currents_lower + 10
self.CURRENTS_IDX = list(range(currents_lower, currents_upper))
voltages_lower = currents_upper
voltages_upper = voltages_lower + 10
self.VOLTAGES_IDX = list(range(voltages_lower, voltages_upper))
self.EPSILON_IDX = voltages_upper
self.U_SUP_IDX = list(range(self.EPSILON_IDX + 1, self.EPSILON_IDX + 1 + self._supply.voltage_len))
self._ode_epsilon_idx = self._motor_ode_idx[-1]
def calculate_field_angle(self, state):
# field angle is calculated from states
psi_ralpha = state[self._motor_ode_idx[self._electrical_motor.PSI_RALPHA_IDX]]
psi_rbeta = state[self._motor_ode_idx[self._electrical_motor.PSI_RBETA_IDX]]
eps_fs = np.arctan2(psi_rbeta, psi_ralpha)
return eps_fs
def calculate_rotor_current(self, state):
# rotor current is calculated from states
mp = self._electrical_motor.motor_parameter
l_r = mp['l_m'] + mp['l_sigr']
i_salpha = state[self._motor_ode_idx[self._electrical_motor.I_SALPHA_IDX]]
i_sbeta = state[self._motor_ode_idx[self._electrical_motor.I_SBETA_IDX]]
psi_ralpha = state[self._motor_ode_idx[self._electrical_motor.PSI_RALPHA_IDX]]
psi_rbeta = state[self._motor_ode_idx[self._electrical_motor.PSI_RBETA_IDX]]
i_ralpha = 1 / l_r * psi_ralpha - mp['l_m'] / l_r * i_salpha
i_rbeta = 1 / l_r * psi_rbeta - mp['l_m'] / l_r * i_sbeta
return [i_ralpha, i_rbeta]
def simulate(self, action, *_, **__):
# Docstring of superclass
# Coordinate Systems used here:
# alphabeta refers to the stator-fixed two-phase reference frame
# gammadelta refers to the rotor-fixed two-phase reference frame
# abc refers to the stator-fixed three-phase reference frame
# def refers to the rotor-fixed three-phase reference frame
# dq refers to the field-oriented (two-phase) reference frame
# e.g. u_rdef is the rotor voltage representation in the rotor-fixed three-phase reference frame
        # u_rabc is the rotor voltage representation in the stator-fixed three-phase reference frame
ode_state = self._ode_solver.y
eps_field = self.calculate_field_angle(ode_state)
eps_el = ode_state[self._ode_epsilon_idx]
# convert dq input voltage to abc
if self.control_space == 'dq':
stator_input_len = len(self._electrical_motor.STATOR_VOLTAGES)
rotor_input_len = len(self._electrical_motor.ROTOR_VOLTAGES)
action_stator = action[:stator_input_len]
action_rotor = action[stator_input_len:stator_input_len + rotor_input_len]
action_stator = self.dq_to_abc_space(action_stator, eps_field)
action_rotor = self.dq_to_abc_space(action_rotor, eps_field-eps_el)
action = np.concatenate((action_stator, action_rotor)).tolist()
i_sabc = self.alphabeta_to_abc_space(self._electrical_motor.i_in(ode_state[self._ode_currents_idx]))
i_rdef = self.alphabeta_to_abc_space(self.calculate_rotor_current(ode_state))
switching_times = self._converter.set_action(action, self._t)
for t in switching_times[:-1]:
i_sup = self._converter.i_sup(np.concatenate((i_sabc, i_rdef)))
u_sup = self._supply.get_voltage(self._t, i_sup)
u_in = self._converter.convert(np.concatenate([i_sabc, i_rdef]).tolist(), self._ode_solver.t)
u_in = [u * u_s for u in u_in for u_s in u_sup]
u_sabc = u_in[self.stator_voltage_low_idx:self.stator_voltage_high_idx]
u_rdef = u_in[self.rotor_voltage_low_idx:self.rotor_voltage_high_idx]
u_rdq = self.abc_to_dq_space(u_rdef, eps_field-eps_el)
u_salphabeta = self.abc_to_alphabeta_space(u_sabc)
u_ralphabeta = self.dq_to_alphabeta_space(u_rdq, eps_field)
u_sr_alphabeta = np.array([u_salphabeta, u_ralphabeta])
self._ode_solver.set_f_params(u_sr_alphabeta)
ode_state = self._ode_solver.integrate(t)
eps_field = self.calculate_field_angle(ode_state)
eps_el = ode_state[self._ode_epsilon_idx]
i_sabc = self.alphabeta_to_abc_space(self._electrical_motor.i_in(ode_state[self._ode_currents_idx]))
i_rdef = self.alphabeta_to_abc_space(self.calculate_rotor_current(ode_state))
i_sup = self._converter.i_sup(np.concatenate((i_sabc, i_rdef)))
u_sup = self._supply.get_voltage(self._t, i_sup)
u_in = self._converter.convert(np.concatenate([i_sabc, i_rdef]).tolist(), self._ode_solver.t)
u_in = [u * u_s for u in u_in for u_s in u_sup]
u_sabc = u_in[self.stator_voltage_low_idx:self.stator_voltage_high_idx]
u_rdef = u_in[self.rotor_voltage_low_idx:self.rotor_voltage_high_idx]
u_sdq = self.abc_to_dq_space(u_sabc, eps_field)
u_rdq = self.abc_to_dq_space(u_rdef, eps_field-eps_el)
u_salphabeta = self.abc_to_alphabeta_space(u_sabc)
u_ralphabeta = self.dq_to_alphabeta_space(u_rdq, eps_field)
u_sr_alphabeta = np.array([u_salphabeta, u_ralphabeta])
self._ode_solver.set_f_params(u_sr_alphabeta)
ode_state = self._ode_solver.integrate(self._t + self._tau)
self._t = self._ode_solver.t
self._k += 1
torque = self._electrical_motor.torque(ode_state[self._motor_ode_idx])
noise = self._noise_generator.noise()
mechanical_state = ode_state[self._load_ode_idx]
i_sdq = self.alphabeta_to_dq_space(ode_state[self._ode_currents_idx], eps_field)
i_sabc = list(self.dq_to_abc_space(i_sdq, eps_field))
i_rdq = self.alphabeta_to_dq_space(self.calculate_rotor_current(ode_state), eps_field)
i_rdef = list(self.dq_to_abc_space(i_rdq, eps_field-eps_el))
eps_el = ode_state[self._ode_epsilon_idx] % (2 * np.pi)
if eps_el > np.pi:
eps_el -= 2 * np.pi
system_state = np.concatenate((
mechanical_state,
[torque],
i_sabc, i_sdq,
i_rdef, i_rdq,
u_sabc, u_sdq,
u_rdef, u_rdq,
[eps_el],
u_sup,
))
return (system_state + noise) / self._limits
def reset(self, *_):
# Docstring of superclass
mechanical_state = self._mechanical_load.reset(
state_positions=self.state_positions,
state_space=self.state_space,
nominal_state=self.nominal_state)
motor_state = self._electrical_motor.reset(
state_space=self.state_space,
state_positions=self.state_positions,
omega=mechanical_state)
ode_state = np.concatenate((mechanical_state, motor_state))
u_sup = self.supply.reset()
eps_el = ode_state[self._ode_epsilon_idx]
eps_field = self.calculate_field_angle(ode_state)
if eps_el > np.pi:
eps_el -= 2 * np.pi
if eps_field > np.pi:
eps_field -= 2 * np.pi
u_sr_abcdef = self.converter.reset()
u_sr_abcdef = [u * u_s for u in u_sr_abcdef for u_s in u_sup]
u_sabc = u_sr_abcdef[self.stator_voltage_low_idx:self.stator_voltage_high_idx]
u_rdef = u_sr_abcdef[self.rotor_voltage_low_idx:self.rotor_voltage_high_idx]
u_sdq = self.abc_to_dq_space(u_sabc, eps_field)
u_rdq = self.abc_to_dq_space(u_rdef, eps_field-eps_el)
i_sdq = self.alphabeta_to_dq_space(ode_state[self._ode_currents_idx], eps_field)
i_sabc = self.dq_to_abc_space(i_sdq, eps_field)
i_rdq = self.alphabeta_to_dq_space(self.calculate_rotor_current(ode_state), eps_field-eps_el)
i_rdef = self.dq_to_abc_space(i_rdq, eps_field-eps_el)
torque = self.electrical_motor.torque(motor_state)
noise = self._noise_generator.reset()
self._t = 0
self._k = 0
self._ode_solver.set_initial_value(ode_state, self._t)
system_state = np.concatenate([
mechanical_state, [torque],
i_sabc, i_sdq,
i_rdef, i_rdq,
u_sabc, u_sdq,
u_rdef, u_rdq,
[eps_el],
u_sup
])
return (system_state + noise) / self._limits
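# Illustrative sketch (not used above): calculate_rotor_current in the induction
# motor systems follows from the rotor flux relation psi_r = l_m * i_s + l_r * i_r,
# solved for i_r. The parameter values below are arbitrary example numbers, not
# defaults of any motor model.
def _example_rotor_current(psi_ralpha=0.9, psi_rbeta=0.1,
                           i_salpha=2.0, i_sbeta=0.5,
                           l_m=0.14, l_sigr=0.01):
    l_r = l_m + l_sigr
    i_ralpha = psi_ralpha / l_r - l_m / l_r * i_salpha
    i_rbeta = psi_rbeta / l_r - l_m / l_r * i_sbeta
    return i_ralpha, i_rbeta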
|
from bs4 import BeautifulSoup
import urllib.request
import sys
LAST_PAGES = 3
def getLastPages():
for index in range(0,LAST_PAGES):
print(index)
url = f"https://sanidad.castillalamancha.es/ciudadanos/enfermedades-infecciosas/coronavirus/notas-prensa?page={index}"
urllib.request.urlretrieve(url, f"./notas/notas-prensa-{index}.html")
def getNotesFromDay(date):
for index in range(0,LAST_PAGES):
page = f"./notas/notas-prensa-{index}.html"
getNotesFromPage(page,date)
def getNotesFromPage(page, reqDate=None):
    with open(page, "r") as f:
        html = f.read()
    soup = BeautifulSoup(html, features="html.parser")
noteList = soup.find("div", {"class": "view-content"}).findAll("div",{"class":"group-right"})
#print(noteList)
for note in noteList:
#print(note)
date=note.find("span").getText().split("/")
isodate=f"{date[2]}-{date[1]}-{date[0]}"
print(f"{isodate}")
if reqDate and reqDate != isodate:
continue
print(isodate + " " + note.getText())
url =note.find("a")['href']
#print(url)
filename = url.split("/")[-1]
urllib.request.urlretrieve(url, f"./notas/{isodate}-{filename}.html")
if __name__ == '__main__':
#getLastPages()
#filename = sys.argv[1]
#getNotesFromPage(filename)
date = sys.argv[1]
getNotesFromDay(date)
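    # Example invocation (illustrative; assumes the ./notas directory exists and
    # the listing pages were fetched beforehand with getLastPages()):
    #   python <this_script>.py 2020-04-01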
|
'''
attenuation after VNA is 30dBm
attenuation after SMF is 40dBm
one amplifier in use 30dBm gain
'''
from __future__ import print_function
import qt
import shutil
import sys
import os
import time
import numpy as np  # np.linspace / np.sqrt are used below
from constants import *
def generate_meta_file():
metafile = open('%s.meta.txt' % data.get_filepath()[:-4], 'w')
metafile.write('#inner loop\n%s\n%s\n%s\n%s\n'%
(drive_numpoints, drive_start_freq, drive_stop_freq, 'Frequency(Hz)'))
metafile.write('#outer loop\n%s\n%s\n%s\n%s\n'%
(probe_power_numpoint, probe_stop_power, probe_start_power, 'dBm'))
metafile.write('#outermost loop (unused)\n1\n0\n1\nNothing\n')
metafile.write('#for each of the values\n')
values = data.get_values()
i=0
while i<len(values):
metafile.write('%d\n%s\n'% (i+3, values[i]['name']))
i+=1
metafile.close()
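# Resulting layout of the .meta.txt file (illustrative; values depend on the sweep
# parameters defined below, and <...> are placeholders rather than literal output):
#   #inner loop
#   <drive_numpoints>
#   <drive_start_freq>
#   <drive_stop_freq>
#   Frequency(Hz)
#   #outer loop
#   <probe_power_numpoint>
#   <probe_stop_power>
#   <probe_start_power>
#   dBm
#   #outermost loop (unused)
#   1
#   0
#   1
#   Nothing
#   #for each of the values
#   <column index>
#   <value name>   (repeated for every value added to the data object)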
def copy_script(once):
if once:
shutil.copy2('%s'%sys.argv[0],'%s/%s'%(data.get_filepath()[:-(len(data.get_filename())+1)],os.path.basename(sys.argv[0])))
def r(list1, list2):
return [ np.sqrt(x**2+y**2) for x, y in zip(list1, list2) ]
#############################
# Measurement Parameters
#############################
# VNA sweep parameters
probe_center = 6.020*GHz
probe_span = 1*Hz
probe_start_freq = probe_center - probe_span/2
probe_stop_freq = probe_center + probe_span/2
probe_numpoints = 1
if_bw = 5*Hz
probe_start_power = -35 # 1+1 dB cable + 6 dir coupler
probe_stop_power = -35
probe_power_numpoint = 1
s_params = ['S21']
filename = raw_input('Filename : ')
avg_point = 1
# SMF sweep parameters
drive_start_freq = 4.4*GHz
drive_stop_freq = 4.6*GHz
resolution = 0.5*MHz
drive_numpoints = int(abs(drive_stop_freq - drive_start_freq)/resolution + 1)
drive_power = 0
#############################
# Initialize Instruments
#############################
znb = qt.instruments.create('ZNB20', 'RhodeSchwartz_ZNB20', address=ZNB20_ADDRESS, reset=True)
smf = qt.instruments.create('SMF100', 'RhodeSchwartz_SMF100', address = SMF100_ADDRESS, reset=True)
qs = qt.instruments.create('GS200', 'Yokogawa_GS200', address='USB0::0x0B21::0x0039::91T416206::INSTR')
rigol = qt.instruments.create('DP832A', 'Rigol_DP832A', address='TCPIP0::192.168.1.5::INSTR')
# setup SMF100 as source
smf.set_frequency(drive_start_freq)
smf.set_source_power(drive_power)
# Setup VNA as source
znb.set_external_reference(True)
znb.set_external_reference_frequency(10)
znb.set_start_frequency(probe_start_freq)
znb.set_stop_frequency(probe_stop_freq)
znb.set_numpoints(probe_numpoints)
znb.set_if_bandwidth(if_bw)
znb.set_source_power(probe_start_power)
znb.add_trace('S21')
# Turn on sources
znb.rf_on()
smf.rf_on()
# Test trigger
znb.send_trigger(wait=True)
# znb.autoscale()
go_on = raw_input('Continue? [y/n] ')
assert go_on.strip().upper() != 'N'
### SETTING UP DATA FILE
data=qt.Data(name=filename)
# data.add_comment('No. of repeated measurements for average is 60')
data.add_coordinate('Probe Power', units='dBm')
data.add_coordinate('Drive Frequency', units='Hz')
data.add_value('S21 real')
data.add_value('S21 imag')
data.add_value('S21 abs')
data.add_value('S21 phase')
drive_freq_array = np.linspace(drive_start_freq, drive_stop_freq, drive_numpoints)
probe_power_array = np.linspace(probe_start_power, probe_stop_power, probe_power_numpoint)
qt.mstart()
#traces_sum = [np.zeros(drive_numpoints), np.zeros(drive_numpoints)]
#num_avs = 0
once = True
for prob_power in probe_power_array:
start_time = time.time()
znb.set_source_power(prob_power)
power_list = np.linspace(prob_power, prob_power, num=drive_numpoints)
traces=[[],[],[],[]]
for index, drive_freq in enumerate(drive_freq_array):
# print('%d/%d' %(index+1,len(drive_freq_array)), end ='\r')
print('%d/%d'%(index+1,len(drive_freq_array)), end='\r')
traces_sum=[0,0,0,0]
smf.set_frequency(drive_freq)
for i in range(avg_point):
znb.send_trigger(wait=True)
trace = znb.get_data('S21')
traces_sum[0]+=np.real(trace)
traces_sum[1]+=np.imag(trace)
traces_sum[2]+=np.absolute(trace)
traces_sum[3]+=np.angle(trace)
traces[0].append(traces_sum[0][0]/avg_point)
traces[1].append(traces_sum[1][0]/avg_point)
traces[2].append(traces_sum[2][0]/avg_point)
traces[3].append(traces_sum[3][0]/avg_point)
end_time = time.time()
data.add_data_point(power_list, drive_freq_array, traces[0], traces[1], r(traces[0], traces[1]), traces[3])
generate_meta_file()
    copy_script(once)
    once = False
print(end_time - start_time)
smf.rf_off()
# znb.rf_off()
#print num_avs
# drive_array = np.linspace(drive_freq, drive_freq, probe_numpoints)
# print np.array(traces[0]).shape
#create script in data directory
shutil.copy2('%s'%sys.argv[0],'%s/%s'%(data.get_filepath()[:-(len(filename)+11)],os.path.basename(sys.argv[0])))
# copy_script(sys.argv[0], filename)
data.close_file(sys.argv[0])
# qs.sweep_current(0, delay = 0.05)
# qs.set_output(False)
# znb.rf_off()
# rigol.output_off(2)
# smf.rf_off()
|
"""Stop condition for DiviK.
stop.py
Copyright 2018 Spectre Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from functools import partial
from multiprocessing import Pool
from typing import Tuple
import numpy as np
import spdivik.distance as dst
import spdivik.types as ty
import spdivik.score as sc
def minimal_size(data: ty.Data, size: int = 2) -> bool:
"""Check if region is smaller than predefined size."""
return data.shape[0] <= size
def _split_into_one(data: ty.Data) -> Tuple[ty.IntLabels, ty.Centroids]:
labels = np.zeros(shape=(data.shape[0],), dtype=int)
centroids = np.mean(data, axis=0, keepdims=True)
return labels, centroids
class combine:
"""Combine stop conditions to be checked together."""
def __init__(self, *args: ty.StopCondition):
self._conditions = args
def __call__(self, data: ty.Data) -> bool:
"""Check if there is any precaution for segmentation."""
return any(precaution(data) for precaution in self._conditions)
class Gap:
"""GAP statistic-based stop condition."""
def __init__(self, distance: dst.DistanceMetric,
split_into_two: ty.SegmentationMethod,
n_trials: int = 100, seed: int = 0, correction: bool=True,
pool: Pool = None):
self._split_into_two = split_into_two
self.correction = correction
adjusted_gap = partial(sc.gap, distance=distance, seed=seed,
n_trials=n_trials, pool=pool)
self._gap_of_two = partial(adjusted_gap, split=split_into_two)
self._gap_of_one = partial(adjusted_gap, split=_split_into_one)
def __call__(self, data: ty.Data) -> bool:
"""Check if segmentation is significantly justified."""
labels, centroids = self._split_into_two(data)
split_likelihood, split_deviation = self._gap_of_two(
data, labels, centroids, return_deviation=True)
labels, centroids = _split_into_one(data)
dont_split_likelihood = self._gap_of_one(data, labels, centroids)
if self.correction:
return split_likelihood + split_deviation < dont_split_likelihood
else:
return split_likelihood < dont_split_likelihood
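# Illustrative sketch only (not used elsewhere in this module): combining the
# minimal-size check with the GAP criterion. The caller must supply the distance
# metric and the 2-segmentation method; neither is defined here.
def _example_stop_condition(distance: dst.DistanceMetric,
                            split_into_two: ty.SegmentationMethod):
    """Stop condition met when a region is tiny or GAP does not justify a split."""
    return combine(
        partial(minimal_size, size=20),
        Gap(distance=distance, split_into_two=split_into_two),
    )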
|
# Generated by Django 2.1.2 on 2018-10-09 16:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0004_auto_20181009_1950'),
]
operations = [
migrations.RemoveField(
model_name='company',
name='product',
),
migrations.AddField(
model_name='company',
name='products',
field=models.ManyToManyField(through='app.CompanyProduct', to='app.Product', verbose_name='Предприятие'),
),
migrations.AlterField(
model_name='companyproduct',
name='company',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='app.Company', verbose_name='Предприятие'),
),
migrations.AlterField(
model_name='companyproduct',
name='product',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='app.Product', verbose_name='Услуга\\товар'),
),
]
|
import os
import hvac
import json
import requests
__all__ = [
"VaultutilpyError",
"MissingEnvVar",
"AuthTokenRetrieval",
"SecretNotFound",
"TokenFileNotFound",
"in_cluster_client",
"in_cluster_secret",
]
class VaultutilpyError(Exception):
pass
class MissingEnvVar(VaultutilpyError):
pass
class AuthTokenRetrieval(VaultutilpyError):
pass
class SecretNotFound(VaultutilpyError):
pass
class TokenFileNotFound(VaultutilpyError):
pass
KUBERNETES_SERVICE_ACCOUNT_TOKEN_FILE = "/var/run/secrets/kubernetes.io/serviceaccount/token"
def in_cluster_client():
"""
in_cluster_client returns a vault (hvac) API client using environment variables passed
to pods within a kubernetes cluster
"""
for envvar in ["VAULT_ADDR", "VAULT_AUTH_PATH", "VAULT_ROLE"]:
if os.environ.get(envvar) is None:
raise MissingEnvVar("Missing Envvar: {0}".format(envvar))
vault_addr = os.environ.get("VAULT_ADDR")
vault_auth_path = os.environ.get("VAULT_AUTH_PATH")
vault_role = os.environ.get("VAULT_ROLE")
if not os.path.exists(KUBERNETES_SERVICE_ACCOUNT_TOKEN_FILE):
raise TokenFileNotFound(
"Could not find serviceaccount token at {0}".format(KUBERNETES_SERVICE_ACCOUNT_TOKEN_FILE)
)
with open(KUBERNETES_SERVICE_ACCOUNT_TOKEN_FILE, "rb") as fd:
jwt = fd.read()
payload = {"jwt": jwt.decode("utf-8"), "role": vault_role}
auth_url = "{0}/v1/auth/{1}/login".format(vault_addr, vault_auth_path)
headers = {"Content-type": "application/json", "Accept": "text/plain"}
json_payload = json.dumps(payload)
req = requests.post(auth_url, headers=headers, data=json_payload)
if req.status_code != requests.codes.ok:
raise AuthTokenRetrieval(
"Failed to receive auth token with code: {0} err: {1}".format(req.status_code, req.text)
)
client_token = req.json().get("auth", {}).get("client_token", False)
if not client_token:
raise AuthTokenRetrieval("Failed to parse auth token")
client = hvac.Client(url=vault_addr, token=client_token)
return client
def in_cluster_secret(path, field):
"""
in_cluster_secret is a helper function to retrieve a secret from vault from within kubernetes
"""
client = in_cluster_client()
secret = client.read(path)
if secret is None:
raise SecretNotFound("Could not find secret path at path: {0}, field: {1}".format(path, field))
val = secret.get(field, None)
if val is None:
raise SecretNotFound("Could not find secret field at path: {0}, field: {1}".format(path, field))
return val
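# Example usage from inside a Kubernetes pod (illustrative; the secret path and
# field below are placeholders, and VAULT_ADDR, VAULT_AUTH_PATH and VAULT_ROLE
# must be present in the environment):
#
#   db_password = in_cluster_secret("secret/my-app/db", "password")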
|
#
# Copyright 2021 Lukas Schmelzeisen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import Optional, TypeVar
from jpype import JClass, JException, JObject # type: ignore
from wikidata_history_analyzer.datamodel.wikidata_revision import (
WikidataRevision,
WikidataRevisionProcessingException,
)
from wikidata_history_analyzer.jvm_manager import JvmManager
class WikidataRawRevisionWdtkDeserializationException(
WikidataRevisionProcessingException
):
pass
_T_WikidataRevision = TypeVar("_T_WikidataRevision", bound="WikidataRevision")
class WikidataRawRevision(WikidataRevision):
text: Optional[str]
def load_wdtk_deserialization(self, jvm_manager: JvmManager) -> JObject:
if self.text is None:
raise WikidataRawRevisionWdtkDeserializationException(
"Entity has no text.", self
)
_load_wdtk_classes_and_objects(jvm_manager)
assert _WDTK_JSON_SERIALIZER is not None # for mypy.
# The following is based on WDTK's WikibaseRevisionProcessor.
try:
if '"redirect":' in self.text:
return _WDTK_JSON_SERIALIZER.deserializeEntityRedirectDocument(
self.text
)
elif self.content_model == "wikibase-item":
return _WDTK_JSON_SERIALIZER.deserializeItemDocument(self.text)
elif self.content_model == "wikibase-property":
return _WDTK_JSON_SERIALIZER.deserializePropertyDocument(self.text)
else:
raise WikidataRawRevisionWdtkDeserializationException(
f"JSON deserialization of {self.content_model} not implemented by "
"Wikidata Toolkit.",
self,
)
except JException as exception:
raise WikidataRawRevisionWdtkDeserializationException(
"JSON deserialization by Wikidata Toolkit failed.", self, exception
)
_WDTK_JSON_SERIALIZER: Optional[JObject] = None
def _load_wdtk_classes_and_objects(_jvm_manager: JvmManager) -> None:
global _WDTK_JSON_SERIALIZER
if _WDTK_JSON_SERIALIZER is None:
_WDTK_JSON_SERIALIZER = JClass(
"org.wikidata.wdtk.datamodel.helpers.JsonDeserializer"
)(JClass("org.wikidata.wdtk.datamodel.helpers.Datamodel").SITE_WIKIDATA)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Han Xiao <artex.xh@gmail.com> <https://hanxiao.github.io>
import random
from datetime import datetime
import numpy as np
import zmq
from zmq.utils import jsonapi
class BertClient:
def __init__(self, ip='localhost', port=5555, output_fmt='ndarray'):
self.socket = zmq.Context().socket(zmq.REQ)
self.socket.identity = ('client-%d-%d' %
(datetime.now().timestamp(), random.randint(0, 999))).encode('ascii')
self.socket.connect('tcp://%s:%d' % (ip, port))
if output_fmt == 'ndarray':
self.formatter = lambda x: x
elif output_fmt == 'list':
self.formatter = lambda x: x.tolist()
else:
raise AttributeError('"output_fmt" must be "ndarray" or "list"')
def encode(self, texts):
if self.is_valid_input(texts):
self.socket.send_pyobj(texts)
response = self.socket.recv_multipart()
arr_info, arr_val = jsonapi.loads(response[0]), response[3]
X = np.frombuffer(memoryview(arr_val), dtype=arr_info['dtype'])
return self.formatter(X.reshape(arr_info['shape']))
else:
raise AttributeError('"texts" must be "List[str]"!')
@staticmethod
def is_valid_input(texts):
return isinstance(texts, list) and all(isinstance(s, str) for s in texts)
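# Minimal usage sketch (assumes a matching BERT serving process is already
# listening on the given host/port; it is not started by this module):
#
#   bc = BertClient(ip='localhost', port=5555, output_fmt='ndarray')
#   vectors = bc.encode(['First sentence.', 'Second sentence.'])
#   # vectors has shape (number of texts, embedding dimension)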
|
# Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Provides a window for configuring play options."""
from __future__ import annotations
from typing import TYPE_CHECKING
import _ba
import ba
from bastd.ui import popup
if TYPE_CHECKING:
from typing import Any, Type, Tuple, Optional, Union
class PlayOptionsWindow(popup.PopupWindow):
"""A popup window for configuring play options."""
def __init__(self,
sessiontype: Type[ba.Session],
playlist: str,
scale_origin: Tuple[float, float],
delegate: Any = None):
# FIXME: Tidy this up.
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
# pylint: disable=too-many-locals
from ba.internal import (getclass, have_pro,
get_default_teams_playlist,
get_default_free_for_all_playlist,
filter_playlist)
from ba.internal import get_map_class
from bastd.ui.playlist import PlaylistTypeVars
self._r = 'gameListWindow'
self._delegate = delegate
self._pvars = PlaylistTypeVars(sessiontype)
self._transitioning_out = False
self._do_randomize_val = (ba.app.config.get(
self._pvars.config_name + ' Playlist Randomize', 0))
self._sessiontype = sessiontype
self._playlist = playlist
self._width = 500.0
self._height = 330.0 - 50.0
# In teams games, show the custom names/colors button.
if self._sessiontype is ba.DualTeamSession:
self._height += 50.0
self._row_height = 45.0
# Grab our maps to display.
model_opaque = ba.getmodel('level_select_button_opaque')
model_transparent = ba.getmodel('level_select_button_transparent')
mask_tex = ba.gettexture('mapPreviewMask')
# Poke into this playlist and see if we can display some of its maps.
map_textures = []
map_texture_entries = []
rows = 0
columns = 0
game_count = 0
scl = 0.35
c_width_total = 0.0
try:
max_columns = 5
name = playlist
if name == '__default__':
if self._sessiontype is ba.FreeForAllSession:
plst = get_default_free_for_all_playlist()
elif self._sessiontype is ba.DualTeamSession:
plst = get_default_teams_playlist()
else:
raise Exception('unrecognized session-type: ' +
str(self._sessiontype))
else:
try:
plst = ba.app.config[self._pvars.config_name +
' Playlists'][name]
except Exception:
print('ERROR INFO: self._config_name is:',
self._pvars.config_name)
print(
'ERROR INFO: playlist names are:',
list(ba.app.config[self._pvars.config_name +
' Playlists'].keys()))
raise
plst = filter_playlist(plst,
self._sessiontype,
remove_unowned=False,
mark_unowned=True)
game_count = len(plst)
for entry in plst:
mapname = entry['settings']['map']
maptype: Optional[Type[ba.Map]]
try:
maptype = get_map_class(mapname)
except Exception:
maptype = None
if maptype is not None:
tex_name = maptype.get_preview_texture_name()
if tex_name is not None:
map_textures.append(tex_name)
map_texture_entries.append(entry)
rows = (max(0, len(map_textures) - 1) // max_columns) + 1
columns = min(max_columns, len(map_textures))
if len(map_textures) == 1:
scl = 1.1
elif len(map_textures) == 2:
scl = 0.7
elif len(map_textures) == 3:
scl = 0.55
else:
scl = 0.35
self._row_height = 128.0 * scl
c_width_total = scl * 250.0 * columns
if map_textures:
self._height += self._row_height * rows
except Exception:
ba.print_exception('error listing playlist maps')
show_shuffle_check_box = game_count > 1
if show_shuffle_check_box:
self._height += 40
        # Create our _root_widget.
scale = (1.69 if ba.app.small_ui else 1.1 if ba.app.med_ui else 0.85)
super().__init__(position=scale_origin,
size=(self._width, self._height),
scale=scale)
playlist_name: Union[str, ba.Lstr] = (self._pvars.default_list_name
if playlist == '__default__' else
playlist)
self._title_text = ba.textwidget(parent=self.root_widget,
position=(self._width * 0.5,
self._height - 89 + 51),
size=(0, 0),
text=playlist_name,
scale=1.4,
color=(1, 1, 1),
maxwidth=self._width * 0.7,
h_align='center',
v_align='center')
self._cancel_button = ba.buttonwidget(
parent=self.root_widget,
position=(25, self._height - 53),
size=(50, 50),
scale=0.7,
label='',
color=(0.42, 0.73, 0.2),
on_activate_call=self._on_cancel_press,
autoselect=True,
icon=ba.gettexture('crossOut'),
iconscale=1.2)
h_offs_img = self._width * 0.5 - c_width_total * 0.5
v_offs_img = self._height - 118 - scl * 125.0 + 50
bottom_row_buttons = []
self._have_at_least_one_owned = False
for row in range(rows):
for col in range(columns):
tex_index = row * columns + col
if tex_index < len(map_textures):
tex_name = map_textures[tex_index]
h = h_offs_img + scl * 250 * col
v = v_offs_img - self._row_height * row
entry = map_texture_entries[tex_index]
owned = not (('is_unowned_map' in entry
and entry['is_unowned_map']) or
('is_unowned_game' in entry
and entry['is_unowned_game']))
if owned:
self._have_at_least_one_owned = True
try:
desc = getclass(entry['type'],
subclassof=ba.GameActivity
).get_config_display_string(entry)
if not owned:
desc = ba.Lstr(
value='${DESC}\n${UNLOCK}',
subs=[
('${DESC}', desc),
('${UNLOCK}',
ba.Lstr(
resource='unlockThisInTheStoreText'))
])
desc_color = (0, 1, 0) if owned else (1, 0, 0)
except Exception:
desc = ba.Lstr(value='(invalid)')
desc_color = (1, 0, 0)
btn = ba.buttonwidget(
parent=self.root_widget,
size=(scl * 240.0, scl * 120.0),
position=(h, v),
texture=ba.gettexture(tex_name if owned else 'empty'),
model_opaque=model_opaque if owned else None,
on_activate_call=ba.Call(ba.screenmessage, desc,
desc_color),
label='',
color=(1, 1, 1),
autoselect=True,
extra_touch_border_scale=0.0,
model_transparent=model_transparent if owned else None,
mask_texture=mask_tex if owned else None)
if row == 0 and col == 0:
ba.widget(edit=self._cancel_button, down_widget=btn)
if row == rows - 1:
bottom_row_buttons.append(btn)
if not owned:
# Ewww; buttons don't currently have alpha so in this
# case we draw an image over our button with an empty
# texture on it.
ba.imagewidget(parent=self.root_widget,
size=(scl * 260.0, scl * 130.0),
position=(h - 10.0 * scl,
v - 4.0 * scl),
draw_controller=btn,
color=(1, 1, 1),
texture=ba.gettexture(tex_name),
model_opaque=model_opaque,
opacity=0.25,
model_transparent=model_transparent,
mask_texture=mask_tex)
ba.imagewidget(parent=self.root_widget,
size=(scl * 100, scl * 100),
draw_controller=btn,
position=(h + scl * 70, v + scl * 10),
texture=ba.gettexture('lock'))
# Team names/colors.
self._custom_colors_names_button: Optional[ba.Widget]
if self._sessiontype is ba.DualTeamSession:
y_offs = 50 if show_shuffle_check_box else 0
self._custom_colors_names_button = ba.buttonwidget(
parent=self.root_widget,
position=(100, 200 + y_offs),
size=(290, 35),
on_activate_call=ba.WeakCall(self._custom_colors_names_press),
autoselect=True,
textcolor=(0.8, 0.8, 0.8),
label=ba.Lstr(resource='teamNamesColorText'))
if not have_pro():
ba.imagewidget(
parent=self.root_widget,
size=(30, 30),
position=(95, 202 + y_offs),
texture=ba.gettexture('lock'),
draw_controller=self._custom_colors_names_button)
else:
self._custom_colors_names_button = None
# Shuffle.
def _cb_callback(val: bool) -> None:
self._do_randomize_val = val
cfg = ba.app.config
cfg[self._pvars.config_name +
' Playlist Randomize'] = self._do_randomize_val
cfg.commit()
if show_shuffle_check_box:
self._shuffle_check_box = ba.checkboxwidget(
parent=self.root_widget,
position=(110, 200),
scale=1.0,
size=(250, 30),
autoselect=True,
text=ba.Lstr(resource=self._r + '.shuffleGameOrderText'),
maxwidth=300,
textcolor=(0.8, 0.8, 0.8),
value=self._do_randomize_val,
on_value_change_call=_cb_callback)
# Show tutorial.
try:
show_tutorial = ba.app.config['Show Tutorial']
except Exception:
show_tutorial = True
def _cb_callback_2(val: bool) -> None:
cfg = ba.app.config
cfg['Show Tutorial'] = val
cfg.commit()
self._show_tutorial_check_box = ba.checkboxwidget(
parent=self.root_widget,
position=(110, 151),
scale=1.0,
size=(250, 30),
autoselect=True,
text=ba.Lstr(resource=self._r + '.showTutorialText'),
maxwidth=300,
textcolor=(0.8, 0.8, 0.8),
value=show_tutorial,
on_value_change_call=_cb_callback_2)
# Grumble: current autoselect doesn't do a very good job
# with checkboxes.
if self._custom_colors_names_button is not None:
for btn in bottom_row_buttons:
ba.widget(edit=btn,
down_widget=self._custom_colors_names_button)
if show_shuffle_check_box:
ba.widget(edit=self._custom_colors_names_button,
down_widget=self._shuffle_check_box)
ba.widget(edit=self._shuffle_check_box,
up_widget=self._custom_colors_names_button)
else:
ba.widget(edit=self._custom_colors_names_button,
down_widget=self._show_tutorial_check_box)
ba.widget(edit=self._show_tutorial_check_box,
up_widget=self._custom_colors_names_button)
self._play_button = ba.buttonwidget(
parent=self.root_widget,
position=(70, 44),
size=(200, 45),
scale=1.8,
text_res_scale=1.5,
on_activate_call=self._on_play_press,
autoselect=True,
label=ba.Lstr(resource='playText'))
ba.widget(edit=self._play_button,
up_widget=self._show_tutorial_check_box)
ba.containerwidget(edit=self.root_widget,
start_button=self._play_button,
cancel_button=self._cancel_button,
selected_child=self._play_button)
# Update now and once per second.
self._update_timer = ba.Timer(1.0,
ba.WeakCall(self._update),
timetype=ba.TimeType.REAL,
repeat=True)
self._update()
def _custom_colors_names_press(self) -> None:
from ba.internal import have_pro
from bastd.ui import account as accountui
from bastd.ui import teamnamescolors
from bastd.ui import purchase
if not have_pro():
if _ba.get_account_state() != 'signed_in':
accountui.show_sign_in_prompt()
else:
purchase.PurchaseWindow(items=['pro'])
self._transition_out()
return
assert self._custom_colors_names_button
teamnamescolors.TeamNamesColorsWindow(
scale_origin=self._custom_colors_names_button.
get_screen_space_center())
def _does_target_playlist_exist(self) -> bool:
if self._playlist == '__default__':
return True
val: bool = self._playlist in ba.app.config.get(
self._pvars.config_name + ' Playlists', {})
assert isinstance(val, bool)
return val
def _update(self) -> None:
        # All we do here is make sure our targeted playlist still exists,
        # and close ourselves if not.
if not self._does_target_playlist_exist():
self._transition_out()
def _transition_out(self, transition: str = 'out_scale') -> None:
if not self._transitioning_out:
self._transitioning_out = True
ba.containerwidget(edit=self.root_widget, transition=transition)
def on_popup_cancel(self) -> None:
ba.playsound(ba.getsound('swish'))
self._transition_out()
def _on_cancel_press(self) -> None:
self._transition_out()
def _on_play_press(self) -> None:
# Disallow if our playlist has disappeared.
if not self._does_target_playlist_exist():
return
# Disallow if we have no unlocked games.
if not self._have_at_least_one_owned:
ba.playsound(ba.getsound('error'))
ba.screenmessage(ba.Lstr(resource='playlistNoValidGamesErrorText'),
color=(1, 0, 0))
return
cfg = ba.app.config
cfg[self._pvars.config_name + ' Playlist Selection'] = self._playlist
cfg.commit()
_ba.fade_screen(False, endcall=self._run_selected_playlist)
_ba.lock_all_input()
self._transition_out(transition='out_left')
if self._delegate is not None:
self._delegate.on_play_options_window_run_game()
def _run_selected_playlist(self) -> None:
_ba.unlock_all_input()
try:
_ba.new_host_session(self._sessiontype)
except Exception:
from bastd import mainmenu
ba.print_exception('exception running session', self._sessiontype)
# Drop back into a main menu session.
_ba.new_host_session(mainmenu.MainMenuSession)
|
''' Tests for yarals.protocol module '''
import json
import pytest
from yarals import protocol
@pytest.mark.protocol
def test_diagnostic():
''' Ensure Diagnostic is properly encoded to JSON dictionaries '''
pos_dict = {"line": 10, "character": 15}
pos = protocol.Position(line=pos_dict["line"], char=pos_dict["character"])
rg_dict = {"start": pos_dict, "end": pos_dict}
rg_obj = protocol.Range(start=pos, end=pos)
diag_dict = {
"message": "Test Diagnostic",
"range": rg_dict,
"relatedInformation": [],
"severity": 1
}
diag = protocol.Diagnostic(
locrange=rg_obj,
message=diag_dict["message"],
severity=diag_dict["severity"]
)
assert json.dumps(diag, cls=protocol.JSONEncoder) == json.dumps(diag_dict)
@pytest.mark.protocol
def test_completionitem():
''' Ensure CompletionItem is properly encoded to JSON dictionaries '''
comp_dict = {"label": "test", "kind": protocol.CompletionItemKind.CLASS}
comp = protocol.CompletionItem(label=comp_dict["label"], kind=comp_dict["kind"])
assert json.dumps(comp, cls=protocol.JSONEncoder) == json.dumps(comp_dict)
@pytest.mark.protocol
def test_location():
''' Ensure Location is properly encoded to JSON dictionaries '''
pos_dict = {"line": 10, "character": 15}
pos = protocol.Position(line=pos_dict["line"], char=pos_dict["character"])
rg_dict = {"start": pos_dict, "end": pos_dict}
rg_obj = protocol.Range(start=pos, end=pos)
loc_dict = {"range": rg_dict, "uri": "fake:///one/two/three/four.path"}
loc = protocol.Location(
locrange=rg_obj,
uri=loc_dict["uri"]
)
assert json.dumps(loc, cls=protocol.JSONEncoder) == json.dumps(loc_dict)
@pytest.mark.protocol
def test_position():
''' Ensure Position is properly encoded to JSON dictionaries '''
pos_dict = {"line": 10, "character": 15}
pos = protocol.Position(line=pos_dict["line"], char=pos_dict["character"])
assert json.dumps(pos, cls=protocol.JSONEncoder) == json.dumps(pos_dict)
@pytest.mark.protocol
def test_range():
''' Ensure Range is properly encoded to JSON dictionaries '''
pos_dict = {"line": 10, "character": 15}
pos = protocol.Position(line=pos_dict["line"], char=pos_dict["character"])
rg_dict = {"start": pos_dict, "end": pos_dict}
rg_obj = protocol.Range(
start=pos,
end=pos
)
assert json.dumps(rg_obj, cls=protocol.JSONEncoder) == json.dumps(rg_dict)
|
import sys
import os
sys.path.append('../utils')
from load import *
from msdi_io import *
from KbyK import *
from skimage.feature import hog
from skimage.color import rgb2gray
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC
#msdi_path = '/home/mrb8/Bureau/M2/SAM/projet_v2/datas/msdi'
class batchLoaderHOG(batchLoader):
# @Override
def loadBatch(self):
batch_size = min(self.i+self.batch_size,self.max_size)
X = []
y = []
for i_batch in range(batch_size):
if i_batch%10 == 0 :
os.system('cls' if os.name == 'nt' else "printf '\033c'")
print(str(i_batch)+"/"+str(self.batch_size))
entry_idx = self.i + i_batch
one_entry = self.msdi.loc[entry_idx]
img = load_img(one_entry, self.path_msdi)
genre = get_label(one_entry)
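            # Compute a HOG descriptor on the grayscale image: 4 orientation bins
            # per cell, with the cell size set to 199/3 px per side (assuming
            # square 199x199 album-cover images, i.e. a 3x3 grid of cells).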
fd = hog(rgb2gray(img), orientations=4,pixels_per_cell=(199/3, 199/3), visualize=False)
y.append(genre)
X.append(fd)
self.i += i_batch
return np.array(X),y
class batchLoader3x3(batchLoader):
# @Override
def loadBatch(self):
k=3
batch_size = min(self.i+self.batch_size,self.max_size)
X = []
y = []
for i_batch in range(batch_size):
if i_batch%10 == 0 :
os.system('cls' if os.name == 'nt' else "printf '\033c'")
print(str(i_batch)+"/"+str(self.batch_size))
entry_idx = self.i + i_batch
one_entry = self.msdi.loc[entry_idx]
img = load_img(one_entry, self.path_msdi)
genre = get_label(one_entry)
X.append(np.array([turnkBykNaive(img,k),turnkbykClean(img,k),turnkBykMean(img,k)]).reshape(-1))
y.append(genre)
self.i += i_batch
return np.array(X),y
if __name__ == '__main__':
print('Labels:', get_label_list())
bl3 = batchLoader3x3(3000,path_msdi=msdi_path)
blH = batchLoaderHOG(3000,path_msdi=msdi_path)
clf = SVC(C=10,kernel='rbf')
print('='*10,"3x3",'='*10)
batch = bl3.loadBatch()
X_3x3,y = batch[0],batch[1]
print(cross_val_score(clf,X_3x3,y,cv=5))
print('='*10,"HOG",'='*10)
batch = blH.loadBatch()
X_HOG,y = batch[0],batch[1]
print(cross_val_score(clf,X_HOG,y,cv=5))
print(cross_val_score(clf,np.hstack((X_HOG,X_3x3)),y,cv=5))
|
def inc(x):
def incx(y):
return x + y
return incx
# Function currying.
# Use inc to build specialised versions of an increment function.
# 1. Treating functions as values keeps the focus on describing the problem
#    rather than on how it is implemented, which makes the code easier to read.
# 2. Because the outer function returns the inner function, the code is written
#    in terms of expressions that describe the problem rather than the steps
#    needed to carry it out.
inc1 = inc(2)
inc2 = inc(5)
print(inc1(2))
print(inc2(5))
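# The two-step calls above are equivalent to applying the curried function
# directly, e.g. inc(2)(3) == 5.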
|
# Simple test to ensure that we can load the xapian module and exercise basic
# functionality successfully.
#
# Copyright (C) 2004,2005,2006,2007,2008,2010,2011,2015 Olly Betts
# Copyright (C) 2007 Lemur Consulting Ltd
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
import sys
import xapian
from testsuite import *
mystemmers = set()
mystemmer_id = 0
# Stemmer which strips English vowels.
class MyStemmer(xapian.StemImplementation):
def __init__(self):
global mystemmers
global mystemmer_id
super(MyStemmer, self).__init__()
mystemmers.add(mystemmer_id)
self._id = mystemmer_id
mystemmer_id += 1
def __call__(self, s):
import re
return re.sub(r'[aeiou]', '', s)
def __del__(self):
global mystemmers
if self._id not in mystemmers:
raise TestFail("MyStemmer #%d deleted more than once" % self._id)
mystemmers.remove(self._id)
def test_all():
# Test the version number reporting functions give plausible results.
v = "%d.%d.%d" % (xapian.major_version(),
xapian.minor_version(),
xapian.revision())
v2 = xapian.version_string()
expect(v2, v, "Unexpected version output")
# A regexp check would be better, but seems to create a bogus "leak" of -1
# objects in Python 3.
expect(len(xapian.__version__.split('.')), 3, 'xapian.__version__ not X.Y.Z')
expect((xapian.__version__.split('.'))[0], '1', 'xapian.__version__ not "1.Y.Z"')
def access_cvar():
return xapian.cvar
# Check that SWIG isn't generating cvar (regression test for ticket#297).
expect_exception(AttributeError, "'module' object has no attribute 'cvar'",
access_cvar)
stem = xapian.Stem("english")
expect(str(stem), "Xapian::Stem(english)", "Unexpected str(stem)")
doc = xapian.Document()
doc.set_data("a\0b")
if doc.get_data() == "a":
raise TestFail("get_data+set_data truncates at a zero byte")
expect(doc.get_data(), "a\0b", "get_data+set_data doesn't transparently handle a zero byte")
doc.set_data("is there anybody out there?")
doc.add_term("XYzzy")
doc.add_posting(stem("is"), 1)
doc.add_posting(stem("there"), 2)
doc.add_posting(stem("anybody"), 3)
doc.add_posting(stem("out"), 4)
doc.add_posting(stem("there"), 5)
db = xapian.inmemory_open()
db.add_document(doc)
expect(db.get_doccount(), 1, "Unexpected db.get_doccount()")
terms = ["smoke", "test", "terms"]
expect_query(xapian.Query(xapian.Query.OP_OR, terms),
"(smoke OR test OR terms)")
query1 = xapian.Query(xapian.Query.OP_PHRASE, ("smoke", "test", "tuple"))
query2 = xapian.Query(xapian.Query.OP_XOR, (xapian.Query("smoke"), query1, "string"))
expect_query(query1, "(smoke PHRASE 3 test PHRASE 3 tuple)")
expect_query(query2, "(smoke XOR (smoke PHRASE 3 test PHRASE 3 tuple) XOR string)")
subqs = ["a", "b"]
expect_query(xapian.Query(xapian.Query.OP_OR, subqs), "(a OR b)")
expect_query(xapian.Query(xapian.Query.OP_VALUE_RANGE, 0, '1', '4'),
"VALUE_RANGE 0 1 4")
expect_query(xapian.Query.MatchAll, "<alldocuments>")
expect_query(xapian.Query.MatchNothing, "")
# Feature test for Query.__iter__
term_count = 0
for term in query2:
term_count += 1
expect(term_count, 4, "Unexpected number of terms in query2")
enq = xapian.Enquire(db)
enq.set_query(xapian.Query(xapian.Query.OP_OR, "there", "is"))
mset = enq.get_mset(0, 10)
expect(mset.size(), 1, "Unexpected mset.size()")
expect(len(mset), 1, "Unexpected mset.size()")
# Feature test for Enquire.matching_terms(docid)
term_count = 0
for term in enq.matching_terms(mset.get_hit(0)):
term_count += 1
expect(term_count, 2, "Unexpected number of matching terms")
# Feature test for MSet.__iter__
msize = 0
for match in mset:
msize += 1
expect(msize, mset.size(), "Unexpected number of entries in mset")
terms = " ".join(enq.matching_terms(mset.get_hit(0)))
expect(terms, "is there", "Unexpected terms")
# Feature test for ESet.__iter__
rset = xapian.RSet()
rset.add_document(1)
eset = enq.get_eset(10, rset)
term_count = 0
for term in eset:
term_count += 1
expect(term_count, 3, "Unexpected number of expand terms")
# Feature test for Database.__iter__
term_count = 0
for term in db:
term_count += 1
expect(term_count, 5, "Unexpected number of terms in db")
# Feature test for Database.allterms
term_count = 0
for term in db.allterms():
term_count += 1
expect(term_count, 5, "Unexpected number of terms in db.allterms")
# Feature test for Database.postlist
count = 0
for posting in db.postlist("there"):
count += 1
expect(count, 1, "Unexpected number of entries in db.postlist('there')")
# Feature test for Database.postlist with empty term (alldocspostlist)
count = 0
for posting in db.postlist(""):
count += 1
expect(count, 1, "Unexpected number of entries in db.postlist('')")
# Feature test for Database.termlist
count = 0
for term in db.termlist(1):
count += 1
expect(count, 5, "Unexpected number of entries in db.termlist(1)")
# Feature test for Database.positionlist
count = 0
for term in db.positionlist(1, "there"):
count += 1
expect(count, 2, "Unexpected number of entries in db.positionlist(1, 'there')")
# Feature test for Document.termlist
count = 0
for term in doc.termlist():
count += 1
expect(count, 5, "Unexpected number of entries in doc.termlist()")
# Feature test for TermIter.skip_to
term = doc.termlist()
term.skip_to('n')
while True:
try:
x = next(term)
except StopIteration:
break
if x.term < 'n':
raise TestFail("TermIter.skip_to didn't skip term '%s'" % x.term)
# Feature test for Document.values
count = 0
for term in doc.values():
count += 1
expect(count, 0, "Unexpected number of entries in doc.values")
# Check exception handling for Xapian::DocNotFoundError
expect_exception(xapian.DocNotFoundError, "Docid 3 not found", db.get_document, 3)
# Check value of OP_ELITE_SET
expect(xapian.Query.OP_ELITE_SET, 10, "Unexpected value for OP_ELITE_SET")
# Feature test for MatchDecider
doc = xapian.Document()
doc.set_data("Two")
doc.add_posting(stem("out"), 1)
doc.add_posting(stem("outside"), 1)
doc.add_posting(stem("source"), 2)
doc.add_value(0, "yes")
db.add_document(doc)
class testmatchdecider(xapian.MatchDecider):
def __call__(self, doc):
return doc.get_value(0) == "yes"
query = xapian.Query(stem("out"))
enquire = xapian.Enquire(db)
enquire.set_query(query)
mset = enquire.get_mset(0, 10, None, testmatchdecider())
expect(mset.size(), 1, "Unexpected number of documents returned by match decider")
expect(mset.get_docid(0), 2, "MatchDecider mset has wrong docid in")
# Feature test for ExpandDecider
class testexpanddecider(xapian.ExpandDecider):
def __call__(self, term):
return (not term.startswith('a'))
enquire = xapian.Enquire(db)
rset = xapian.RSet()
rset.add_document(1)
eset = enquire.get_eset(10, rset, xapian.Enquire.USE_EXACT_TERMFREQ, 1.0, testexpanddecider())
eset_terms = [term[xapian.ESET_TNAME] for term in eset.items]
expect(len(eset_terms), eset.size(), "Unexpected number of terms returned by expand")
if [t for t in eset_terms if t.startswith('a')]:
raise TestFail("ExpandDecider was not used")
# Check min_wt argument to get_eset() works (new in 1.2.5).
eset = enquire.get_eset(100, rset, xapian.Enquire.USE_EXACT_TERMFREQ)
expect(eset.items[-1][xapian.ESET_WT] < 1.9, True, "test get_eset() without min_wt")
eset = enquire.get_eset(100, rset, xapian.Enquire.USE_EXACT_TERMFREQ, 1.0, None, 1.9)
expect(eset.items[-1][xapian.ESET_WT] >= 1.9, True, "test get_eset() min_wt")
# Check QueryParser parsing error.
qp = xapian.QueryParser()
expect_exception(xapian.QueryParserError, "Syntax: <expression> AND <expression>", qp.parse_query, "test AND")
# Check QueryParser pure NOT option
qp = xapian.QueryParser()
expect_query(qp.parse_query("NOT test", qp.FLAG_BOOLEAN + qp.FLAG_PURE_NOT),
"(<alldocuments> AND_NOT test:(pos=1))")
# Check QueryParser partial option
qp = xapian.QueryParser()
qp.set_database(db)
qp.set_default_op(xapian.Query.OP_AND)
qp.set_stemming_strategy(qp.STEM_SOME)
qp.set_stemmer(xapian.Stem('en'))
expect_query(qp.parse_query("foo o", qp.FLAG_PARTIAL),
"(Zfoo:(pos=1) AND ((out:(pos=2) SYNONYM outsid:(pos=2)) OR Zo:(pos=2)))")
expect_query(qp.parse_query("foo outside", qp.FLAG_PARTIAL),
"(Zfoo:(pos=1) AND Zoutsid:(pos=2))")
# Test supplying unicode strings
expect_query(xapian.Query(xapian.Query.OP_OR, (u'foo', u'bar')),
'(foo OR bar)')
expect_query(xapian.Query(xapian.Query.OP_OR, ('foo', u'bar\xa3')),
'(foo OR bar\xc2\xa3)')
expect_query(xapian.Query(xapian.Query.OP_OR, ('foo', 'bar\xc2\xa3')),
'(foo OR bar\xc2\xa3)')
expect_query(xapian.Query(xapian.Query.OP_OR, u'foo', u'bar'),
'(foo OR bar)')
expect_query(qp.parse_query(u"NOT t\xe9st", qp.FLAG_BOOLEAN + qp.FLAG_PURE_NOT),
"(<alldocuments> AND_NOT Zt\xc3\xa9st:(pos=1))")
doc = xapian.Document()
doc.set_data(u"Unicode with an acc\xe9nt")
doc.add_posting(stem(u"out\xe9r"), 1)
expect(doc.get_data(), u"Unicode with an acc\xe9nt".encode('utf-8'))
term = doc.termlist().next().term
expect(term, u"out\xe9r".encode('utf-8'))
# Check simple stopper
stop = xapian.SimpleStopper()
qp.set_stopper(stop)
expect(stop('a'), False)
expect_query(qp.parse_query(u"foo bar a", qp.FLAG_BOOLEAN),
"(Zfoo:(pos=1) AND Zbar:(pos=2) AND Za:(pos=3))")
stop.add('a')
expect(stop('a'), True)
expect_query(qp.parse_query(u"foo bar a", qp.FLAG_BOOLEAN),
"(Zfoo:(pos=1) AND Zbar:(pos=2))")
# Feature test for custom Stopper
class my_b_stopper(xapian.Stopper):
def __call__(self, term):
return term == "b"
def get_description(self):
return u"my_b_stopper"
stop = my_b_stopper()
expect(stop.get_description(), u"my_b_stopper")
qp.set_stopper(stop)
expect(stop('a'), False)
expect_query(qp.parse_query(u"foo bar a", qp.FLAG_BOOLEAN),
"(Zfoo:(pos=1) AND Zbar:(pos=2) AND Za:(pos=3))")
expect(stop('b'), True)
expect_query(qp.parse_query(u"foo bar b", qp.FLAG_BOOLEAN),
"(Zfoo:(pos=1) AND Zbar:(pos=2))")
# Test TermGenerator
termgen = xapian.TermGenerator()
doc = xapian.Document()
termgen.set_document(doc)
termgen.index_text('foo bar baz foo')
expect([(item.term, item.wdf, [pos for pos in item.positer]) for item in doc.termlist()], [('bar', 1, [2]), ('baz', 1, [3]), ('foo', 2, [1, 4])])
# Check DateValueRangeProcessor works
context("checking that DateValueRangeProcessor works")
qp = xapian.QueryParser()
vrpdate = xapian.DateValueRangeProcessor(1, 1, 1960)
qp.add_valuerangeprocessor(vrpdate)
query = qp.parse_query('12/03/99..12/04/01')
expect(str(query), 'Xapian::Query(VALUE_RANGE 1 19991203 20011204)')
# Regression test for bug#193, fixed in 1.0.3.
context("running regression test for bug#193")
vrp = xapian.NumberValueRangeProcessor(0, '$', True)
a = '$10'
b = '20'
slot, a, b = vrp(a, b)
expect(slot, 0)
expect(xapian.sortable_unserialise(a), 10)
expect(xapian.sortable_unserialise(b), 20)
# Regression tests copied from PHP (probably always worked in python, but
# let's check...)
context("running regression tests for issues which were found in PHP")
# PHP overload resolution involving boolean types failed.
enq.set_sort_by_value(1, True)
# Regression test - fixed in 0.9.10.1.
oqparser = xapian.QueryParser()
oquery = oqparser.parse_query("I like tea")
# Regression test for bug#192 - fixed in 1.0.3.
enq.set_cutoff(100)
# Test setting and getting metadata
expect(db.get_metadata('Foo'), '')
db.set_metadata('Foo', 'Foo')
expect(db.get_metadata('Foo'), 'Foo')
expect_exception(xapian.InvalidArgumentError, "Empty metadata keys are invalid", db.get_metadata, '')
expect_exception(xapian.InvalidArgumentError, "Empty metadata keys are invalid", db.set_metadata, '', 'Foo')
expect_exception(xapian.InvalidArgumentError, "Empty metadata keys are invalid", db.get_metadata, '')
# Test OP_SCALE_WEIGHT and corresponding constructor
expect_query(xapian.Query(xapian.Query.OP_SCALE_WEIGHT, xapian.Query('foo'), 5),
"5 * foo")
def test_userstem():
mystem = MyStemmer()
stem = xapian.Stem(mystem)
expect(stem('test'), 'tst')
stem2 = xapian.Stem(mystem)
expect(stem2('toastie'), 'tst')
indexer = xapian.TermGenerator()
indexer.set_stemmer(xapian.Stem(MyStemmer()))
doc = xapian.Document()
indexer.set_document(doc)
indexer.index_text('hello world')
s = '/'
for t in doc.termlist():
s += t.term
s += '/'
expect(s, '/Zhll/Zwrld/hello/world/')
parser = xapian.QueryParser()
parser.set_stemmer(xapian.Stem(MyStemmer()))
parser.set_stemming_strategy(xapian.QueryParser.STEM_ALL)
expect_query(parser.parse_query('color television'), '(clr:(pos=1) OR tlvsn:(pos=2))')
def test_internals_not_wrapped():
internals = []
for c in dir(xapian):
# Skip Python stuff like __file__ and __version__.
if c.startswith('__'): continue
if c.endswith('_'): internals.append(c)
# Skip non-classes
if not c[0].isupper(): continue
cls = eval('xapian.' + c)
if type(cls) != type(object): continue
for m in dir(cls):
if m.startswith('__'): continue
if m.endswith('_'): internals.append(c + '.' + m)
expect(internals, [])
def test_zz9_check_leaks():
import gc
gc.collect()
if len(mystemmers):
TestFail("%d MyStemmer objects not deleted" % len(mystemmers))
# Run all tests (ie, callables with names starting "test_").
if not runtests(globals()):
sys.exit(1)
# vim:syntax=python:set expandtab:
|
"""
Implementation of the 'pathological' photo-z PDF estimator,
as used in arXiv:2001.03621 (see section 3.3). It assigns each test set galaxy
a photo-z PDF equal to the normalized redshift distribution
N(z) of the training set.
"""
import numpy as np
from ceci.config import StageParameter as Param
from rail.estimation.estimator import Estimator, Informer
import qp
class trainZmodel:
"""
    Temporary class to store the single trainZ pdf for the trained model.
Given how simple this is to compute, this seems like overkill.
"""
def __init__(self, zgrid, pdf, zmode):
self.zgrid = zgrid
self.pdf = pdf
self.zmode = zmode
class Train_trainZ(Informer):
"""Train an Estimator which returns a global PDF for all galaxies
"""
name = 'Train_trainZ'
config_options = Informer.config_options.copy()
config_options.update(zmin=Param(float, 0.0, msg="The minimum redshift of the z grid"),
zmax=Param(float, 3.0, msg="The maximum redshift of the z grid"),
nzbins=Param(int, 301, msg="The number of gridpoints in the z grid"))
def __init__(self, args, comm=None):
Informer.__init__(self, args, comm=comm)
def run(self):
if self.config.hdf5_groupname:
training_data = self.get_data('input')[self.config.hdf5_groupname]
else: #pragma: no cover
training_data = self.get_data('input')
zbins = np.linspace(self.config.zmin, self.config.zmax, self.config.nzbins+1)
speczs = np.sort(training_data['redshift'])
train_pdf, _ = np.histogram(speczs, zbins)
midpoints = zbins[:-1] + np.diff(zbins)/2
zmode = midpoints[np.argmax(train_pdf)]
cdf = np.cumsum(train_pdf)
cdf = cdf / cdf[-1]
norm = cdf[-1]*(zbins[2]-zbins[1])
train_pdf = train_pdf/norm
zgrid = midpoints
self.model = trainZmodel(zgrid, train_pdf, zmode)
self.add_data('model', self.model)
class TrainZ(Estimator):
"""Estimator which returns a global PDF for all galaxies
"""
name = 'TrainZ'
config_options = Estimator.config_options.copy()
config_options.update(zmin=Param(float, 0.0, msg="The minimum redshift of the z grid"),
zmax=Param(float, 3.0, msg="The maximum redshift of the z grid"),
nzbins=Param(int, 301, msg="The number of gridpoints in the z grid"))
def __init__(self, args, comm=None):
self.zgrid = None
self.train_pdf = None
self.zmode = None
Estimator.__init__(self, args, comm=comm)
def open_model(self, **kwargs):
Estimator.open_model(self, **kwargs)
if self.model is None: #pragma: no cover
return
self.zgrid = self.model.zgrid
self.train_pdf = self.model.pdf
self.zmode = self.model.zmode
def run(self):
if self.config.hdf5_groupname:
test_data = self.get_data('input')[self.config.hdf5_groupname]
else: #pragma: no cover
test_data = self.get_data('input')
test_size = len(test_data['mag_i_lsst'])
zmode = np.repeat(self.zmode, test_size)
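        # Every test galaxy gets the same interpolated N(z) PDF: tile the trained
        # PDF once per object and attach the (constant) mode as ancillary data.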
qp_d = qp.Ensemble(qp.interp,
data=dict(xvals=self.zgrid, yvals=np.tile(self.train_pdf, (test_size, 1))))
qp_d.set_ancil(dict(zmode=zmode))
self.add_data('output', qp_d)
|
# KicadModTree is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# KicadModTree is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kicad-footprint-generator. If not, see < http://www.gnu.org/licenses/ >.
#
# (C) 2016 by Thomas Pointhuber, <thomas.pointhuber@gmx.at>
from KicadModTree.Vector import *
from KicadModTree.nodes.Node import Node
from KicadModTree.nodes.specialized import RectLine
from KicadModTree.nodes.specialized import RectFill
class FilledRect(Node):
r"""Add a Filled Rect to the render tree
Combines ``RectLine`` and ``RectFill`` into one class for simpler handling
:param \**kwargs:
See below
:Keyword Arguments:
* *start* (``Vector2D``) --
start edge of the rect
* *end* (``Vector2D``) --
end edge of the rect
* *layer* (``str``) --
layer on which the rect is drawn (default: 'F.SilkS')
* *width* (``float``) --
          width of the outer line (default: 0.12)
:Example:
>>> from KicadModTree import *
>>> FilledRect(start=[-3, -2], end=[3, 2], layer='F.SilkS')
"""
def __init__(self, **kwargs):
Node.__init__(self)
self.start_pos = Vector2D(kwargs['start'])
self.end_pos = Vector2D(kwargs['end'])
self.layer = kwargs.get('layer', 'F.SilkS')
self.width = kwargs.get('width', 0.12) # TODO: better variation to get line width
rect_line = RectLine(**kwargs)
rect_line._parent = self
rect_fill = RectFill(**kwargs)
rect_fill._parent = self
self.virtual_childs = [rect_line, rect_fill]
def getVirtualChilds(self):
return self.virtual_childs
def _getRenderTreeText(self):
render_text = Node._getRenderTreeText(self)
render_string = ['start: [x: {sx}, y: {sy}]'.format(sx=self.start_pos.x, sy=self.start_pos.y),
'end: [x: {ex}, y: {ey}]'.format(ex=self.end_pos.x, ey=self.end_pos.y)]
render_text += " [{}]".format(", ".join(render_string))
return render_text
|
"""
Creates an HTTP server with basic websocket communication.
"""
import argparse
from datetime import datetime
import json
import os
import traceback
import webbrowser
import tornado.web
import tornado.websocket
import tornado.escape
import tornado.ioloop
import tornado.locks
from tornado.web import url
import methods
import logging
from distutils.dir_util import copy_tree
from distutils.dir_util import remove_tree
from distutils.dir_util import mkpath
import time
# global variables...
# epnmipaddr = args.epnm_ipaddr
# epnmuser = args.epnm_user
# epnmpassword = args.epnm_pass
epnmipaddr = "10.201.1.246"
baseURL = "https://" + epnmipaddr + "/restconf"
epnmuser = "root"
epnmpassword = "Public123"
open_websockets = []
global_region = 1
class IndexHandler(tornado.web.RequestHandler):
async def get(self):
self.render("templates/index.html", port=args.port, epnm_ip=epnmipaddr, epnm_user=epnmuser,
epnm_pass=epnmpassword, region=global_region)
class AjaxHandler(tornado.web.RequestHandler):
async def post(self):
global global_region
global epnmipaddr
global baseURL
global epnmuser
global epnmpassword
request_body = self.request.body.decode("utf-8")
# request = tornado.escape.recursive_unicode(self.request.arguments)
logging.info("Received AJAX request..")
logging.info(request_body)
request = json.loads(request_body)
try:
action = request['action']
except Exception as err:
logging.warning("Invalid AJAX request")
logging.warning(err)
            response = {'status': 'failed', 'error': str(err)}
logging.info(response)
self.write(json.dumps(response))
return
if action == 'collect':
methods.collection(self, request, global_region, baseURL, epnmuser, epnmpassword)
elif action == 'assign-srrg':
methods.assign_srrg(self, request, global_region, baseURL, epnmuser, epnmpassword)
elif action == 'unassign-srrg':
methods.unassign_srrg(self, request, global_region, baseURL, epnmuser, epnmpassword)
elif action == 'delete-srrg':
result = methods.delete_srrg(request, global_region, baseURL, epnmuser, epnmpassword)
self.write(json.dumps(result))
elif action == 'get-all-srrgs':
all_srrgs = methods.getallsrrgs()
self.write(json.dumps(all_srrgs))
elif action == 'get-l1nodes':
l1nodes = methods.getl1nodes()
self.write(json.dumps(l1nodes))
elif action == 'get-l1links':
l1links = methods.getl1links()
self.write(json.dumps(l1links))
elif action == 'get-topolinks':
node_name = request['l1node']
psline = request['psline']
topolinks = methods.gettopolinks_psline(node_name, psline)
self.write(json.dumps(topolinks))
elif action == 'get-topolinks-line-card':
node_name = request['mplsnode']
topolinks = methods.gettopolinks_mpls_node(node_name)
self.write(json.dumps(topolinks))
elif action == 'update-epnm':
time.sleep(2)
epnmipaddr = request['epnm-ip']
baseURL = "https://" + epnmipaddr + "/restconf"
epnmuser = request['epnm-user']
epnmpassword = request['epnm-pass']
region = request['region']
region_int = int(region)
global_region = region_int
response = {'action': 'update-epnm', 'status': 'completed'}
logging.info(response)
self.write(json.dumps(response))
else:
logging.warning("Received request for unknown operation!")
response = {'status': 'unknown', 'error': "unknown request"}
logging.info(response)
self.write(json.dumps(response))
def send_message_open_ws(self, message):
for ws in open_websockets:
ws.send_message(message)
class SRLGHandler(tornado.web.RequestHandler):
def get(self, srlg_num):
srlg = methods.getsrlg(srlg_num)
self.render("templates/srlg_template.html", port=args.port, srlg_num=srlg_num, srlg_data=srlg)
class ROADMNodesHandler(tornado.web.RequestHandler):
def get(self):
l1nodes = methods.getl1nodes()
pools = methods.get_srrg_pools(1)
# if len(pools) == 0:
# pools = ['No Node SRLG Pools Defined']
self.render("templates/roadm_nodes_template.html", port=args.port, l1nodes_data=l1nodes, pools=pools)
class ROADMLinksHandler(tornado.web.RequestHandler):
def get(self):
# full_url = self.request.full_url()
# uri = self.request.uri
# base_full_url = self.request.protocol + "://" + self.request.host
l1links = methods.getl1links()
conduit_pools = methods.get_srrg_pools(0)
degree_pools = methods.get_srrg_pools(2)
self.render("templates/roadm_links_template.html", port=args.port, degree_pools=degree_pools,
conduit_pools=conduit_pools, l1links_data=l1links)
class MPLSNodesHandler(tornado.web.RequestHandler):
def get(self):
mpls_nodes = methods.getmplsnodes()
self.render("templates/mpls_nodes_template.html", port=args.port, mpls_nodes_data=mpls_nodes)
class AllSRLGHandler(tornado.web.RequestHandler):
def get(self):
all_srrg_data = methods.getallsrrgs()
self.render("templates/all_srlg_template.html", port=args.port, all_srrg_data=all_srrg_data)
class AddDropTopoLinksHandler(tornado.web.RequestHandler):
def get(self):
l1node = self.get_argument('l1node')
psline = self.get_argument('psline')
topo_links = methods.gettopolinks_psline(l1node, psline)
add_drop_pools = methods.get_srrg_pools(3)
self.render("templates/topo_links_template_add_drop.html", port=args.port, topo_links_data=topo_links,
add_drop_pools=add_drop_pools, l1node=l1node)
class LineCardTopoLinksHandler(tornado.web.RequestHandler):
def get(self):
thequery = self.request.query
mplsnode = thequery.split('=')[1]
topo_links = methods.gettopolinks_mpls_node(mplsnode)
card_pools = methods.get_srrg_pools(6)
self.render("templates/topo_links_template_line_card.html", port=args.port, topo_links_data=topo_links,
card_pools=card_pools)
class WebSocket(tornado.websocket.WebSocketHandler):
def open(self):
logging.info("WebSocket opened")
open_websockets.append(self)
def send_message(self, message):
self.write_message(message)
def on_message(self, message):
"""Evaluates the function pointed to by json-rpc."""
json_rpc = json.loads(message)
logging.info("Websocket received message: " + json.dumps(json_rpc))
try:
result = getattr(methods,
json_rpc["method"])(**json_rpc["params"])
error = None
except:
# Errors are handled by enabling the `error` flag and returning a
# stack trace. The client can do with it what it will.
result = traceback.format_exc()
error = 1
json_rpc_response = json.dumps({"result": result, "error": error,
"id": json_rpc["id"]},
separators=(",", ":"))
logging.info("Websocket replied with message: " + json_rpc_response)
self.write_message(json_rpc_response)
def on_close(self):
open_websockets.remove(self)
logging.info("WebSocket closed!")
def main():
# Set up logging
try:
os.remove('collection.log')
except Exception as err:
logging.info("No log file to delete...")
logFormatter = logging.Formatter('%(levelname)s: %(message)s')
rootLogger = logging.getLogger()
rootLogger.level = logging.INFO
fileHandler = logging.FileHandler(filename='collection.log')
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
logging.info("Starting webserver...")
current_time = str(datetime.now().strftime('%Y-%m-%d-%H%M-%S'))
logging.info("Current time is: " + current_time)
settings = {
# "static_path": os.path.join(os.path.dirname(__file__), "static"),
"static_path": os.path.normpath(os.path.dirname(__file__)),
# "cookie_secret": "__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
# "login_url": "/login",
# "xsrf_cookies": True,
}
handlers = [url(r"/", IndexHandler, name="home"),
url(r"/websocket", WebSocket),
url(r'/static/(.*)',
tornado.web.StaticFileHandler,
dict(path=settings['static_path'])),
# {'path': os.path.normpath(os.path.dirname(__file__))}),
url(r'/srlg/([0-9]+)', SRLGHandler),
url(r'/srlg/static/(.*)',
tornado.web.StaticFileHandler,
dict(path=settings['static_path'])),
url(r'/roadmlinks', ROADMLinksHandler, name="roadm_links"),
url(r'/roadmnodes', ROADMNodesHandler, name="roadm_nodes"),
url(r'/mplsnodes', MPLSNodesHandler, name="mpls_nodes"),
url(r'/allsrlg', AllSRLGHandler, name="all_srlg"),
url(r'/topolinks-ad/?', AddDropTopoLinksHandler, name="ad-topo_links"),
url(r'/topolinks-ad/static/(.*)',
tornado.web.StaticFileHandler,
dict(path=settings['static_path'])),
url(r'/topolinks-lc/?', LineCardTopoLinksHandler, name="lc-topo_links"),
url(r'/topolinks-lc/static/(.*)',
tornado.web.StaticFileHandler,
dict(path=settings['static_path'])),
url(r'/ajax', AjaxHandler, name="ajax")
]
application = tornado.web.Application(handlers)
application.listen(args.port)
# webbrowser.open("http://localhost:%d/" % args.port, new=2)
# tornado.ioloop.IOLoop.instance().start()
tornado.ioloop.IOLoop.current().start()
def clean_files():
# Delete all output files
logging.info("Cleaning files from last collection...")
try:
remove_tree('jsonfiles')
remove_tree('jsongets')
except Exception as err:
logging.info("No files to cleanup...")
# Recreate output directories
mkpath('jsonfiles')
mkpath('jsongets')
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Starts the web server for the EPNM SRLG management UI.")
parser.add_argument("--port", type=int, default=8000, help="The port on which "
"to serve the website.")
args = parser.parse_args()
main()
|
"""
Handles data retrieval from KEGG.
Version: 1.1 (October 2018)
License: MIT
Author: Alexandra Zaharia (contact@alexandra-zaharia.org)
"""
import urllib2
import sys
import os
import re
from CoMetGeNeError import CoMetGeNeError
from os.path import exists, isdir, basename
from ..definitions import PICKLE_EC, PICKLE_GENOME
from ..utils import pickle, unpickle
def check_directory(directory):
"""If the specified directory exists, check whether the permissions are
sufficient. If the directory does not exist, attempt to create it.
:param directory: directory where KEGG pathways will be stored in KGML
format
"""
if exists(directory):
if not isdir(directory):
raise CoMetGeNeError(
'import_not_dir', basename(__file__), directory)
else:
if not os.access(directory, os.R_OK):
raise CoMetGeNeError(
'import_not_r', basename(__file__), directory)
if not os.access(directory, os.W_OK):
raise CoMetGeNeError(
'import_not_w', basename(__file__), directory)
if not os.access(directory, os.X_OK):
raise CoMetGeNeError(
'import_not_x', basename(__file__), directory)
else: # Create the output directory if it doesn't exist.
try:
os.makedirs(directory)
except Exception:
raise CoMetGeNeError('import_mkdir', basename(__file__), directory)
def remove_meta_pathways(pathways):
"""Removes meta-pathway maps, i.e. maps with an ID starting from 01100, from
a list of KEGG pathway map IDs.
The 'pathways' list is modified.
:param pathways: list of strings designating KEGG pathway map IDs
"""
ids_to_remove = list()
for pathway in pathways:
        digit_match = re.search(r'\d{5}', pathway)
assert digit_match
kegg_id = int(digit_match.group())
if kegg_id >= 1100:
ids_to_remove.append(pathway)
for meta_pw in ids_to_remove:
pathways.remove(meta_pw)
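# Illustrative example (not part of the original module): given
# pathways = ['eco00010', 'eco01100'], remove_meta_pathways(pathways) keeps
# only 'eco00010', since map 01100 is a meta-pathway.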
def save_pathways(pathways, organism, directory):
"""Downloads the specified pathway maps from KEGG in KGML format for a given
species and saves them to the specified directory.
If the output file already exists, it is not overwritten.
:param pathways: list of KEGG pathway map IDs
:param organism: species for which to retrieve the pathways
:param directory: directory where pathway maps for 'organism' will be stored
in KGML format
"""
saved_files = 0
for i in range(len(pathways)):
kgml = os.path.join(directory, 'path_' + pathways[i] + '.kgml')
if exists(kgml):
print "\tOutput file %s already exists. Skipping." % kgml
sys.stdout.flush()
else:
print "\tRetrieving pathway", pathways[i], \
"(" + str(i+1) + "/" + str(len(pathways)) + ") ...",
sys.stdout.flush()
pathway_url = 'http://rest.kegg.jp/get/' + pathways[i] + '/kgml'
try:
data = urllib2.urlopen(pathway_url)
xml = data.read()
print "done"
print "\t\tWriting pathway to file", kgml, "...",
sys.stdout.flush()
k_out = open(kgml, 'w')
k_out.write(xml)
k_out.close()
saved_files += 1
print "done"
except urllib2.URLError:
fmt_msg = "\n%s: HTTP download error\n" % \
os.path.basename(__file__)
sys.stderr.write(fmt_msg)
except IOError:
fmt_msg = "\n%s: Error writing to file\n" % \
os.path.basename(__file__)
sys.stderr.write(fmt_msg)
print "Done! Saved %d pathway(s) for '%s' under %s.\n" % (
saved_files, organism, directory)
def download_kgml(organism, directory):
"""Downloads all non-meta-pathways for a given species and saves them to the
specified directory unless the output files already exist.
A KEGG meta-pathway has an ID starting from 01100.
:param organism: species for which to retrieve the pathways
:param directory: directory where pathway maps for 'organism' will be stored
in KGML format
"""
if len(organism) != 3 and len(organism) != 4:
raise CoMetGeNeError('import_org_code', basename(__file__), organism)
# Ensure there are sufficient permissions on the output directory, and
# create it if necessary.
check_directory(directory)
# Retrieve a list of metabolic pathways for the query organism.
print "Retrieving metabolic pathways for '" + organism + "':"
sys.stdout.flush()
pathway_list_url = 'http://rest.kegg.jp/list/pathway/' + organism
try:
data = urllib2.urlopen(pathway_list_url)
pathways = data.read().split('\n')
pathways = filter(lambda x: x != '', pathways) # remove empty lines
pathways = [pw.split()[0].split(':')[1] for pw in pathways]
remove_meta_pathways(pathways)
except urllib2.URLError:
raise CoMetGeNeError('import_not_found', basename(__file__), organism)
if len(pathways) == 0:
raise CoMetGeNeError('import_not_found', basename(__file__), organism)
save_pathways(pathways, organism, directory)
def retrieve_ec_numbers():
"""Retrieves and returns EC number and R number associations, downloading
the required information from KEGG if necessary.
If the destination file designated by PICKLE_EC already exists, its contents
    are simply loaded (un-pickled) and returned.
:return: dict associating R numbers (keys) to EC numbers (values)
"""
if not os.path.exists(PICKLE_EC):
query_url = 'http://rest.kegg.jp/link/reaction/enzyme'
print "Retrieving EC numbers from KEGG ...",
try:
data = urllib2.urlopen(query_url)
results = data.read().split('\n')
except urllib2.URLError:
raise CoMetGeNeError('import_ec', basename(__file__), None)
ec_numbers = dict()
for line in results:
if len(line) > 0: # Ignore empty lines.
ec_data = line.split('\t')
assert len(ec_data) == 2
ec = ec_data[0].replace('ec:', '')
reaction = ec_data[1]
if reaction not in ec_numbers:
ec_numbers[reaction] = list()
ec_numbers[reaction].append(ec)
print "done\n"
pickle(PICKLE_EC, ec_numbers)
else:
ec_numbers = unpickle(PICKLE_EC)
return ec_numbers
def get_slices(data, slice_size):
"""Slices up and returns the data in slices of slice_size.
:param data: list to divide in one or several slices of size slice_size
:param slice_size: integer designating the size of a slice from data
:return: list of len(data) / slice_size slices of data of size slice_size if
the number of items in data is a multiple of slice_size, or list of
len(data) / slice_size + 1 slices of data of size slice_size except for
the last slice, of size len(data) - slice_size * len(data) / slice_size
"""
slices = list()
indexes = [i for i in range(0, len(data), slice_size)]
for i in range(0, len(indexes) - 1):
slices.append(data[indexes[i]:indexes[i + 1]])
if len(data) > indexes[-1]: # is there a last slice?
slices.append(data[indexes[-1]:])
return slices
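# Illustrative behaviour (not part of the original module):
#   get_slices([1, 2, 3, 4, 5], 2)  ->  [[1, 2], [3, 4], [5]]
#   get_slices([1, 2, 3, 4], 2)     ->  [[1, 2], [3, 4]]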
def _remove_complement(position):
if 'complement' in position:
position = position.replace('complement(', '')
position = position[:-2] # remove final closing bracket
return position
def _get_position(position):
"""Returns the numerical position contained within the string 'position'.
:param position: string containing a position, e.g. '1000', or a position
range, e.g. '1000..2000'
:return: -1 if positional information is invalid, or one or two integers
        designating the position otherwise
"""
position = _remove_complement(position)
if position.isdigit():
return int(position)
if ',' in position or '..' not in position:
return -1
start, end = position.split('..')
if not start.isdigit() or not end.isdigit():
return -1
return int(start), int(end)
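# Illustrative behaviour (not part of the original module):
#   _get_position('1000')        ->  1000
#   _get_position('1000..2000')  ->  (1000, 2000)
#   _get_position('1000,2000')   ->  -1  (invalid positional information)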
def _get_position_when_join_present(position):
"""Returns the list of integers corresponding to the string 'position', if
the keyword 'join' is present.
Examples:
for 'join(1000..2000,3000..4000)', return [(1000, 2000), (3000, 4000)]
for 'join(5000,6000..7000)', return [(5000, 7000)]
for 'join(8000..9000,10000)', return [(8000, 10000)]
for 'join(<8000..9000, 10000)', return [-1]
:param position: string containing the keyword 'join' and positional
        information
:return: list of integer tuples corresponding to 'positions', or list
[-1] if positional information is invalid
"""
position = _remove_complement(position)
position = position.replace('join(', '').replace(')', '')
if ',' not in position: # invalid positional information
return [-1]
pos_fields = position.split(',')
pos_int = [_get_position(field) for field in pos_fields]
if -1 in pos_int: # invalid positional information
return [-1]
unique_pos = [pos for pos in pos_int if type(pos) is not tuple]
if len(unique_pos) == 0:
return pos_int
else:
pos_list = list()
assert len(unique_pos) == 1
index = pos_int.index(unique_pos[0])
for i in range(len(pos_int)):
if i == index:
continue
if i == index + 1:
if pos_int[index] >= pos_int[i][1]:
return [-1]
pos_list.append((pos_int[index], pos_int[i][1]))
elif i == index - 1:
if pos_int[index] <= pos_int[i][0]:
return [-1]
pos_list.append((pos_int[i][0], pos_int[index]))
else:
pos_list.append(pos_int[i])
return pos_list
def extract_gene_info(gene_info, org, genomes):
"""Stores information on a given protein-coding gene of species 'org' in the
dict 'genomes'.
The 'genomes' dict is modified if 'gene_info' designates a CDS with valid
positional information.
:param gene_info: textual information on a gene entry for species 'org', as
retrieved from KEGG GENES
:param org: species to which belongs the gene description gene_info
:param genomes: dict of dicts storing gene information for every gene of
every species in the dict, namely the name of the chromosome on which
the gene is located, the strand on the chromosome, as well as the
position of the gene on the chromosome (in nucleotides)
"""
if gene_info[0].split()[2] != 'CDS':
return
gene = org + ':' + gene_info[0].split()[1]
gene_dict = dict()
pos_info = ''
for entry in gene_info:
if entry.split()[0] == 'PATHWAY':
gene_dict['enzyme'] = True
elif entry.split()[0] == 'POSITION':
pos_info = entry.split()[1]
if 'enzyme' not in gene_dict: # assumes POSITION is after PATHWAY
gene_dict['enzyme'] = False
break
if len(pos_info) == 0:
sys.stderr.write('Ignoring gene %s (no positional information)\n' %
gene)
return
fields = pos_info.split(':')
if len(fields) == 1: # only one chromosome
gene_dict['chr'] = 'chromosome'
pos = 0
else:
gene_dict['chr'] = 'chromosome ' + fields[0]
pos = 1
gene_dict['fwd'] = False if 'complement' in fields[pos] else True
gene_dict['pos'] = list()
if 'join' in fields[pos]:
gene_dict['pos'] = _get_position_when_join_present(fields[pos])
else:
gene_dict['pos'].append(_get_position(fields[pos]))
if -1 in gene_dict['pos']:
sys.stderr.write('Ignoring gene %s (invalid positional information)\n' %
gene)
else:
genomes[org][gene] = gene_dict
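# Illustrative shape of a resulting entry (hypothetical identifiers and values):
#   genomes['eco']['eco:b0001'] = {'enzyme': True, 'chr': 'chromosome',
#                                  'fwd': True, 'pos': [(190, 255)]}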
def retrieve_gene_info(genes, organism, genomes):
"""For every KEGG GENES entry in 'genes' designating multiple genes of
species 'organism', retrieves the relevant gene information for coding
sequences and stores it in the dict 'genomes'.
The 'genomes' dict is modified if at least one gene in 'genes' is a CDS.
:param genes: list of strings designating multiple KEGG GENES entries
separated by a line containing '///'
    :param organism: species to which the gene entries in 'genes' belong
:param genomes: dict of dicts storing gene information for every gene of
every species in the dict, namely the name of the chromosome on which
the gene is located, the strand on the chromosome, as well as the
position of the gene on the chromosome (in nucleotides)
"""
query_url = 'http://rest.kegg.jp/get/' + '+'.join(genes)
data = urllib2.urlopen(query_url).read().split('\n')
indices = [i for i, x in enumerate(data) if x == "///"]
if len(indices) == 0 or len(indices) == 1:
extract_gene_info(data, organism, genomes)
else:
for i in range(len(indices) - 1):
if i == 0:
extract_gene_info(data[:indices[i]], organism, genomes)
extract_gene_info(
data[(indices[i] + 1):indices[i + 1]], organism, genomes)
def retrieve_genome_info(organism, genomes=None, lock=None):
"""If species 'organism' is not present in the dict 'genomes' storing gene
information for several species, retrieves all genomic information from
KEGG GENES and stores it in the dict 'genomes'.
If 'genomes' is not None and 'organism' is not a key of 'genomes', then the
dict 'genomes' is modified.
    The return value of this function is used by CoMetGeNe.py but not by
    CoMetGeNe_launcher.py.
:param organism: species for which genomic information will be retrieved if
not already present in the dict 'genomes'
:param genomes: dict of dicts storing gene information for every gene of
every species in the dict, namely the name of the chromosome on which
the gene is located, the strand on the chromosome, as well as the
position of the gene on the chromosome (in nucleotides)
:param lock: multiprocessing lock to prevent processes reading from and
writing to the genomes pickle file simultaneously
:return: the species for which genomic information has been retrieved
('organism') and the associated genomic information
"""
if genomes is not None and organism in genomes:
return
genes_dict = genomes if genomes is not None else dict()
genes_dict[organism] = dict()
list_url = 'http://rest.kegg.jp/list/' + organism
kegg_gene_ids = list()
try:
data = urllib2.urlopen(list_url).read().split('\n')
data = filter(lambda x: x != '', data) # Remove empty lines.
for line in data:
kegg_gene_ids.append(line.split()[0])
slice_size = 10
gene_counter = 0
for genes in get_slices(kegg_gene_ids, slice_size):
gene_counter = min(gene_counter + slice_size, len(kegg_gene_ids))
progress = "Retrieving information for '%s' genes: %d/%d" % (
organism, gene_counter, len(kegg_gene_ids))
sys.stdout.write('%s\r' % progress)
sys.stdout.flush()
retrieve_gene_info(genes, organism, genes_dict)
sys.stdout.write('\n')
except urllib2.URLError:
fmt_msg = "\n%s: HTTP download error\n" % os.path.basename(__file__)
sys.stderr.write(fmt_msg)
if lock is not None:
lock.acquire()
if os.path.exists(PICKLE_GENOME):
genomes = unpickle(PICKLE_GENOME)
else:
genomes = dict()
genomes[organism] = genes_dict[organism]
pickle(PICKLE_GENOME, genomes)
lock.release()
|
#!/usr/bin/env python3
import sys
import os
def print_usage():
print("HAL plugin template generator")
print(" usage: new_plugin <name>")
print("")
print("Sets up the directory structure and respective files in the current directory:")
print("<name>/")
print(" |- include/")
print(" | |- factory_<name>.h")
print(" | |- plugin_<name>.h")
print(" |- python/")
print(" | |- python_bindings.cpp")
print(" |- src/")
print(" | |- factory_<name>.cpp")
print(" | |- plugin_<name>.cpp")
print(" |- CMakeLists.txt")
print("")
#################################################################
############## Templates ##############
#################################################################
CMAKE_TEMPLATE ="""option(PL_##UPPER## "PL_##UPPER##" OFF)
if(PL_##UPPER## OR BUILD_ALL_PLUGINS)
include_directories(SYSTEM ${PYBIND11_INCLUDE_DIR} SYSTEM ${PYTHON_INCLUDE_DIRS})
include_directories(AFTER "${CMAKE_CURRENT_SOURCE_DIR}/include")
file(GLOB_RECURSE ##UPPER##_INC ${CMAKE_CURRENT_SOURCE_DIR}/include/*.h)
file(GLOB_RECURSE ##UPPER##_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp)
file(GLOB_RECURSE ##UPPER##_PYTHON_SRC ${CMAKE_CURRENT_SOURCE_DIR}/python/*.cpp)
add_library(##LOWER## SHARED ${##UPPER##_SRC} ${##UPPER##_PYTHON_SRC} ${##UPPER##_INC})
set_target_properties(##LOWER## PROPERTIES PREFIX "")
if(APPLE AND CMAKE_HOST_APPLE)
set_target_properties(##LOWER## PROPERTIES SUFFIX ".so")
set_target_properties(##LOWER## PROPERTIES INSTALL_NAME_DIR ${PLUGIN_LIBRARY_INSTALL_DIRECTORY})
endif(APPLE AND CMAKE_HOST_APPLE)
target_link_libraries(##LOWER## ${LINK_LIBS} ${PYTHON_LIBRARIES} pybind11::module ${BUDDY_LIBRARY})
install(TARGETS ##LOWER## LIBRARY DESTINATION ${LIBRARY_INSTALL_DIRECTORY} PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE INCLUDES DESTINATION ${INCLUDE_INSTALL_DIRECTORY})
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/ DESTINATION ${PLUGIN_INCLUDE_INSTALL_DIRECTORY}/##LOWER##/include/)
if(${CMAKE_BUILD_TYPE} STREQUAL "Debug")
add_sanitizers(##LOWER##)
endif()
endif()
"""
#################################################################
#################################################################
FACTORY_H_TEMPLATE = """#ifndef __HAL_FACTORY_##UPPER##_H__
#define __HAL_FACTORY_##UPPER##_H__
#include "core/interface_factory.h"
class PLUGIN_API factory_##LOWER## : public i_factory
{
public:
/** interface implementation: i_factory */
std::shared_ptr<i_base> get_plugin_instance() override;
};
extern "C" PLUGIN_API i_factory* get_factory();
#endif /* __HAL_FACTORY_##UPPER##_H__ */
"""
#################################################################
#################################################################
FACTORY_CPP_TEMPLATE = """#include "factory_##LOWER##.h"
#include "plugin_##LOWER##.h"
std::shared_ptr<i_base> factory_##LOWER##::get_plugin_instance()
{
return std::dynamic_pointer_cast<i_base>(std::make_shared<plugin_##LOWER##>());
}
extern i_factory* get_factory()
{
static factory_##LOWER##* factory = new factory_##LOWER##();
return (i_factory*)factory;
}
"""
#################################################################
#################################################################
PLUGIN_H_TEMPLATE = """#ifndef __PLUGIN_##UPPER##_H__
#define __PLUGIN_##UPPER##_H__
#include "core/interface_base.h"
class PLUGIN_API plugin_##LOWER## : virtual public i_base
{
public:
/*
* interface implementations
*/
plugin_##LOWER##() = default;
~plugin_##LOWER##() = default;
std::string get_name() override;
std::string get_version() override;
std::set<interface_type> get_type() override;
};
#endif /* __PLUGIN_##UPPER##_H__ */
"""
#################################################################
#################################################################
PLUGIN_CPP_TEMPLATE = """#include "plugin_##LOWER##.h"
std::string plugin_##LOWER##::get_name()
{
return std::string("##LOWER##");
}
std::string plugin_##LOWER##::get_version()
{
return std::string("0.1");
}
std::set<interface_type> plugin_##LOWER##::get_type()
{
return {interface_type::base};
}
"""
#################################################################
#################################################################
PYTHON_CPP_TEMPLATE = """#include "pybind11/operators.h"
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"
#include "pybind11/stl_bind.h"
#include "plugin_##LOWER##.h"
namespace py = pybind11;
#ifdef PYBIND11_MODULE
PYBIND11_MODULE(##LOWER##, m)
{
m.doc() = "hal ##LOWER## python bindings";
#else
PYBIND11_PLUGIN(##LOWER##)
{
py::module m("##LOWER##", "hal ##LOWER## python bindings");
#endif // ifdef PYBIND11_MODULE
py::class_<plugin_##LOWER##, std::shared_ptr<plugin_##LOWER##>, i_base>(m, "##LOWER##")
.def(py::init<>())
.def_property_readonly("name", &plugin_##LOWER##::get_name)
.def("get_name", &plugin_##LOWER##::get_name)
.def_property_readonly("version", &plugin_##LOWER##::get_version)
.def("get_version", &plugin_##LOWER##::get_version)
;
#ifndef PYBIND11_MODULE
return m.ptr();
#endif // PYBIND11_MODULE
}
"""
#################################################################
############## CORE ##############
#################################################################
def create_plugin(name):
lower = name.lower()
upper = name.upper()
os.makedirs(name)
with open(name+"/CMakeLists.txt", "wt") as f:
f.write(CMAKE_TEMPLATE.replace("##UPPER##", upper).replace("##LOWER##", lower))
os.makedirs(name+"/include")
with open(name+"/include/factory_"+lower+".h", "wt") as f:
f.write(FACTORY_H_TEMPLATE.replace("##UPPER##", upper).replace("##LOWER##", lower))
with open(name+"/include/plugin_"+lower+".h", "wt") as f:
f.write(PLUGIN_H_TEMPLATE.replace("##UPPER##", upper).replace("##LOWER##", lower))
os.makedirs(name+"/src")
with open(name+"/src/factory_"+lower+".cpp", "wt") as f:
f.write(FACTORY_CPP_TEMPLATE.replace("##UPPER##", upper).replace("##LOWER##", lower))
with open(name+"/src/plugin_"+lower+".cpp", "wt") as f:
f.write(PLUGIN_CPP_TEMPLATE.replace("##UPPER##", upper).replace("##LOWER##", lower))
os.makedirs(name+"/python")
with open(name+"/python/python_bindings.cpp", "wt") as f:
f.write(PYTHON_CPP_TEMPLATE.replace("##UPPER##", upper).replace("##LOWER##", lower))
if len(sys.argv) != 2:
print_usage()
sys.stderr.write("ERROR: unsupported number of parameters\n")
sys.exit(-1)
name = sys.argv[1].lower()
if not name.replace("_","").isalnum() or name[0] == "_" or name[0].isnumeric():
print_usage()
sys.stderr.write("ERROR: '{}' is not a valid C++ identifier\n".format(name))
sys.exit(-1)
if os.path.exists(name):
print_usage()
sys.stderr.write("ERROR: directory '{}' already exists\n".format(name))
sys.exit(-1)
create_plugin(name)
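# Example invocation sketch (the script filename below is hypothetical):
#   python3 create_plugin.py graph_decorator
# would create graph_decorator/CMakeLists.txt plus include/, src/ and python/
# sub-directories populated from the templates above, with ##LOWER##/##UPPER##
# replaced by the lower-/upper-case plugin name.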
|
#
# Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending
#
""" This module defines the data types supported by the Deephaven engine.
Each data type is represented by a DType class which supports creating arrays of the same type and more.
"""
from __future__ import annotations
from typing import Any, Sequence, Callable, Dict, Type, Union
import jpy
import numpy as np
import pandas as pd
from deephaven import DHError
_JQstType = jpy.get_type("io.deephaven.qst.type.Type")
_JTableTools = jpy.get_type("io.deephaven.engine.util.TableTools")
_j_name_type_map: Dict[str, DType] = {}
def _qst_custom_type(cls_name: str):
return _JQstType.find(_JTableTools.typeFromName(cls_name))
class DType:
""" A class representing a data type in Deephaven."""
def __init__(self, j_name: str, j_type: Type = None, qst_type: jpy.JType = None, is_primitive: bool = False,
np_type: Any = np.object_):
"""
Args:
            j_name (str): the fully qualified name of the Java class
            j_type (Type): the mapped Python class created by JPY
            qst_type (JType): the JPY wrapped object for an instance of QST Type
            is_primitive (bool): whether this instance represents a primitive Java type
            np_type (Any): an instance of numpy dtype (e.g. np.dtype("int64")) or a numpy class (e.g. np.int16);
                default is np.object_
"""
self.j_name = j_name
self.j_type = j_type if j_type else jpy.get_type(j_name)
self.qst_type = qst_type if qst_type else _qst_custom_type(j_name)
self.is_primitive = is_primitive
self.np_type = np_type
_j_name_type_map[j_name] = self
def __repr__(self):
return self.j_name
def __call__(self, *args, **kwargs):
if self.is_primitive:
raise DHError(message=f"primitive type {self.j_name} is not callable.")
try:
return self.j_type(*args, **kwargs)
except Exception as e:
raise DHError(e, f"failed to create an instance of {self.j_name}") from e
bool_ = DType(j_name="java.lang.Boolean", qst_type=_JQstType.booleanType(), np_type=np.bool_)
"""Boolean type"""
byte = DType(j_name="byte", qst_type=_JQstType.byteType(), is_primitive=True, np_type=np.int8)
"""Signed byte integer type"""
int8 = byte
"""Signed byte integer type"""
short = DType(j_name="short", qst_type=_JQstType.shortType(), is_primitive=True, np_type=np.int16)
"""Signed short integer type"""
int16 = short
"""Signed short integer type"""
char = DType(j_name="char", qst_type=_JQstType.charType(), is_primitive=True, np_type=np.dtype('uint16'))
"""Character type"""
int32 = DType(j_name="int", qst_type=_JQstType.intType(), is_primitive=True, np_type=np.int32)
"""Signed 32bit integer type"""
long = DType(j_name="long", qst_type=_JQstType.longType(), is_primitive=True, np_type=np.int64)
"""Signed 64bit integer type"""
int64 = long
"""Signed 64bit integer type"""
int_ = long
"""Signed 64bit integer type"""
float32 = DType(j_name="float", qst_type=_JQstType.floatType(), is_primitive=True, np_type=np.float32)
"""Single-precision floating-point number type"""
single = float32
"""Single-precision floating-point number type"""
float64 = DType(j_name="double", qst_type=_JQstType.doubleType(), is_primitive=True, np_type=np.float64)
"""Double-precision floating-point number type"""
double = float64
"""Double-precision floating-point number type"""
float_ = float64
"""Double-precision floating-point number type"""
string = DType(j_name="java.lang.String", qst_type=_JQstType.stringType())
"""String type"""
BigDecimal = DType(j_name="java.math.BigDecimal")
"""Java BigDecimal type"""
StringSet = DType(j_name="io.deephaven.stringset.StringSet")
"""Deephaven StringSet type"""
DateTime = DType(j_name="io.deephaven.time.DateTime", np_type=np.dtype("datetime64[ns]"))
"""Deephaven DateTime type"""
Period = DType(j_name="io.deephaven.time.Period")
"""Deephaven time period type"""
PyObject = DType(j_name="org.jpy.PyObject")
"""Python object type"""
JObject = DType(j_name="java.lang.Object")
"""Java Object type"""
byte_array = DType(j_name='[B')
"""Byte array type"""
int8_array = byte_array
"""Byte array type"""
short_array = DType(j_name='[S')
"""Short array type"""
int16_array = short_array
"""Short array type"""
int32_array = DType(j_name='[I')
"""32bit integer array type"""
long_array = DType(j_name='[J')
"""64bit integer array type"""
int64_array = long_array
"""64bit integer array type"""
int_array = long_array
"""64bit integer array type"""
single_array = DType(j_name='[F')
"""Single-precision floating-point array type"""
float32_array = single_array
"""Single-precision floating-point array type"""
double_array = DType(j_name='[D')
"""Double-precision floating-point array type"""
float64_array = double_array
"""Double-precision floating-point array type"""
float_array = double_array
"""Double-precision floating-point array type"""
string_array = DType(j_name='[Ljava.lang.String;')
"""Java String array type"""
datetime_array = DType(j_name='[Lio.deephaven.time.DateTime;')
"""Deephaven DateTime array type"""
def array(dtype: DType, seq: Sequence, remap: Callable[[Any], Any] = None) -> jpy.JType:
""" Creates a Java array of the specified data type populated with values from a sequence.
Note:
        this method performs unsafe casting, meaning precision and values might be lost when downcasting
Args:
dtype (DType): the component type of the array
seq (Sequence): a sequence of compatible data, e.g. list, tuple, numpy array, Pandas series, etc.
remap (optional): a callable that takes one value and maps it to another, for handling the translation of
special DH values such as NULL_INT, NAN_INT between Python and the DH engine
Returns:
a Java array
Raises:
DHError
"""
try:
if remap:
if not callable(remap):
raise ValueError("Not a callable")
seq = [remap(v) for v in seq]
else:
if isinstance(seq, str) and dtype == char:
return array(char, seq, remap=ord)
return jpy.array(dtype.j_type, seq)
except Exception as e:
raise DHError(e, f"failed to create a Java {dtype.j_name} array.") from e
def from_jtype(j_class: Any) -> DType:
""" looks up a DType that matches the java type, if not found, creates a DType for it. """
if not j_class:
return None
j_name = j_class.getName()
dtype = _j_name_type_map.get(j_name)
if not dtype:
return DType(j_name=j_name, j_type=j_class, np_type=np.object_)
else:
return dtype
def from_np_dtype(np_dtype: Union[np.dtype, pd.api.extensions.ExtensionDtype]) -> DType:
""" Looks up a DType that matches the provided numpy dtype or Pandas's nullable equivalent; if not found,
returns PyObject. """
if isinstance(np_dtype, pd.api.extensions.ExtensionDtype):
# check if it is a Pandas nullable numeric types such as pd.Float64Dtype/Int32Dtype/BooleanDtype etc.
if hasattr(np_dtype, "numpy_dtype"):
np_dtype = np_dtype.numpy_dtype
elif isinstance(np_dtype, pd.StringDtype):
return string
else:
return PyObject
if np_dtype.kind in {'U', 'S'}:
return string
for _, dtype in _j_name_type_map.items():
if np.dtype(dtype.np_type) == np_dtype and dtype.np_type != np.object_:
return dtype
return PyObject
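# Mapping sketch for from_np_dtype (assuming the registrations above):
#   from_np_dtype(np.dtype("int64"))  -> long
#   from_np_dtype(np.dtype("U"))      -> string
#   from_np_dtype(np.dtype("object")) -> PyObject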
|
import runpy
#from distutils.core import setup, Extension
from setuptools import Extension, find_packages, setup
from setuptools.command.build_py import build_py
# Third-party modules - we depend on numpy for everything
import numpy
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
module1 = Extension('_pylibmodes',
# define_macros = [('MAJOR_VERSION', '1'),
# ('MINOR_VERSION', '0')],
include_dirs = ['include'],
# libraries = ['tcl83'],
# library_dirs = ['/usr/local/lib'],
# sources = ['src/mode-s.c', 'swig/modes_wrap.c'])
sources = ['src/mode-s.c', 'src/modes.i'])
# Build extensions before python modules,
# or the generated SWIG python files will be missing.
class BuildPy(build_py):
def run(self):
self.run_command('build_ext')
        super().run()
setup (
name = 'pylibmodes',
version = '1.0',
description = 'Python wrapper to libmodes',
author = 'Richard Baker',
author_email = 'richard.baker@cs.ox.ac.uk',
url = 'https://docs.python.org/extending/building',
#long_description = '''Python wrapper to libmodes''',
packages=find_packages('src'),
package_dir={'': 'src'},
ext_modules = [module1],
py_modules = ["pylibmodes"],
cmdclass = { 'build_py': BuildPy }
)
|
import gzip
import re
import os
import time
from sys import argv
import concurrent.futures
import glob
# Keep track of when the script began
startTime = time.time()
char = '\n' + ('*' * 70) + '\n'
# Input file or list of files
inputFile = argv[1]
pathToFiles = argv[2]
numCores = int(argv[3])
if pathToFiles.endswith("/"):
pathToFiles = pathToFiles[0:-1]
# Download reference files if needed
if not os.path.exists("/references/Homo_sapiens_assembly38.fasta"):
os.system("wget --no-check-certificate \
https://files.osf.io/v1/resources/3znuj/providers/osfstorage/5d9f54d2a7bc73000ee99fd6/?zip= -O /tmp/references.zip \
&& unzip /tmp/references.zip -d /tmp/references \
&& rm /tmp/references.zip \
&& gzip -d /tmp/references/*.gz")
for file in glob.glob("/tmp/references/*"):
fileName = file.split("/")[-1]
if not os.path.exists(f"/references/{fileName}"):
os.system(f"mv {file} /references/")
os.system("chmod 777 /references/*")
# Create a dictionary of files that need to be combined into one vcf file
fileDict = {}
familyList = []
with open(inputFile) as sampleFile:
header = sampleFile.readline()
headerList = header.rstrip().split("\t")
fileNameIndex = headerList.index("file_name")
familyIdIndex = headerList.index("family_id")
sampleIdIndex = headerList.index("sample_id")
for sample in sampleFile:
sampleData = sample.rstrip("\n").split("\t")
sampleId = sampleData[sampleIdIndex]
sampleFamilyId = sampleData[familyIdIndex]
actualFileName = f"{pathToFiles}/{sampleFamilyId}/{sampleId}/{sampleId}_parsed.vcf.gz"
outputName = f"{pathToFiles}/{sampleFamilyId}/{sampleFamilyId}_trio/{sampleFamilyId}_trio.vcf.gz"
if sampleFamilyId not in fileDict and os.path.exists(f"{pathToFiles}/{sampleFamilyId}") and not os.path.exists(f"{outputName}"):
fileDict[sampleFamilyId] = [actualFileName]
familyList.append(sampleFamilyId)
elif sampleFamilyId in fileDict and os.path.exists(f"{pathToFiles}/{sampleFamilyId}") and not os.path.exists(f"{outputName}"):
fileDict[sampleFamilyId].append(actualFileName)
probandDict = {}
parentDict = {}
with open(inputFile) as sampleFile:
header = sampleFile.readline()
headerList = header.rstrip().split("\t")
fileNameIndex = headerList.index("file_name")
familyIdIndex = headerList.index("family_id")
sampleIdIndex = headerList.index("sample_id")
probandIndex = headerList.index("proband")
genderIndex = headerList.index("sex")
for sample in sampleFile:
sampleData = sample.rstrip("\n").split("\t")
fileName = sampleData[fileNameIndex]
sampleFamilyId = sampleData[familyIdIndex]
sampleId = sampleData[sampleIdIndex]
probandStatus = sampleData[probandIndex]
gender = sampleData[genderIndex]
if probandStatus == "Yes":
probandDict[sampleId] = sampleFamilyId
else:
if sampleFamilyId not in parentDict:
parentDict[sampleFamilyId] = {sampleId: gender}
else:
parentDict[sampleFamilyId][sampleId] = gender
# Create fam files
def createFamFiles(proband):
familyId = probandDict[proband]
familyDict = parentDict[familyId]
paternal = ""
maternal = ""
outputString = ""
sampleDict = {}
for key, value in familyDict.items():
if value == "1":
paternal = key
else:
maternal = key
with open(inputFile) as sampleFile:
header = sampleFile.readline()
headerList = header.rstrip().split("\t")
fileNameIndex = headerList.index("file_name")
familyIdIndex = headerList.index("family_id")
sampleIdIndex = headerList.index("sample_id")
probandIndex = headerList.index("proband")
genderIndex = headerList.index("sex")
for sample in sampleFile:
sampleData = sample.rstrip("\n").split("\t")
fileName = sampleData[fileNameIndex]
sampleFamilyId = sampleData[familyIdIndex]
sampleId = sampleData[sampleIdIndex]
probandStatus = sampleData[probandIndex]
gender = sampleData[genderIndex]
if probandStatus == "Yes" and familyId == sampleFamilyId:
sampleDict[sampleId] = f"{sampleFamilyId}\t{sampleId}\t{paternal}\t{maternal}\t{gender}\t2\n"
elif probandStatus == "No" and familyId == sampleFamilyId:
sampleDict[sampleId] = f"{sampleFamilyId}\t{sampleId}\t0\t0\t{gender}\t1\n"
with open(f"{pathToFiles}/{familyId}/{familyId}_trio.fam", "w") as outputFile:
for key, value in sorted(sampleDict.items()):
outputFile.write(value)
with concurrent.futures.ProcessPoolExecutor(max_workers=numCores) as executor:
executor.map(createFamFiles, probandDict)
# Use GATK to combine all trios into one vcf
def combineTrios(trio):
files = fileDict[trio]
fileString = ""
os.system(f"mkdir {pathToFiles}/{trio}/{trio}_trio")
outputName = f"{pathToFiles}/{trio}/{trio}_trio/{trio}_trio.vcf.gz"
for file in files:
fileString += f"-V {file} "
os.system(f"/root/miniconda2/bin/gatk IndexFeatureFile -F {file}")
os.system(f"/root/miniconda2/bin/gatk CombineGVCFs -R /references/Homo_sapiens_assembly38.fasta {fileString} -O {outputName}")
for i in range(0, len(familyList), numCores):
familyListSlice = familyList[i:(i+numCores)]
with concurrent.futures.ProcessPoolExecutor(max_workers=numCores) as executor:
executor.map(combineTrios, familyListSlice)
# Print message and how long the previous steps took
timeElapsedMinutes = round((time.time()-startTime) / 60, 2)
timeElapsedHours = round(timeElapsedMinutes / 60, 2)
print(f'{char}Done. Time elapsed: {timeElapsedMinutes} minutes ({timeElapsedHours} hours){char}')
|
# -*- coding: UTF-8 -*-
from flask import Blueprint
rcmd_ws = Blueprint("rcmd_ws", __name__, static_folder="static",
template_folder="templates")
import genuine_ap.rcmd.views
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 13 17:31:25 2020
read and write soil parameters for SUMMA
@author: jimmy
"""
import pandas as pd
def parseSoilTBL(fname):
# fname = '/home/jimmy/phd/Extra/HydroCourse/Week03/code/pbhmCourse_student/2_process_based_modelling/settings/plumber/UniMich/SOILPARM.TBL'
fh = open(fname,'r')
lines = fh.readlines()
fh.close()
header_lines = []
    model_type = [] # e.g. ROSETTA or STAS
ntypes = []
data = {}
for i in range(len(lines)):
l = lines[i].strip()
if l == 'Soil Parameters':
header_lines.append(i)
model_type.append(lines[i+1].strip())
tmp = lines[i+2].strip().split()[0]
ntypes.append(int(tmp.split(',')[0]))
d = {}
headers = lines[i+2].strip().split()
for h in headers:
d[h] = []
for j in range(ntypes[-1]):
ln = i+3+j
info = lines[ln].split(',')
                if model_type[-1]=='ROSETTA': # special case in the ROSETTA framework
info = lines[ln].split()
if len(info) != len(headers):
delta = len(info) - len(headers)
for k in range(delta):
info[len(headers)-1] += ' '
info[len(headers)-1] += info[len(headers)+k]
for k in range(len(headers)):
h = headers[k]
tmp = info[k]
if tmp.isdigit():
val = int(tmp)
else:
try:
val = float(tmp)
except ValueError:
val = tmp.strip()
d[h].append(val)
df = pd.DataFrame()
for h in headers:
df[h] = d[h]
data[model_type[-1]]=df
return data
def writeTbl(fname,data):
fh = open(fname,'w')
tables = ['STAS', 'STAS-RUC', 'ROSETTA']
for t in tables:
df = data[t]
fh.write('Soil Parameters\n')
fh.write('%s\n'%t)
#write table headers
for key in df.keys():
fh.write('{:>16} '.format(key))
fh.write('\n')
for i in range(len(df)):
c = 0
for key in df.keys():
c+=1
if c == 1:
fmt = '{:<16d},'
elif c == len(df.keys()):
fmt = ' {:<16}'
else:
fmt = '{:>16.8e},'
fh.write(fmt.format(df[key][i]))
fh.write('\n')
fh.close()
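# Minimal usage sketch (the file paths are hypothetical):
#   data = parseSoilTBL('SOILPARM.TBL')   # dict keyed by table name, e.g. 'STAS', 'ROSETTA'
#   writeTbl('SOILPARM_new.TBL', data)    # writes back the three expected tables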
|
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import butter, lfilter, freqz
def show_hp(unfiltered, timestamps, cutoff, fs, order=5):
b, a = butter_highpass(cutoff, fs, order)
# Frequency response graph
w, h = freqz(b, a, worN=8000)
plt.subplot(2, 1, 1)
plt.plot(0.5 * fs * w / np.pi, np.abs(h), 'b')
plt.plot(cutoff, 0.5 * np.sqrt(2), 'ko')
plt.axvline(cutoff, color='k')
plt.xlim(0, 0.5 * fs)
plt.title("Highpass Filter Frequency Response")
plt.xlabel('Frequency [Hz]')
    plt.ylabel('Amplitude')
plt.grid()
filtered = butter_highpass_filter(unfiltered, cutoff, fs, order)
plt.subplot(2, 1, 2)
plt.plot(timestamps, unfiltered, 'r-', label='Unfiltered')
plt.plot(timestamps, filtered, 'g-', linewidth=2, label='High Pass filtered')
plt.xlabel('Time [sec]')
    plt.ylabel('Acc Amplitude (mm/sec2)')
plt.grid()
plt.legend()
plt.subplots_adjust(hspace=0.35)
plt.show()
def butter_highpass(cutoff, fs, order=5):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
# print "filtering with normal_cutoff = " , normal_cutoff
b, a = butter(order, normal_cutoff, btype='high', analog=False)
return b, a
def butter_highpass_filter(data, cutoff, fs, order=2):
print "High Cutt" , cutoff
b, a = butter_highpass(cutoff, fs, order=order)
y = lfilter(b, a, data)
return y
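# Usage sketch with synthetic data (sampling rate and cutoff are assumed values):
#   fs = 100.0                                   # Hz
#   t = np.arange(0, 5, 1.0 / fs)
#   sig = np.sin(2 * np.pi * 0.2 * t) + 0.5 * np.sin(2 * np.pi * 10.0 * t)
#   show_hp(sig, t, cutoff=1.0, fs=fs, order=5)  # plots frequency response and filtered signal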
|
import numpy as np
from .arg_init import init_arg
from .fault_analyzer_decorator import FaultAnalyzerDecorator
class FaultAnalyzerCartoBase(FaultAnalyzerDecorator):
def __init__(self, comp, **kwargs):
super().__init__(comp, **kwargs)
self.coord_name = init_arg("coordinates_name", kwargs)
if "carto_resolution" in kwargs:
self.resolution = init_arg("carto_resolution", kwargs)
else:
self.resolution = self.compute_resolution()
def compute_resolution(self):
resolution = []
for coord in self.coord_name:
#size = max(list(self.df[coord].unique)) + 1 # can't remember why I used this once
size = max(list(self.df[coord])) + 1
resolution.append(size)
return resolution
def update_matrix(self, ope, mat):
"""Update the matrix with the coordinates of the operation.
Arguments:
ope - the operation, which is a line from the dataframe, containing all
the information about this step of the experiment.
"""
ope_coord = []
for coord in self.coord_name:
if np.isnan(ope[coord]):
return
ope_coord.append(int(ope[coord]))
mat[tuple(ope_coord)] += 1
|
from random import randint
def generate_data_item(i: int) -> float:
return 1.12 * randint(-i * randint(0, 50), 100)
def fill_data(data):
for i in range(len(data)):
data[i] = generate_data_item(i % 10)
return data
def find_max_index(data):
max_value = data[0]
    max_index = 0
for i in range(len(data)):
if data[i] > max_value:
max_value = data[i]
max_index = i
return max_index
data = [None] * 20
data = fill_data(data)
print('Data before swap:')
for value in data:
    print('%.2f' % value)
# swap the maximum element with the element at index 4
max_index = find_max_index(data)
swap = data[4]
data[4] = data[max_index]
data[max_index] = swap
print('\nData after swap:')
for i in range(len(data)):
    if i == max_index:
        print('%.2f' % data[i], ' <<< swapped')
    elif i == 4:
        print('%.2f' % data[i], ' <<< swapped')
    else:
        print('%.2f' % data[i])
|
import numpy as np
from scipy.io import wavfile
import os,joblib,time
#os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
os.environ['TF_FORCE_GPU_ALLOW_GROWTH']='true'
#import tensorflow.keras.backend as K
#K.set_floatx('float16')
from keras.models import load_model
import pandas as pd
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from nnmnkwii.io import hts
from nnmnkwii import paramgen
from nnmnkwii.preprocessing import trim_zeros_frames
from nnmnkwii.postfilters import merlin_post_filter
from nnmnkwii.frontend import merlin as fe
import pyworld
import pysptk
from mtts.mandarin_frontend import txt2label
from config import *
binary_dict, continuous_dict = hts.load_question_set(hed_path)
X_acoustic_mms = joblib.load(acoustic_mms_path)
Y_acoustic_std = joblib.load(acoustic_std_path)
X_duration_mms = joblib.load(duration_mms_path)
Y_duration_std = joblib.load(duration_std_path)
duration_model = load_model(duration_model_path)
acoustic_model = load_model(acoustic_model_path)
#duration_model.summary()
#acoustic_model.summary()
def gen_parameters(y_predicted):
# Number of time frames
T = y_predicted.shape[0]
# Split acoustic features
mgc = y_predicted[:, :lf0_start_idx]
lf0 = y_predicted[:, lf0_start_idx:vuv_start_idx]
vuv = y_predicted[:, vuv_start_idx]
bap = y_predicted[:, bap_start_idx:]
# Perform MLPG
#Y_acoustic_std.var_
mgc_variances=np.tile(Y_acoustic_std.var_[:lf0_start_idx], (T, 1))
#mgc_variances = np.tile(Y_var[ty][:lf0_start_idx], (T, 1))
mgc = paramgen.mlpg(mgc, mgc_variances, windows)
lf0_variances = np.tile(Y_acoustic_std.var_[lf0_start_idx:vuv_start_idx], (T, 1))
lf0 = paramgen.mlpg(lf0, lf0_variances, windows)
bap_variances = np.tile(Y_acoustic_std.var_[bap_start_idx:], (T, 1))
bap = paramgen.mlpg(bap, bap_variances, windows)
return mgc, lf0, vuv, bap
def gen_waveform(y_predicted, do_postfilter=False):
y_predicted = trim_zeros_frames(y_predicted)
# Generate parameters and split streams
mgc, lf0, vuv, bap = gen_parameters(y_predicted)
if do_postfilter:
mgc = merlin_post_filter(mgc, alpha)
spectrogram = pysptk.mc2sp(mgc, fftlen=fftlen, alpha=alpha)
#print(bap.shape)
aperiodicity = pyworld.decode_aperiodicity(bap.astype(np.float64), fs, fftlen)
f0 = lf0.copy()
f0[vuv < 0.5] = 0
f0[np.nonzero(f0)] = np.exp(f0[np.nonzero(f0)])
generated_waveform = pyworld.synthesize(f0.flatten().astype(np.float64),
spectrogram.astype(np.float64),
aperiodicity.astype(np.float64),
fs, frame_period)
return generated_waveform
def gen_duration(hts_labels, duration_model):
# Linguistic features for duration
#hts_labels = hts.load(label_path)
duration_linguistic_features = fe.linguistic_features(hts_labels,binary_dict, continuous_dict,add_frame_features=False,subphone_features=None).astype(np.float32)
# Apply normalization
duration_linguistic_features =X_duration_mms.transform(duration_linguistic_features)
if len(duration_model.inputs[0].shape)==3:
# seq2seq
n1,n2=duration_linguistic_features.shape
duration_linguistic_features=duration_linguistic_features.reshape(1,n1,n2)
duration_predicted=duration_model.predict(duration_linguistic_features)
if len(duration_predicted.shape)==3:
duration_predicted=duration_predicted.reshape(duration_predicted.shape[1],duration_predicted.shape[2])
duration_predicted=Y_duration_std.inverse_transform(duration_predicted)
duration_predicted = np.round(duration_predicted)
# Set minimum state duration to 1
duration_predicted[duration_predicted <= 0] = 1
hts_labels.set_durations(duration_predicted)
return hts_labels
def test_one_utt(txt, duration_model, acoustic_model, post_filter=True):
# Predict durations
#txt = '中华人民共和国中央人民政府今天成立了'
label=txt2label(txt)
#hts_labels = hts.load(path=label_path)
hts_labels = hts.load(lines=label)
duration_modified_hts_labels = gen_duration(hts_labels, duration_model)
# Linguistic features
linguistic_features = fe.linguistic_features(duration_modified_hts_labels, binary_dict, continuous_dict,add_frame_features=True,subphone_features="coarse_coding")
# Trim silences
indices = duration_modified_hts_labels.silence_frame_indices()
linguistic_features = np.delete(linguistic_features, indices, axis=0)
linguistic_features=X_acoustic_mms.transform(linguistic_features)
if len(acoustic_model.inputs[0].shape) == 3:
# RNN
n1, n2 = linguistic_features.shape
linguistic_features = linguistic_features.reshape(1, n1, n2)
acoustic_predicted = acoustic_model.predict(linguistic_features)
acoustic_predicted = acoustic_predicted.reshape(acoustic_predicted.shape[1], acoustic_predicted.shape[2])
else:
acoustic_predicted = acoustic_model.predict(linguistic_features)
acoustic_predicted = Y_acoustic_std.inverse_transform(acoustic_predicted)
out=gen_waveform(acoustic_predicted, post_filter)
out=out.astype(np.int16)
return out
if __name__ == '__main__':
test=pd.read_csv('misc/test01.csv')
n=test.shape[0]
save_dir = os.path.join("resu" )
if not os.path.exists(save_dir):
os.makedirs(save_dir)
cost_times=[]
for i in range(0,n):
txt=test.text[i]
name = str(test.name[i])
t0=time.time()
waveform = test_one_utt(txt, duration_model, acoustic_model,post_filter=True)
wavfile.write(os.path.join(save_dir, name+'-'+txt[:5] + "-%s.wav" %len(txt)), rate=fs, data=waveform )
|
from django.urls import path,re_path
from products.views import products_list_1,product_item,featured_item
urlpatterns = [
path('', products_list_1,name='query'),
path('', products_list_1,name='query1'),
    re_path(r'(?P<pk>[-@\w]+)/', product_item),
]
|
from .tools import websocket_token_from_session_key
def websocket_token(request):
token = websocket_token_from_session_key(request.session.session_key)
if token:
return {'websocket_token': token}
else:
return {'websocket_token': ''}
|
import json
from RDS import ROParser
import logging
import os
from lib.upload_zenodo import Zenodo
from flask import jsonify, request, g, current_app
from werkzeug.exceptions import abort
from lib.Util import require_api_key, to_jsonld, from_jsonld
logger = logging.getLogger()
@require_api_key
def index():
req = request.json.get("metadata")
depoResponse = g.zenodo.get_deposition(metadataFilter=req)
logger.debug("depo response: {}".format(depoResponse))
output = []
for depo in depoResponse:
try:
metadata = to_jsonld(depo)
except Exception as e:
logger.error(e, exc_info=True)
metadata = depo
output.append({
"projectId": str(depo["prereserve_doi"]["recid"]),
"metadata": metadata
})
return jsonify(output)
@require_api_key
def get(project_id):
req = request.json.get("metadata")
depoResponse = g.zenodo.get_deposition(
id=int(project_id), metadataFilter=req)
logger.debug("depo reponse: {}".format(depoResponse))
output = depoResponse
try:
output = to_jsonld(depoResponse.get("metadata") or depoResponse)
except Exception as e:
logger.error(e, exc_info=True)
output = depoResponse
logger.debug("output: {}".format(output))
return jsonify(output)
def zenodo(res):
result = {}
result["title"] = res["name"]
result["description"] = res["description"]
creator = res["creator"]
result["publication_date"] = res["datePublished"]
creator = []
if not isinstance(res["creator"], list):
res["creator"] = [res["creator"]]
for c in res["creator"]:
if isinstance(c, str):
creator.append({
"name": c
})
else:
creator.append(c)
result["creators"] = creator
if res["zenodocategory"].find("/") > 0:
typ, subtyp = tuple(res["zenodocategory"].split("/", 1))
result["upload_type"] = typ
result["{}_type".format(typ)] = subtyp
return result
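# Illustrative input/output sketch for zenodo() (keys taken from the code above;
# the values are made up):
#   zenodo({"name": "My dataset", "description": "...", "creator": "Alice",
#           "datePublished": "2021-01-01", "zenodocategory": "publication/report"})
#   returns {"title": "My dataset", "description": "...", "publication_date": "2021-01-01",
#            "creators": [{"name": "Alice"}], "upload_type": "publication",
#            "publication_type": "report"}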
@require_api_key
def post():
try:
req = request.get_json(force=True)
metadata = req.get("metadata")
logger.debug(f"got metadata: {metadata}")
if metadata is not None:
try:
doc = ROParser(metadata)
metadata = zenodo(doc.getElement(
doc.rootIdentifier, expand=True, clean=True))
except Exception as e:
logger.error(e, exc_info=True)
logger.debug("send metadata: {}".format(metadata))
depoResponse = g.zenodo.create_new_deposition_internal(
metadata=metadata, return_response=True
)
if depoResponse.status_code < 300:
depoResponse = depoResponse.json()
return jsonify(
{
"projectId": str(depoResponse.get("id")),
"metadata": depoResponse.get("metadata"),
}
)
abort(depoResponse.status_code)
except Exception as e:
logger.error(e, exc_info=True)
abort(500)
@require_api_key
def delete(project_id):
if g.zenodo.remove_deposition_internal(int(project_id)):
return "", 204
abort(404)
@require_api_key
def patch(project_id):
req = request.get_json(force=True)
logger.debug("request data: {}".format(req))
metadata = req.get("metadata")
if metadata is not None:
try:
doc = ROParser(metadata)
metadata = zenodo(doc.getElement(
doc.rootIdentifier, expand=True, clean=True))
except Exception as e:
logger.error(e, exc_info=True)
userId = req.get("userId")
logger.debug("transformed data: {}".format(metadata))
depoResponse = g.zenodo.change_metadata_in_deposition_internal(
deposition_id=int(project_id), metadata=metadata, return_response=True
)
if depoResponse.status_code == 200:
output = depoResponse.json()
logger.debug("output: {}".format(output))
try:
output["metadata"] = to_jsonld(output["metadata"])
except Exception as e:
logger.error(e, exc_info=True)
logger.debug("finished output: {}".format(output))
return jsonify(output["metadata"])
abort(depoResponse.status_code)
@require_api_key
def put(project_id):
if g.zenodo.publish_deposition_internal(deposition_id=int(project_id)):
        return "", 200
abort(400)
|
"""
Function to a Form
------------------
Converts function signatures into django forms.
NOTE: with enums it's recommended to use the string version, since the value will be used as the
representation to the user (and generally numbers aren't that valuable)
"""
import enum
import inspect
import re
import typing
from dataclasses import dataclass
from django import forms
from django.db.models import TextChoices
from defopt import Parameter, signature, _parse_docstring
from typing import Dict, Optional
from typing import Type
import pathlib
from . import utils
class Text(str):
"""Wrapper class to be able to handle str types"""
pass
type2field_type = {
int: forms.IntegerField,
str: forms.CharField,
bool: forms.BooleanField,
Optional[bool]: forms.NullBooleanField,
Text: forms.CharField,
pathlib.Path: forms.CharField,
dict: forms.JSONField,
}
type2widget = {Text: forms.Textarea()}
@dataclass
class _Function:
func: callable
name: str
form_class: object
doc: str
@classmethod
def from_function(cls, func, *, name, config=None):
form_class = function_to_form(func, name=name, config=config)
return cls(func=func, name=name, form_class=form_class, doc=form_class.__doc__)
def doc_mapping(str) -> Dict[str, str]:
return {}
def function_to_form(func, *, config: dict = None, name: str = None) -> Type[forms.Form]:
"""Convert a function to a Django Form.
Args:
func: the function to be changed
config: A dictionary with keys ``widgets`` and ``fields`` each mapping types/specific
arguments to custom fields
"""
name = name or func.__qualname__
sig = signature(func)
# i.e., class body for form
fields = {}
defaults = {}
for parameter in sig.parameters.values():
field = param_to_field(parameter, config)
fields[parameter.name] = field
if parameter.default is not Parameter.empty:
defaults[parameter.name] = parameter.default
if isinstance(field, forms.TypedChoiceField):
field._parameter_name = parameter.name
field._func_name = name
if parameter.default and parameter.default is not Parameter.empty:
for potential_default in [parameter.default.name, parameter.default.value]:
if any(potential_default == x[0] for x in field.choices):
defaults[parameter.name] = potential_default
break
else:
raise ValueError(
f"Cannot figure out how to assign default for {parameter.name}: {parameter.default}"
)
fields["__doc__"] = re.sub("\n+", "\n", _parse_docstring(inspect.getdoc(func)).text)
form_name = "".join(part.capitalize() for part in func.__name__.split("_"))
class BaseForm(forms.Form):
_func = func
_input_defaults = defaults
# use this for ignoring extra args from createview and such
def __init__(self, *a, instance=None, user=None, **k):
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
super().__init__(*a, **k)
self.user = user
self.helper = FormHelper(self)
self.helper.add_input(Submit("submit", "Execute!"))
def execute_function(self):
# TODO: reconvert back to enum type! :(
return func(**self.cleaned_data)
def save(self):
from .models import ExecutionResult
obj = ExecutionResult(func_name=name, input_json=self.cleaned_data, user=self.user)
obj.save()
return obj
return type(form_name, (BaseForm,), fields)
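# Minimal usage sketch (hypothetical function; assumes Django and crispy_forms are
# configured as this module expects):
#   def greet(name: str, times: int = 1):
#       """Build a greeting."""
#       return "hello " * times + name
#   GreetForm = function_to_form(greet)
#   form = GreetForm(data={"name": "Ada", "times": 2})
#   if form.is_valid():
#       form.execute_function()   # calls greet(**form.cleaned_data)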
def is_optional(annotation):
if args := typing.get_args(annotation):
return len(args) == 2 and args[-1] == type(None)
def get_type_from_annotation(param: Parameter):
if is_optional(param.annotation):
return typing.get_args(param.annotation)[0]
if typing.get_origin(param.annotation):
raise ValueError(f"Field {param.name}: type class {param.annotation} not supported")
return param.annotation
@dataclass
class Coercer:
"""Wrapper so that we handle implicit string conversion of enum types :("""
enum_type: object
by_attribute: bool = False
def __call__(self, value):
try:
resp = self._call(value)
return resp
except Exception as e:
import traceback
traceback.print_exc()
raise
def _call(self, value):
if value and isinstance(value, self.enum_type):
return value
if self.by_attribute:
return getattr(self.enum_type, value)
try:
resp = self.enum_type(value)
return resp
except ValueError as e:
import traceback
traceback.print_exc()
try:
return self.enum_type(int(value))
except ValueError as f:
# could not coerce to int :(
pass
if isinstance(value, str):
                # fall back to attribute lookup by name if necessary
try:
return getattr(self.enum_type, value)
except AttributeError:
pass
raise e from e
assert False, "Should not get here"
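# Coercion sketch (hypothetical enum, illustrative only):
#   class Color(enum.Enum):
#       RED = 1
#   Coercer(Color)("1") == Color.RED                        # falls back to int() coercion
#   Coercer(Color, by_attribute=True)("RED") == Color.RED   # looks the member up by name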
def param_to_field(param: Parameter, config: dict = None) -> forms.Field:
"""Convert a specific arg to a django form field.
See function_to_form for config definition."""
config = config or {}
all_types = {**type2field_type, **(config.get("fields") or {})}
widgets = {**type2widget, **(config.get("widgets") or {})}
field_type = None
kwargs = {}
kind = get_type_from_annotation(param)
is_enum_class = False
try:
is_enum_class = issubclass(kind, enum.Enum)
except TypeError:
# e.g. stupid generic type stuff
pass
if is_enum_class:
utils.EnumRegistry.register(kind)
field_type = forms.TypedChoiceField
kwargs.update(make_enum_kwargs(param=param, kind=kind))
else:
field_type = get_for_param_by_type(all_types, param=param, kind=kind)
if not field_type:
raise ValueError(f"Field {param.name}: Unknown field type: {param.annotation}")
# do not overwrite kwargs if already specified
kwargs = {**extra_kwargs(field_type, param), **kwargs}
if field_type == forms.BooleanField and param.default is None:
field_type = forms.NullBooleanField
widget = get_for_param_by_type(widgets, param=param, kind=kind)
if widget:
kwargs["widget"] = widget
return field_type(**kwargs)
def make_enum_kwargs(kind, param):
kwargs = {}
if all(isinstance(member.value, int) for member in kind):
kwargs["choices"] = TextChoices(
f"{kind.__name__}Enum", {member.name: (member.name, member.name) for member in kind}
).choices
kwargs["coerce"] = Coercer(kind, by_attribute=True)
else:
# we set up all the kinds of entries to make it a bit easier to do the names and the
# values...
kwargs["choices"] = TextChoices(
f"{kind.__name__}Enum",
dict(
[(member.name, (str(member.value), member.name)) for member in kind]
+ [(str(member.value), (member.name, member.name)) for member in kind]
),
).choices
kwargs["coerce"] = Coercer(kind)
# coerce back
if isinstance(param.default, kind):
kwargs["initial"] = param.default.value
return kwargs
def get_for_param_by_type(dct, *, param, kind):
"""Grab the appropriate element out of dict based on param type.
Ordering:
1. param.name (i.e., something custom specified by user)
2. param.annotation
3. underlying type if typing.Optional
"""
if elem := dct.get(param.name, dct.get(param.annotation, dct.get(kind))):
return elem
for k, v in dct.items():
if inspect.isclass(k) and issubclass(kind, k) or k == kind:
return v
def extra_kwargs(field_type, param):
kwargs = {}
if param.default is Parameter.empty:
kwargs["required"] = True
elif param.default is None:
kwargs["required"] = False
# need this so that empty values get passed through to function correctly!
if "empty_value" in inspect.signature(field_type).parameters:
kwargs["empty_value"] = None
else:
kwargs["required"] = False
kwargs.setdefault("initial", param.default)
if param.doc:
kwargs["help_text"] = param.doc
return kwargs
|
"""The :mod:`paddy.Default_Numerics` module contains functions for numeric
problems.
Routine listings
----------------
EvalNumeric(object)
Polynomial(object)
poly(x,seed)
trig_inter(x,seed)
mse_func(target, output)
gramacy_lee()
See Also
--------
:mod:`paddy.Paddy_Runner`
Notes
-----
The background information regarding the general functions and classes used to
initiate an instance of :class:`~paddy.Paddy_Runner.PFARunner`, which are also
provided as builtins in the :mod:`~paddy.Default_Numerics` module, is described
in the :mod:`~paddy.Paddy_Runner` module.
Examples
--------
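A minimal interpolation sketch using the default builtins defined below
(`EvalNumeric` with `gramacy_lee` and `trig_inter`); the seed array stands in
for parameters normally generated by :class:`~paddy.Paddy_Runner.PFARunner`:
>>> import numpy as np
>>> from paddy.Default_Numerics import EvalNumeric
>>> evaluator = EvalNumeric()
>>> seed = np.ones((11, 2))
>>> fitness = evaluator.eval(seed)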
"""
import math
import numpy as np
from paddy.Paddy_Parameter import PaddyParameter
def gramacy_lee():
"""Return Gramacy and Lee function coordinates.
This function generates the *x-y* coordinates used to evaluate
interpolation of the Gramacy and Lee function via a default or user
    defined interpolation function.
See Also
--------
:func:`eval_numeric`
References
----------
.. [1] Gramacy, R. B., & Lee, H. K. (2012). Cases for the nugget in
modeling computer experiments. Statistics and Computing, 22(3), 713-722
.
"""
x_dummy = -0.5
y_list = []
counter = 0
x_list = []
x_range = np.arange(-.5, 2.501, 0.001)
while counter < len(x_range):
x_list.append(round(x_range[counter], 3))
counter = counter+1
counter = 0
while counter < len(x_range):
y_list.append(((math.sin(10*math.pi*x_dummy))/float(2*x_dummy))+((x_dummy-1)**4))
x_dummy = x_dummy + 0.001
counter = counter+1
return x_list, y_list
def mse_func(target, mse_input):
"""Return error of interpolation.
    This function returns the mean squared error for two sequences of
numeric values.
"""
counter = 0
error = []
while counter < len(target):
error.append((abs(target[counter]-mse_input[counter]))**2)
counter = counter+1
ave_error = sum(error)/float(len(target))
return ave_error
def poly(x_list, seed):
r"""Return cordinates of a polynomial.
This function returns the 2-D cordinates of a polynomial, and is used
for interpolation problems.
Parameters
----------
x_list : list
A list of numerics that defines the x values evaluated over.
seed : array-like, shape = (parameters, 2)
A numpy array of parameters generated by paddy. Refer to
:mod:`~paddy.Paddy_runner` for details regarding generation of the
array.
Returns
-------
output : list of floats
A list of float values representing the y values of the input x.
See Also
--------
:func:`eval_numeric`
Notes
-----
The polynomial is defined as:
.. math:: \sum_{k=0}^{n}a_{k}x^k
Where ``a`` represents the sequence of parameter values generated by
paddy. ``x`` is simply the `x` parameter of numeric values that the
polynomial is evaluated over.
"""
counter = 0
output = []
s_x = []
while counter < len(x_list):
k = 0
xn_out = []
while k < len(seed):
###sums at x1###
temp_y = seed[k][0]*(x_list[counter]**k)
xn_out.append(temp_y)
k = k+1
s_x.append(sum(xn_out))
counter = counter+1
output = s_x
return output
def trig_inter(x_list, seed):
r"""Return cordinates of a trigonometric polynomial.
This function returns the 2-D cordinates of a trigonometric polynomial,
and is used for interpolation problems.
Parameters
----------
x_list : list
A list of numerics that defines the x values evaluated over.
seed : array-like, shape = (parameters,2)
A numpy array of parameters generated by paddy. Refer to
:mod:`~paddy.Paddy_runner` for details regarding generation of the
array.
See Also
--------
:func:`eval_numeric`
Notes
-----
The trigonometric polynomial is defined as:
.. math:: t(x)=a_{0}+\sum_{n=1}^{N}a_{n}\cos(nx)+\sum_{n=1}^{N}b_{n}\sin(
nx)
    Where ``a`` represents the sequence of parameter values generated by
    paddy, passed as the odd indices of `seed`, and ``b`` as the even indices.
    Note that this means a trigonometric polynomial of degree N is defined by
    a paddy space of 2N+1 individual parameters (e.g. 21 for the 10th degree).
    References
    ----------
.. [1] Rudin, Walter (1987), Real and complex analysis (3rd ed.), New York
: McGraw-Hill, ISBN 978-0-07-054234-1, MR 0924157.
"""
if len(seed) % 2 == 0:
print("must use odd value for dim greater than 1!")
return
counter = 0
s_x = []
while counter < len(x_list):
###this evaluates over x
n = 1
xn_out = []
while n <= ((len(seed)-1)/2.0):
###evaluates sums of cos and sin
alpha = ((n*2)-1)
beta = (n*2)
temp_y = ((
seed[alpha][0])*(math.cos((n*x_list[counter]))))+(
(seed[beta][0])*(math.sin((n*x_list[counter]))))
xn_out.append(temp_y)
n = n+1
xn_out.append(seed[0][0])
s_x.append(sum(xn_out))
counter = counter + 1
return s_x
class EvalNumeric(object):
r"""Return error after evaluating a numeric function.
    This class can be used to evaluate an optimization problem where fitting
to a numeric value is the goal. This can range from interpolation to
min-max optimization.
Parameters
----------
seed : array-like, shape = (parameters,2)
A numpy array of parameters generated by paddy. Refer to
:mod:`~paddy.Paddy_runner` for details regarding generation of the
array.
error_func : function, optional (default : mse_func)
        An error function whose output is minimized by paddy. If user
        defined, make sure to note the internal sign change used to maximize
        the negative error value returned by `eval_numeric`.
t_func : function, optional (default : gramacy_lee)
A target function that provides the input and target value(s) for the
error and fitting functions.
f_func : function, optional (default : trig_inter)
        A fitting function that receives an input generated by the target
        function, and returns an output to compare with the desired answer
        being optimized towards.
Returns
-------
error : float
A numeric value that serves as the fitness value for the seed
evaluated. This is defined as the negative output of the error
function.
See Also
--------
:func:`mse_func`
:func:`gramacy_lee`
:func:`trig_inter`
Notes
-----
    The default functionality of this class is the evaluation of seed values
    as the coefficients of a trigonometric polynomial used to interpolate the
Gramacy and Lee function, where the evaluation metric function is the
mean squared error of the x-y coordinates of Gramacy and Lee and the
generated polynomial where :math:`x\in[ -0.5,2.5 ]` with a resolution of
0.001. The default and user defined instances of this class use the
method `eval(seed)` to evaluate and return results.
"""
def __init__(self, error_func=mse_func,
t_func=gramacy_lee, f_func=trig_inter):
self.error_func = error_func
self.t_func = t_func
self.f_func = f_func
self.x, self.answer = self.t_func()
def eval(self, seed):
"""Method of `eval_numeric`.
"""
seed = seed
y_val = self.f_func(self.x, seed)
#made negative for maximization problem
error = -self.error_func(self.answer, y_val)
return error
class Polynomial(object):
"""Generate paddy space that is apt for polynomial fitting.
See Also
--------
:class:`eval_numeric`
:class:`paddy.Paddy_Parameter.PaddyParameter`
"""
def __init__(self, length, scope, gausian_type, normalization=True,
limits=True):
if limits is False:
limit_init = None
else:
limit_init = [-scope, scope]
counter = 0
while counter < length:
vars(self)['polly{0}'.format(counter)] = (
PaddyParameter(param_range=[-scope, scope, scope*.05],
param_type='continuous', limits=limit_init,
gaussian=gausian_type,
normalization=normalization)
)
counter += 1
|
import komand
from .schema import QuarantineInput, QuarantineOutput
from komand.exceptions import ConnectionTestException
# Custom imports below
class Quarantine(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='quarantine',
description='Quarantine a host',
input=QuarantineInput(),
output=QuarantineOutput())
def run(self, params={}):
mac_address = params.get('mac_address')
policy = params.get('policy')
self.connection.ers.apply_anc_endpoint_mac(mac_address, policy)
results = self.connection.ers.get_anc_endpoint()
try:
results = results['SearchResult']['resources']
except KeyError:
self.logger.error('Raw results from ANC endpoint query: ' + str(results))
raise
except Exception as e:
self.logger.error(e)
self.logger.error('Raw results from ANC endpoint query: ' + str(results))
raise
try:
for x in results:
find = self.connection.ers.get_anc_endpoint(x['id'])
if find['ErsAncEndpoint']['macAddress'] == mac_address:
return {'ers_anc_endpoint': find['ErsAncEndpoint']}
except KeyError:
self.logger.error('Raw results from ANC endpoint query: ' + str(results))
            self.logger.error('Raw results from ANC endpoint query on IDs: ' + str(x))
raise
except Exception as e:
self.logger.error(e)
self.logger.error('Raw results from ANC endpoint query: ' + str(results))
            self.logger.error('Raw results from ANC endpoint query on IDs: ' + str(x))
raise
        self.logger.error('MAC address: ' + mac_address)
        self.logger.error('Policy: ' + policy)
        self.logger.error('Raw results from ANC endpoint query: ' + str(results))
raise ConnectionTestException(cause="Cisco ISE did not return a result",
assistance="Check your configuration settings and confirm your policy exists and "
"MAC address are correct")
def test(self):
test = self.connection.ers.get_endpoint()
return {'endpoint_list': test}
|
from Restaurante9_10 import Restaurant
r1 = Restaurant('La Cazuela', 'mexicana')
r1.desc()
r1.abierto()
|
from typing import Any, Optional
import urwid
from ..attributed_text_widget import ATWidget
from ..markup import AT
from .edit_widgets import EditWidget
from .euph_config import EuphConfig
from .launch_application import launch
from .room_widget import RoomWidget
__all__ = ["SingleRoomApplication", "launch_single_room_application"]
class ChooseRoomWidget(urwid.WidgetWrap):
def __init__(self,
room_style: Optional[str] = None,
error_style: Optional[str] = None,
error_room_style: Optional[str] = None,
) -> None:
self._room_style = room_style
self._error_style = error_style
self._error_room_style = error_room_style
self.error = None
self.edit = EditWidget("Choose a room:", caption="&", style=room_style)
self.filler = urwid.Filler(self.edit)
super().__init__(self.filler)
def render(self, size: Any, focus: Any) -> Any:
if self.error:
width, _ = size
rows = self.error.rows((width,), focus)
self.filler.bottom = rows
return super().render(size, focus)
def set_error(self, text: Any) -> None:
self.error = text
self.pile = urwid.Pile([
self.error,
self.edit,
])
self.filler = urwid.Filler(self.pile)
self._w = self.filler
def unset_error(self) -> None:
self.error = None
self.filler = urwid.Filler(self.edit)
self._w = self.filler
def could_not_connect(self, roomname: str) -> None:
text = AT("Could not connect to ", style=self._error_style)
text += AT("&" + roomname, style=self._error_room_style)
text += AT(".\n", style=self._error_style)
self.set_error(ATWidget(text, align=urwid.CENTER))
def invalid_room_name(self, reason: str) -> None:
text = AT(f"Invalid room name: {reason}\n",
style=self._error_style)
self.set_error(ATWidget(text, align=urwid.CENTER))
class SingleRoomApplication(urwid.WidgetWrap):
# The characters in the ALPHABET make up the characters that are allowed in
# room names.
ALPHABET = "abcdefghijklmnopqrstuvwxyz0123456789"
# These are other characters or character combinations necessary for the
# editor to function well.
ALLOWED_EDITOR_KEYS = {
"backspace", "delete",
"left", "right",
"home", "end",
}
def __init__(self, config: EuphConfig) -> None:
self.c = config
self.choose_room = ChooseRoomWidget(
room_style=self.c.room_style,
error_style=self.c.error_style,
error_room_style=self.c.error_room_style,
)
super().__init__(self.choose_room)
def selectable(self) -> bool:
return True
def switch_to_choose(self) -> None:
self.choose_room.could_not_connect(self.choose_room.edit.text)
self._w = self.choose_room
def keypress(self, size: Any, key: str) -> Optional[str]:
if self._w is self.choose_room:
if key == "esc":
raise urwid.ExitMainLoop()
self.choose_room.unset_error()
if key == "enter":
roomname = self.choose_room.edit.text
if roomname:
room = RoomWidget(roomname, self.c)
urwid.connect_signal(room, "close", self.switch_to_choose)
self._w = room
room.connect()
else:
self.choose_room.invalid_room_name("too short")
elif not super().selectable():
return key
# Make sure we only enter valid room names
elif key.lower() in self.ALPHABET:
return super().keypress(size, key.lower())
elif key in self.ALLOWED_EDITOR_KEYS:
return super().keypress(size, key)
return None
elif super().selectable():
return super().keypress(size, key)
return key
def launch_single_room_application() -> None:
launch(SingleRoomApplication)
|
#!/usr/bin/env python3
#-*-coding:utf-8-*-
"""
Trajectory evaluator for pure LiDAR-based chain SLAM, cartographer and GMapping
"""
import os
import csv
import argparse
import numpy as np
import matplotlib.pyplot as plt
from sys import argv
from numpy.linalg import svd, det
all_method_names = ("cartographer", "chain SLAM", "GMapping", "bosch")
error_cats = ("traj abs", "traj sqr", "rot abs", "rot sqr")
data_cats = ("mean", "std")
# This is the most confusing list comprehension I have ever written
__header__ = ["%s %s %s"%(n, error_cat, data_cat) for n in all_method_names for error_cat in error_cats for data_cat in data_cats]
# Input trajectories should have shape (num_points, 4); the first column is the timestamp
class TrajEval:
def __init__(self) -> None:
pass
@staticmethod
def goodAngle(ang):
if ang > np.pi:
return ang - 2 * np.pi
elif ang < -np.pi:
return ang + 2 * np.pi
return ang
@staticmethod
def readFromFile(path:str, use_flag:bool = False)->np.array:
with open(path, 'r') as file:
raw_all = file.readlines()
result = np.zeros((len(raw_all), 5 if use_flag else 4))
for i, line in enumerate(raw_all):
stripped = line.split(' ')
result[i, 0] = float(stripped[0]) / 1e9
for j in range(1, 3):
result[i, j] = float(stripped[j])
if stripped[3][-1] == '\n':
angle = float(stripped[3][:-1])
else:
angle = float(stripped[3])
while angle > np.pi:
angle -= np.pi * 2
while angle < -np.pi:
angle += np.pi * 2
result[i, 3] = angle
if use_flag:
result[i, -1] = int(stripped[-1][0])
if use_flag:
return result[result[:, -1] > 0.5, :-1]
return result
@staticmethod
def icpPostProcess(data:np.array, gt:np.array):
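        # Rigid 2D alignment of `data` onto `gt`: correspondences are found by nearest
        # timestamp (binary search), then an SVD-based (Kabsch-style) rotation and a
        # translation are estimated and applied to every pose, including its heading.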
query_c, pair_c = np.zeros(2, dtype = float), np.zeros(2, dtype = float)
qpt = np.zeros((2, 2), dtype = float)
query_start_p, base_start_p = data[0, 1:-1], gt[0, 1:-1]
for i, time_stamp in enumerate(data[:, 0]):
idx, _ = TrajEval.binarySearch(time_stamp, gt[:, 0])
query_pt = data[i, 1:-1]
value_pt = gt[idx, 1:-1]
query_c += query_pt
pair_c += value_pt
qpt += (query_pt - query_start_p).reshape(-1, 1) @ (value_pt - base_start_p).reshape(1, -1)
query_c /= data.shape[0]
pair_c /= data.shape[0]
qpt -= data.shape[0] * (query_c - query_start_p).reshape(-1, 1) @ (pair_c - base_start_p).reshape(1, -1)
trans = np.zeros((3, 3), dtype = float)
u, s, vh = svd(qpt)
r = vh.T @ u.T
rr = vh.T @ np.array([[1., 0.], [0., det(r)]]) @ u.T
translation = (pair_c.reshape(-1, 1) - rr @ query_c.reshape(-1, 1))[:2].ravel()
delta_angle = np.arctan2(rr[1, 0], rr[0, 0])
result = np.zeros_like(data)
result[:, 0] = data[:, 0]
for i in range(data.shape[0]):
result[i, 1:-1] = data[i, 1:-1].reshape(1, -1) @ rr.T + translation
result[i, -1] = TrajEval.goodAngle(data[i, -1] + delta_angle)
return result
@staticmethod
def binarySearch(val, target_list):
s = 0
e = len(target_list) - 1
while e > s + 1:
m = (e + s) >> 1
v_mid = target_list[m]
if val < v_mid:
e = m
elif val > v_mid:
s = m
else:
return m, target_list[m]
if abs(target_list[s] - val) < abs(target_list[e] - val):
return s, target_list[s]
else:
return e, target_list[e]
    # A simple 2D nearest-neighbor match is not enough: trajectories may cross, and the
    # 2D nearest point can then be chosen wrongly (it always picks the more favorable
    # point), so the most reliable association is the nearest neighbor in timestamp.
    # Try to keep the timestamps of different methods aligned; input is (point_num, 4).
    # Trajectory errors need to be computed.
@staticmethod
def temperalNearestNeighborEvaluator(src:np.array, dst:np.array):
        # The trajectory with more points is used as the lookup target (GMapping may produce a sparse trajectory)
def compare(base:np.array, query:np.array):
abs_traj_2d = []
sqr_traj_2d = []
abs_rot_2d = []
sqr_rot_2d = []
for i, time_stamp in enumerate(query[:, 0]):
idx, _ = TrajEval.binarySearch(time_stamp, base[:, 0])
query_pt = query[i, 1:]
value_pt = base[idx, 1:]
diff = query_pt - value_pt
if diff[2] > np.pi:
diff[2] -= 2 * np.pi
if diff[2] < -np.pi:
diff[2] += 2 * np.pi
diff_dist = np.linalg.norm(diff[:2])
abs_traj_2d.append(diff_dist)
sqr_traj_2d.append(diff_dist ** 2)
abs_rot_2d.append(abs(diff[2]))
sqr_rot_2d.append(diff[2] ** 2)
return abs_traj_2d, sqr_traj_2d, abs_rot_2d, sqr_rot_2d
if len(dst) >= len(src):
return compare(dst, src)
else:
return compare(src, dst)
    # Simple visualization helpers
@staticmethod
def visualizeOneTrajectory(traj:np.array, dot_size = 0, color = None, label = None, linewidth = None, dot_color = None):
plt.plot(traj[:, 1], traj[:, 2], c = color, label = label, linewidth = linewidth)
if dot_size > 0:
plt.scatter(traj[:, 1], traj[:, 2], c = dot_color, s = dot_size)
@staticmethod
def visualizePerDimError(x_error:list, y_error:list, theta_error:list, make_abs = False):
plt.subplot(1, 3, 1)
xs = np.arange(len(x_error))
transform = np.abs if make_abs else np.array
plt.subplot(1, 3, 1)
plt.plot(xs, transform(x_error), label = 'absolute error x-axis')
plt.subplot(1, 3, 2)
plt.plot(xs, transform(y_error), label = 'absolute error y-axis')
plt.subplot(1, 3, 3)
plt.plot(xs, transform(theta_error), label = 'absolute error theta-axis')
@staticmethod
def visualizeTrajError(error2d:list, error3d:list):
plt.subplot(1, 2, 1)
xs = np.arange(len(error2d))
        plt.plot(xs, error2d, label = '2D trajectory error')
plt.subplot(1, 2, 2)
        plt.plot(xs, error3d, label = '3D trajectory error')
    # Only compare L1 2D trajectory and rotation errors / L2 2D trajectory and rotation errors
@staticmethod
def mean_std(abs_traj_2d, sqr_traj_2d, abs_rot_2d, sqr_rot_2d, verbose = True):
abs_traj_2d_mean, abs_traj_2d_std = np.mean(abs_traj_2d), np.std(abs_traj_2d)
sqr_traj_2d_mean, sqr_traj_2d_std = np.mean(sqr_traj_2d), np.std(sqr_traj_2d)
abs_rot_2d_mean, abs_rot_2d_std = np.mean(abs_rot_2d), np.std(abs_rot_2d)
sqr_rot_2d_mean, sqr_rot_2d_std = np.mean(sqr_rot_2d), np.std(sqr_rot_2d)
if verbose:
print("Absolute Trajectory Error:\t%.6lf±%.6f"%(abs_traj_2d_mean, abs_traj_2d_std))
print("Squared Trajectory Error:\t%.6lf±%.6f"%(sqr_traj_2d_mean, sqr_traj_2d_std))
print("Absolute Rotational Error:\t%.6lf±%.6f"%(abs_rot_2d_mean, abs_rot_2d_std))
print("Squared Rotational Error:\t%.6lf±%.6f"%(sqr_rot_2d_mean, sqr_rot_2d_std))
return [abs_traj_2d_mean, abs_traj_2d_std, sqr_traj_2d_mean, sqr_traj_2d_std, abs_rot_2d_mean, abs_rot_2d_std, sqr_rot_2d_mean, sqr_rot_2d_std]
@staticmethod
def visualizeWithGT(folder:str, traj_name:str, use_flag:bool = False):
gt_file_path = folder + "c_gt.tjc"
traj_path = folder + traj_name + ".tjc"
gt_data = TrajEval.readFromFile(gt_file_path)
traj_data = TrajEval.readFromFile(traj_path, use_flag)
TrajEval.visualizeOneTrajectory(gt_data, 3, label = 'gt trajectory')
TrajEval.visualizeOneTrajectory(traj_data, 3, label = '%s trajectory'%(traj_name))
plt.legend()
plt.grid(axis = 'both')
# plt.show()
@staticmethod
def append2csv(csv_pos, all_info):
with open(csv_pos, 'a', newline='') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(all_info)
csv_file.close()
@staticmethod
def visualizeDifferentMethods(folder:str, traj_id:int, verbose:bool = True, save_fig:bool = True):
gt_file_path = folder + "c_gt.tjc"
gt_data = TrajEval.readFromFile(gt_file_path)
if save_fig:
plt.figure(dpi = 320)
plt.rcParams["figure.figsize"] = [10.0, 6.0]
plt.subplots_adjust(left=0.075, right=0.925, top=0.925, bottom=0.075)
all_methods = ("carto_%d.tjc"%(traj_id), "c_traj_%d.tjc"%(traj_id), "gmap_%d.tjc"%(traj_id), "bosch_%d.tjc"%(traj_id))
csv_path = folder + "eval_result.csv"
if traj_id == 0:
with open(csv_path, 'w') as tmp:
writer = csv.writer(tmp)
writer.writerow(__header__)
all_results = []
for method, name in zip(all_methods, all_method_names):
path = folder + method
if not os.path.exists(path):
all_results.extend([0. for _ in range(8)])
continue
data = TrajEval.readFromFile(path)
data = TrajEval.icpPostProcess(data, gt_data)
TrajEval.visualizeOneTrajectory(data, 2, label = '%s trajectory'%(name), linewidth=1)
abs_traj_2d, sqr_traj_2d, abs_rot_2d, sqr_rot_2d = TrajEval.temperalNearestNeighborEvaluator(data, gt_data)
if verbose:
print("=============== %s evaluation results ==================="%(name))
all_info = TrajEval.mean_std(abs_traj_2d, sqr_traj_2d, abs_rot_2d, sqr_rot_2d, verbose = verbose)
all_results.extend(all_info)
TrajEval.visualizeOneTrajectory(gt_data, 2, label = 'Ground truth', linewidth=1)
TrajEval.append2csv(folder + "eval_result.csv", all_results)
plt.legend()
plt.grid(axis = 'both')
if save_fig == True:
fig_folder_path = folder + "eval_figs/"
if not os.path.exists(fig_folder_path):
os.mkdir(fig_folder_path)
plt.savefig(fig_folder_path + "eval_result_%d.png"%(traj_id))
else:
plt.show()
def binarySearchTest():
test_list = [1, 5, 7, 10, 13.5, 16.2, 20, 26, 30, 31, 32, 36]
queries = [4, 5.9, 6.0, 6.2, 8.5, 8.499, 8.9, -1, 60, 31.3, 20, 26, 26.1, 0.9999, 1.01, 1, 36.000000000001]
for q in queries:
idx, result = TrajEval.binarySearch(q, test_list)
print("Query: %f, result: %d, %f"%(q, idx, result))
def trajectory_compare():
TrajEval.visualizeDifferentMethods(argv[1], int(argv[2]))
def visualizeICP():
folder = argv[1]
traj_id = int(argv[2])
gt_file_path = folder + "c_gt.tjc"
gt_data = TrajEval.readFromFile(gt_file_path)
carto_path = folder + "carto_%d.tjc"%(traj_id)
carto_data = TrajEval.readFromFile(carto_path)
align_carto = TrajEval.icpPostProcess(carto_data, gt_data)
plt.figure(dpi = 320)
plt.plot(gt_data[:, 1], gt_data[:, 2], label = 'gt')
plt.scatter(gt_data[:, 1], gt_data[:, 2], s = 1)
plt.plot(carto_data[:, 1], carto_data[:, 2], label = 'before')
plt.scatter(carto_data[:, 1], carto_data[:, 2], s = 1)
plt.plot(align_carto[:, 1], align_carto[:, 2], label = 'after')
plt.scatter(align_carto[:, 1], align_carto[:, 2], s = 1)
plt.grid(axis = 'both')
plt.legend()
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--save_fig", default = False, action = "store_true", help = "Save figures during evaluation")
parser.add_argument("-v", "--verbose", default = False, action = "store_true", help = "Output evaluation info in the terminal")
parser.add_argument("--path", type = str, help = "Folder which contains all the trajectories")
parser.add_argument("--traj_num", type = int, default = 0, help = "Trajectory file id")
args = parser.parse_args()
TrajEval.visualizeDifferentMethods(args.path, args.traj_num, args.verbose, args.save_fig)
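    # Example invocation (script name and folder are placeholders, not from the
    # original project). Note that --path must end with a trailing '/' because
    # the trajectory file names are appended by plain string concatenation:
    #   python3 traj_eval.py --path ./trajectories/ --traj_num 0 -v -f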
|
import numpy as np
def evaluate(submission, correct):
"""Checks if two grids (submitted and correct) are the same.
Args:
submission (list, numpy array): The submitted grid. If it is
a list, it will be converted to a numpy array.
        correct (list, numpy array): The ground truth grid. If it is
            a list, it will be converted to a numpy array.
Returns:
bool: True if the two grids are the same, False otherwise.
"""
submission = np.array(submission)
correct = np.array(correct)
return np.array_equal(submission, correct)
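# A minimal usage sketch (the grids below are made up for illustration):
# evaluate() compares shape and contents, so differently shaped grids never match.
#   evaluate([[1, 0], [0, 1]], np.array([[1, 0], [0, 1]]))  # -> True
#   evaluate([[1, 0], [0, 1]], [[1, 0]])                     # -> False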
|
#
# Copyright (C) 2016 Codethink Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
"""
manual - Manual build element
=============================
The most basic build element does nothing but allow users to
add custom build commands to the array understood by the :mod:`BuildElement <buildstream.buildelement>`.
The empty configuration is as such:
.. literalinclude:: ../../../src/buildstream/plugins/elements/manual.yaml
:language: yaml
See :ref:`built-in functionality documentation <core_buildelement_builtins>` for
details on common configuration options for build elements.
"""
from buildstream import BuildElement
# Element implementation for the 'manual' kind.
class ManualElement(BuildElement):
# pylint: disable=attribute-defined-outside-init
BST_MIN_VERSION = "2.0"
# Plugin entry point
def setup():
return ManualElement
|
import json
from policyuniverse.expander_minimizer import minimize_statement_actions
BAD_ACTIONS = {
"*",
"s3:*",
}
def policy(resource):
if resource["Policy"] is None:
return True
iam_policy = json.loads(resource["Policy"])
for statement in iam_policy["Statement"]:
# Only check statements granting access
if statement["Effect"] != "Allow":
continue
minimized_actions = minimize_statement_actions(statement)
if BAD_ACTIONS.intersection(minimized_actions):
return False
return True
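# Hedged usage sketch (the resource dict below is illustrative, not real data):
# an Allow statement whose actions minimize to a wildcard listed in BAD_ACTIONS
# is expected to make policy() return False.
#
# example_resource = {
#     "Policy": json.dumps({
#         "Statement": [
#             {"Effect": "Allow", "Action": "s3:*", "Resource": "*"}
#         ]
#     })
# }
# policy(example_resource)  # expected False, since "s3:*" is in BAD_ACTIONS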
|
from __future__ import print_function
# Converting Day to Seconds
day_num = int(input("Input number of days: "))
sec_in_day = day_num * 24 * 60 * 60
print ("There are {0} seconds in {1} day(s)".format(sec_in_day, day_num))
hours_num = int(input("Input number of hours: "))
sec_in_hour = hours_num * 60 * 60
print ("There are {0} seconds in {1} hour(s)".format(sec_in_hour, hours_num))
minutes_num = int(input("Input number of minutes: "))
sec_in_min = minutes_num * 60
print ("There are {0} seconds in {1} minute(s)".format(sec_in_min, minutes_num))
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
ext_modules = [
Extension("voxelize",
sources=["voxelize.pyx"],
libraries=["m"] # Unix-like specific
)
]
setup(
ext_modules=cythonize(ext_modules)
)
|
#!/usr/bin/env python3
#
# Simple utility to generate a credentials config file as we update the main credentials.json
#
#
import json
with open('credentials.json', 'r') as jsonfile:
    full = json.load(jsonfile)
sample = dict()
for service,one in full.items():
servicedict = dict()
for key,val in one.items():
if key.endswith('_url' ) or key.endswith('_callback') or key.endswith('_param'):
servicedict[key] = val
else:
servicedict[key] = "***"
sample[service] = servicedict
with open('credentials.sample.json', 'w') as outfile:
    json.dump(sample, outfile, indent=2, sort_keys=True)
|
# Copyright (c) 2020 6WIND S.A.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
import functools
import logging
from typing import Any, Callable
from libyang.data import DNode
from _sysrepo import ffi, lib
from .errors import SysrepoError, check_call
from .util import c2str, is_async_func
LOG = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
class Subscription:
"""
Python representation of `sr_subscription_ctx_t *`.
.. attention::
Do not instantiate this class manually, use `SysrepoSession.subscribe_*`.
"""
def __init__(
self,
callback: Callable,
private_data: Any = None,
asyncio_register: bool = False,
strict: bool = False,
include_implicit_defaults: bool = True,
):
"""
:arg callback:
The python callback function or coroutine function used when subscribing.
:arg private_data:
Opaque data used when subscribing, will be passed to callback.
:arg asyncio_register:
Add this subscription event pipe into asyncio event loop monitored file
descriptors. When the event pipe becomes readable, call process_events().
:arg strict:
If True, reject data with no schema definition from rpc output parameters
and operational data callbacks. Otherwise, ignore unknown data and log a
warning message.
:arg include_implicit_defaults:
If True, include implicit default nodes into Change objects passed to module
change callbacks and into input parameters passed to RPC/action callbacks.
"""
if is_async_func(callback) and not asyncio_register:
raise ValueError(
"%s is an async function, asyncio_register is mandatory" % callback
)
self.callback = callback
self.private_data = private_data
self.asyncio_register = asyncio_register
self.strict = strict
self.include_implicit_defaults = include_implicit_defaults
if asyncio_register:
self.loop = asyncio.get_event_loop()
else:
self.loop = None
self.tasks = {}
self.cdata = None
self.fd = -1
self.handle = ffi.new_handle(self)
def init(self, cdata) -> None:
"""
Initialization of this object is not complete after calling __init__. The
sr_subscription_ctx_t object is allocated by sysrepo when calling one of
sr_*_subscribe functions and we need to pass self.handle to these functions so
that this subscription can be forwarded to C callbacks.
Subscription.init() is called just after sr_*_subscribe functions to complete
initialization. See SysrepoSession.subscribe_* functions for more details.
        If self.asyncio_register is True, add this subscription event pipe to the
monitored file descriptors for reading in asyncio event loop.
:arg "sr_subscription_ctx_t *" cdata:
The subscription pointer allocated by sysrepo.
"""
if self.cdata is not None:
raise RuntimeError("init was already called once")
self.cdata = cdata
if self.asyncio_register:
self.fd = self.get_fd()
self.loop.add_reader(self.fd, self.process_events)
def get_fd(self) -> int:
"""
Get the event pipe of a subscription. Event pipe can be used in `select()`,
`poll()`, or similar functions to listen for new events. It will then be ready
for reading.
"""
fd_p = ffi.new("int *")
check_call(lib.sr_get_event_pipe, self.cdata, fd_p)
return fd_p[0]
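    # Hedged usage sketch (not part of the original API surface): as the docstring
    # above notes, the event pipe can also be polled manually instead of relying
    # on asyncio_register, e.g.:
    #
    #   import select
    #   readable, _, _ = select.select([subscription.get_fd()], [], [], 1.0)
    #   if readable:
    #       subscription.process_events()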
def unsubscribe(self) -> None:
"""
Unsubscribes from a subscription acquired by any of sr_*_subscribe calls and
releases all subscription-related data.
Removes self.fd from asyncio event loop monitored file descriptors.
"""
if self.cdata is None:
return
if self.asyncio_register and self.fd != -1:
self.loop.remove_reader(self.fd)
try:
check_call(lib.sr_unsubscribe, self.cdata)
finally:
self.cdata = None
for t in list(self.tasks.values()):
t.cancel()
self.tasks.clear()
def process_events(self) -> None:
"""
Called when self.fd becomes readable.
"""
check_call(lib.sr_process_events, self.cdata, ffi.NULL, ffi.NULL)
def task_done(self, task_id: Any, event: str, task: asyncio.Task) -> None:
"""
Called when self.callback is an async function/method and it has finished. This
calls self.process_events() so that the C callback is invoked again with the
same arguments (request_id, event) and we can return the actual result.
"""
if task.cancelled():
self.tasks.pop(task_id, None)
return
try:
if event in ("update", "change", "rpc", "oper"):
# The task result will be evaluated in the C callback.
# It will return the result to sysrepo.
self.process_events()
else:
# Sysrepo does not care about the result of the callback.
                # This will raise the exception here if any occurred in the task
# and will be logged (i.e. not lost).
self.tasks.pop(task_id, None)
task.result()
except Exception:
LOG.exception("failure in task: %r", task)
# ------------------------------------------------------------------------------
EVENT_NAMES = {
lib.SR_EV_UPDATE: "update",
lib.SR_EV_CHANGE: "change",
lib.SR_EV_DONE: "done",
lib.SR_EV_ABORT: "abort",
lib.SR_EV_ENABLED: "enabled",
lib.SR_EV_RPC: "rpc",
}
NOTIF_TYPES = {
lib.SR_EV_NOTIF_REALTIME: "realtime",
lib.SR_EV_NOTIF_REPLAY: "replay",
lib.SR_EV_NOTIF_REPLAY_COMPLETE: "replay_complete",
lib.SR_EV_NOTIF_STOP: "stop",
lib.SR_EV_NOTIF_SUSPENDED: "suspended",
lib.SR_EV_NOTIF_RESUMED: "resumed",
}
# ------------------------------------------------------------------------------
@ffi.def_extern(name="srpy_module_change_cb")
def module_change_callback(session, module, xpath, event, req_id, priv):
"""
Callback to be called on the event of changing datastore content of the specified
module.
This python function mapped to the C srpy_module_change_cb function. When the C
srpy_module_change_cb function is called by libsysrepo.so, this function is called
with the same arguments.
:arg "sr_session_ctx_t *" session:
Implicit session (do not stop) with information about the changed data.
:arg "const char *" module:
Name of the module where the change has occurred.
:arg "const char *" xpath:
XPath used when subscribing, NULL if the whole module was subscribed to.
:arg "sr_event_t" event:
Type of the callback event that has occurred.
:arg "uint32_t" req_id:
Request ID unique for the specific module_name. Connected events for one request
(SR_EV_CHANGE and SR_EV_DONE, for example) have the same request ID.
:arg "void *" priv:
Private context opaque to sysrepo. Contains a CFFI handle to the Subscription
python object.
:returns:
User error code (sr_error_t).
:raises:
IMPORTANT: This function *CANNOT* raise any exception. The C callstack does not
handle that well and when it happens the outcome is undetermined. Make sure to
catch all errors and log them so they are not lost.
"""
try:
# convert C arguments to python objects.
from .session import SysrepoSession # circular import
session = SysrepoSession(session, True)
module = c2str(module)
xpath = c2str(xpath)
root_xpath = ("/%s:*" % module) if xpath is None else xpath
subscription = ffi.from_handle(priv)
callback = subscription.callback
private_data = subscription.private_data
event_name = EVENT_NAMES[event]
if is_async_func(callback):
task_id = (event, req_id)
if task_id not in subscription.tasks:
# ATTENTION: the implicit session passed as argument will be
# freed when this function returns. The async callback must NOT
# keep a reference on it as it will be invalid. Changes must be
# gathered now.
changes = list(
session.get_changes(
root_xpath + "//.",
include_implicit_defaults=subscription.include_implicit_defaults,
)
)
task = subscription.loop.create_task(
callback(event_name, req_id, changes, private_data)
)
task.add_done_callback(
functools.partial(subscription.task_done, task_id, event_name)
)
subscription.tasks[task_id] = task
if event not in (lib.SR_EV_UPDATE, lib.SR_EV_CHANGE):
# Return immediately, process_events will not be called in
# subscription.task_done. Sysrepo does not care about the
# result of the operation.
return lib.SR_ERR_OK
task = subscription.tasks[task_id]
if not task.done():
return lib.SR_ERR_CALLBACK_SHELVE
del subscription.tasks[task_id]
task.result() # raise error if any
else:
changes = list(
session.get_changes(
root_xpath + "//.",
include_implicit_defaults=subscription.include_implicit_defaults,
)
)
callback(event_name, req_id, changes, private_data)
return lib.SR_ERR_OK
except SysrepoError as e:
if (
event in (lib.SR_EV_UPDATE, lib.SR_EV_CHANGE)
and e.msg
and isinstance(session, SysrepoSession)
and isinstance(xpath, str)
):
session.set_error(xpath, e.msg)
return e.rc
except BaseException as e:
# ATTENTION: catch all exceptions!
# including KeyboardInterrupt, CancelledError, etc.
# We are in a C callback, we cannot let any error pass
LOG.exception("%r callback failed", locals().get("callback", priv))
if (
event in (lib.SR_EV_UPDATE, lib.SR_EV_CHANGE)
and isinstance(session, SysrepoSession)
and isinstance(xpath, str)
):
session.set_error(xpath, str(e))
return lib.SR_ERR_CALLBACK_FAILED
# ------------------------------------------------------------------------------
@ffi.def_extern(name="srpy_oper_data_cb")
def oper_data_callback(session, module, xpath, req_xpath, req_id, parent, priv):
"""
Callback to be called when operational data at the selected xpath are requested.
:arg "sr_session_ctx_t *" session:
Implicit session (do not stop).
:arg "const char *" module:
Name of the affected module.
:arg "const char *" xpath:
XPath identifying the subtree that is supposed to be provided, same as the one
used for the subscription.
:arg "const char *" req_xpath:
XPath as requested by a client. Can be NULL.
:arg "uint32_t" req_id:
Request ID unique for the specific module name.
:arg "struct lyd_node **" parent:
Pointer to an existing parent of the requested nodes. Is NULL for top-level
nodes. Callback is supposed to append the requested nodes to this data subtree
and return either the original parent or a top-level node.
:arg "void *" priv:
Private context opaque to sysrepo. Contains a CFFI handle to the Subscription
python object.
:returns:
User error code (sr_error_t).
:raises:
IMPORTANT: This function *CANNOT* raise any exception. The C callstack does not
handle that well and when it happens the outcome is undetermined. Make sure to
catch all errors and log them so they are not lost.
"""
try:
# convert C arguments to python objects.
from .session import SysrepoSession # circular import
session = SysrepoSession(session, True)
module = c2str(module)
xpath = c2str(xpath)
req_xpath = c2str(req_xpath)
subscription = ffi.from_handle(priv)
callback = subscription.callback
private_data = subscription.private_data
if is_async_func(callback):
task_id = req_id
if task_id not in subscription.tasks:
task = subscription.loop.create_task(callback(req_xpath, private_data))
task.add_done_callback(
functools.partial(subscription.task_done, task_id, "oper")
)
subscription.tasks[task_id] = task
task = subscription.tasks[task_id]
if not task.done():
return lib.SR_ERR_CALLBACK_SHELVE
del subscription.tasks[task_id]
oper_data = task.result()
else:
oper_data = callback(req_xpath, private_data)
if isinstance(oper_data, dict):
# convert oper_data to a libyang.DNode object
ly_ctx = session.get_ly_ctx()
dnode = ly_ctx.get_module(module).parse_data_dict(
oper_data, data=True, strict=subscription.strict, validate=False
)
if dnode is not None:
if parent[0]:
root = DNode.new(ly_ctx, parent[0]).root()
root.merge(dnode, destruct=True)
else:
# The FFI bindings of libyang and sysrepo are different.
# Casting is required.
parent[0] = ffi.cast("struct lyd_node *", dnode.cdata)
elif oper_data is not None:
raise TypeError(
"bad return type from %s (expected dict or None)" % callback
)
return lib.SR_ERR_OK
except SysrepoError as e:
if e.msg and isinstance(session, SysrepoSession) and isinstance(xpath, str):
session.set_error(xpath, e.msg)
return e.rc
except BaseException as e:
# ATTENTION: catch all exceptions!
# including KeyboardInterrupt, CancelledError, etc.
# We are in a C callback, we cannot let any error pass
LOG.exception("%r callback failed", locals().get("callback", priv))
if isinstance(session, SysrepoSession) and isinstance(xpath, str):
session.set_error(xpath, str(e))
return lib.SR_ERR_CALLBACK_FAILED
# ------------------------------------------------------------------------------
@ffi.def_extern(name="srpy_rpc_tree_cb")
def rpc_callback(session, xpath, input_node, event, req_id, output_node, priv):
"""
Callback to be called for the delivery of an RPC/action.
:arg "sr_session_ctx_t *" session:
Implicit session (do not stop).
:arg "const char *" xpath:
Simple operation path identifying the RPC/action.
:arg "const struct lyd_node *" input_node:
Data tree of input parameters.
:arg "sr_event_t" event:
Type of the callback event that has occurred.
:arg "uint32_t" req_id:
Request ID unique for the specific xpath.
:arg "struct lyd_node *" output_node:
Data tree of output parameters. Should be allocated on heap, will be freed by
sysrepo after sending of the RPC response.
:arg "void *" priv:
Private context opaque to sysrepo. Contains a CFFI handle to the Subscription
python object.
:returns:
User error code (sr_error_t).
:raises:
IMPORTANT: This function *CANNOT* raise any exception. The C callstack does not
handle that well and when it happens the outcome is undetermined. Make sure to
catch all errors and log them so they are not lost.
"""
try:
# convert C arguments to python objects.
from .session import SysrepoSession # circular import
session = SysrepoSession(session, True)
subscription = ffi.from_handle(priv)
callback = subscription.callback
private_data = subscription.private_data
event_name = EVENT_NAMES[event]
ly_ctx = session.get_ly_ctx()
rpc_input = DNode.new(ly_ctx, input_node)
xpath = rpc_input.path()
# strip all parents, only preserve the input tree
input_dict = next(
iter(
rpc_input.print_dict(
include_implicit_defaults=subscription.include_implicit_defaults,
absolute=False,
).values()
)
)
if is_async_func(callback):
task_id = (event, req_id)
if task_id not in subscription.tasks:
task = subscription.loop.create_task(
callback(xpath, input_dict, event_name, private_data)
)
task.add_done_callback(
functools.partial(subscription.task_done, task_id, event_name)
)
subscription.tasks[task_id] = task
task = subscription.tasks[task_id]
if not task.done():
return lib.SR_ERR_CALLBACK_SHELVE
del subscription.tasks[task_id]
output_dict = task.result()
else:
output_dict = callback(xpath, input_dict, event_name, private_data)
if event != lib.SR_EV_RPC:
            # May happen when there are multiple callbacks registered for the
# same RPC. If one of the callbacks has failed, the other ones will
# be called with SR_EV_ABORT. In that case, abort early and do
# not return the RPC output data to sysrepo.
return lib.SR_ERR_OK
if isinstance(output_dict, dict):
# update output_node with contents of output_dict
DNode.new(ly_ctx, output_node).merge_data_dict(
output_dict, rpcreply=True, strict=subscription.strict, validate=False
)
elif output_dict is not None:
raise TypeError(
"bad return type from %s (expected dict or None)" % callback
)
return lib.SR_ERR_OK
except SysrepoError as e:
if e.msg and isinstance(session, SysrepoSession) and isinstance(xpath, str):
session.set_error(xpath, e.msg)
return e.rc
except BaseException as e:
# ATTENTION: catch all exceptions!
# including KeyboardInterrupt, CancelledError, etc.
# We are in a C callback, we cannot let any error pass
LOG.exception("%r callback failed", locals().get("callback", priv))
if isinstance(session, SysrepoSession) and isinstance(xpath, str):
session.set_error(xpath, str(e))
return lib.SR_ERR_CALLBACK_FAILED
# ------------------------------------------------------------------------------
@ffi.def_extern(name="srpy_event_notif_tree_cb")
def event_notif_tree_callback(session, notif_type, notif, timestamp, priv):
"""
Callback to be called when a notification is received.
:arg "sr_session_ctx_t *" session:
Implicit session (do not stop).
:arg "sr_ev_notif_type_t" notif_type:
Type of the notification event that has occurred.
:arg "const struct lyd_node *" notif:
Data tree of input parameters.
:arg "uint32_t" timestamp:
Timestamp of the notification.
:arg "void *" priv:
Private context opaque to sysrepo. Contains a CFFI handle to the Subscription
python object.
:returns:
None
:raises:
IMPORTANT: This function *CANNOT* raise any exception. The C callstack does not
handle that well and when it happens the outcome is undetermined. Make sure to
catch all errors and log them so they are not lost.
"""
try:
# convert C arguments to python objects.
from .session import SysrepoSession # circular import
session = SysrepoSession(session, True)
subscription = ffi.from_handle(priv)
callback = subscription.callback
private_data = subscription.private_data
notif_type = NOTIF_TYPES[notif_type]
ly_ctx = session.get_ly_ctx()
notif_dnode = DNode.new(ly_ctx, notif)
xpath = notif_dnode.path()
notif_dict = next(
iter(
notif_dnode.print_dict(
include_implicit_defaults=subscription.include_implicit_defaults,
absolute=False,
).values()
)
)
if is_async_func(callback):
task = subscription.loop.create_task(
callback(xpath, notif_type, notif_dict, timestamp, private_data)
)
task.add_done_callback(
functools.partial(subscription.task_done, None, "notif")
)
else:
callback(xpath, notif_type, notif_dict, timestamp, private_data)
except BaseException:
# ATTENTION: catch all exceptions!
# including KeyboardInterrupt, CancelledError, etc.
# We are in a C callback, we cannot let any error pass
LOG.exception("%r callback failed", locals().get("callback", priv))
|
from django.db import models
class MigrationScripts(models.Model):
app = models.CharField(max_length=255)
applied_on = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=255)
|
#
# Copyright (c) 2019-2020 Carnegie Mellon University
# All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
"""Utility functions for object detection providers"""
import os
import shutil
from pathlib import Path
import requests
from logzero import logger
from tqdm import tqdm
# Registry to track available tensorflow detectors
REGISTRY = {}
XDG_CACHE_DIR = (
Path(os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))) / "opentpod-tools"
)
def get_cache_entry(entry_name):
"""returns path to entry in cache or None"""
cache_dir = XDG_CACHE_DIR / entry_name
if not cache_dir.exists():
return None
return cache_dir
def download_and_extract_url_tarball_to_cache_dir(tarball_url, entry_name):
"""Download and extract tarball from url to a cache_dir
entry_name: used as subdir name within the cache_dir. no '/' allowed
"""
XDG_CACHE_DIR.mkdir(parents=True, exist_ok=True)
tarball_basename = tarball_url.split("/")[-1]
download_path = XDG_CACHE_DIR / tarball_basename
if not download_path.exists():
with tqdm(
desc=f"Downloading {tarball_basename}",
total=float("inf"),
unit="B",
unit_scale=True,
unit_divisor=1024,
) as progress, requests.get(tarball_url, stream=True) as response:
response.raise_for_status()
progress.total = int(response.headers.get("Content-Length", 0))
try:
with open(download_path, "wb") as output_file:
for chunk in response.iter_content(chunk_size=4096):
output_file.write(chunk)
progress.update(len(chunk))
            except IOError as exc:
                # remove the partial download so that a retry starts from scratch
                download_path.unlink()
                logger.exception(exc)
                raise exc
logger.info("Extracting %s", download_path)
output_dir = XDG_CACHE_DIR / entry_name
output_dir.mkdir(parents=True, exist_ok=True)
# try to unpack, remove partially extracted cache state on failure
    # shutil.unpack_archive docs don't list all possible exceptions so we have to
# use a generic catch-all.
try:
shutil.unpack_archive(os.fspath(download_path), os.fspath(output_dir))
except Exception as exc: # pylint: disable=broad-except
logger.exception(exc)
shutil.rmtree(output_dir)
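# Hedged usage sketch (URL and entry name are placeholders): download a tarball
# once, extract it under the XDG cache, and reuse the cached directory later.
#
# ENTRY = "example_model"
# if get_cache_entry(ENTRY) is None:
#     download_and_extract_url_tarball_to_cache_dir(
#         "https://example.com/example_model.tar.gz", ENTRY)
# model_dir = get_cache_entry(ENTRY)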
|
"""This python script contains the libraries and functions needed to run the other two ML py scripts in this folder.
"""
# load libraries necessary for both offline and live ML scripts
import argparse
import numpy as np
import pandas as pd
from datetime import timedelta
from datetime import datetime
import math
# ML Model
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.metrics import f1_score
from sklearn import preprocessing
from sklearn.metrics import plot_confusion_matrix
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
import imblearn
#from imblearn.over_sampling import RandomOverSampler #optional if you want to try oversampling imbalanced data
import joblib
# define path names
supporting_files_path = "../Supporting_Files/"
# Queue values from definition
QUEUE_START_SPEED = 0.00
QUEUE_FOLLOWING_SPEED = (10.0 *0.681818) # convert ft/sec to mph
QUEUE_HEADWAY_DISTANCE = 20.0 #ft, in queue definition
QUEUE_DISTANCE_WITHIN_STOP_POINT = 20 #ft
def read_BSMs_file(BSMs_X_filename):
"""Read BSMs csv file and store in pandas dataframe (df)"""
df = pd.read_csv(supporting_files_path + BSMs_X_filename)
#print(df.head())
return df
def read_max_queues_Y_file(max_queues_Y_filename):
"""Read max queues by link and 30 secs csv file and store in pandas dataframe (y_df)"""
y_df = pd.read_csv(supporting_files_path + max_queues_Y_filename)
# clean up the time column so it only has hour, mins, seconds
y_df['time'] = y_df['time_30'].str[7:15]
y_df.drop(['time_30'], axis=1, inplace=True)
#print(y_df.head())
return y_df
def read_veh_lengths_file(veh_lengths_filename):
"""Read vehicle lengths by type csv file and store in pandas dataframe (veh_len_df)"""
veh_len_df = pd.read_csv(supporting_files_path + veh_lengths_filename)
#print(veh_len_df.head())
return veh_len_df
def read_stoplines_file(stoplines_filename):
"""Read stop lines by lane and link csv file and store in pandas dataframe (stopline_df)"""
stoplines_df = pd.read_csv(supporting_files_path + stoplines_filename)
#print(stopline_df.head())
return stoplines_df
def read_signal_timing_file(signal_timing_filename):
"""Read signal timing (% time green, amber, red) by link and 30 secs csv file and store in pandas dataframe (signals_df)"""
signals_df = pd.read_csv(supporting_files_path + signal_timing_filename)
    # reformat transtime_30sec to string, convert timedelta to string
signals_df['time_30'] = signals_df['transtime_30sec'].astype(str).str[7:15]
signals_df.drop('transtime_30sec',axis='columns', inplace=True)
#print(signals_df.head())
return signals_df
def read_link_corners_file(link_corner_points_filename):
"""Read link corner points (X,Y) csv file and store in pandas dataframe (link_points_df)"""
link_points_df = pd.read_csv(supporting_files_path + link_corner_points_filename)
#print(link_points_df.head())
return link_points_df
def create_avg_stoplines_df(stoplines_df_name):
"""Create an aggregated stoplines_avg_df with the average stopline X, Y for each link and number of lanes"""
stopline_avg_df = stoplines_df_name.groupby('Link_ID')['stopline_X'].mean().reset_index(name='mean_X')
stopline_avg_df['mean_Y'] = stoplines_df_name.groupby('Link_ID')['stopline_Y'].mean().reset_index(name='mean_Y').iloc[:,1]
stopline_avg_df['n_lanes'] = stoplines_df_name.groupby('Link_ID')['Lane'].count().reset_index().iloc[:,1]
stopline_avg_df['link_direction'] = stoplines_df_name.groupby('Link_ID')['Link_direction'].first().reset_index().iloc[:,1]
#print(stopline_avg_df.head())
return stopline_avg_df
def assign_BSMs_to_roadway_links(df_BSM_name, link_points_df_name):
"""This function assigns each BSM to its roadway link based on the link corner points. This assignment is an approximation.
Please note: You will likely need to update the latter part of this function to account for your roadway geometry. It is currently designed to separate NB and SB links and filter BSMs based on their headings.
Please note: This could take a while to run on all BSMs."""
# Initialize a new empty column for assigned_LinkID
df_BSM_name['assigned_linkID'] = np.nan
# save the links in an array/list
links = link_points_df_name.Link.unique()
# find the min and max X,Ys from the four corner points for each link
for idx, link in enumerate(links):
min_x = link_points_df_name.loc[link_points_df_name['Link']==link, ['X_lower_L','X_lower_R','X_upper_L', 'X_upper_R']].min().min()
max_x = link_points_df_name.loc[link_points_df_name['Link']==link, ['X_lower_L','X_lower_R','X_upper_L', 'X_upper_R']].max().max()
min_y = link_points_df_name.loc[link_points_df_name['Link']==link, ['Y_lower_L','Y_lower_R','Y_upper_L', 'Y_upper_R']].min().min()
max_y = link_points_df_name.loc[link_points_df_name['Link']==link, ['Y_lower_L','Y_lower_R','Y_upper_L', 'Y_upper_R']].max().max()
mask = ((df_BSM_name.X>=min_x) & (df_BSM_name.X<=max_x) & (df_BSM_name.Y>=min_y) & (df_BSM_name.Y<=max_y))
# BSMs that fall in that link mask are assigned to that link in a new column
df_BSM_name.loc[mask,'assigned_linkID'] = link
# if assigned_linkID is NA then drop
df = df_BSM_name.loc[df_BSM_name['assigned_linkID'].notna()]
#print(df.shape, "shape of bsms assigned to links before heading filter")
# **You will need to edit this section for your specific network geometry
# figure out whether NB or SB for each of the six Flatbush links
# then have two filters: one for NB and one for SB links
# if BSM heading is NOT in the range for NB then drop
# if BSM is NOT in the range for SB then drop
# for TCA BSM heading, 0 degrees is due N
SB_links = ['5677903#1', '221723366', '221731899#2.168']
NB_links = ['349154287#5', '-139919720#0.114', '-139919720#0', '-23334090#0']
# create a filter mask for South links
maskS = ((df['assigned_linkID'].isin(SB_links)) & (df['Heading']>90) & (df['Heading']<270))
# create a filter mask for North links
maskN = ((df['assigned_linkID'].isin(NB_links)) & ((df['Heading']>270) | (df['Heading']<90)))
# combine the S and N masks
df = df.loc[maskS | maskN]
# Add this step to combine links -139919720#0.114 and -139919720#0
df['assigned_linkID'].replace("-139919720#0.114", "-139919720#0", inplace=True)
#print(df.shape, "shape of corrected bsms")
return df
def format_result(result):
"""Format result of simulation time float into datetime,
add 7 hours for Flatbush simulation data because 0.0 corresponds to 7:00 AM,
output is time in HH:MM:SS.microseconds"""
seconds = int(result)
microseconds = (result * 1000000) % 1000000
output = timedelta(0, seconds, microseconds) + timedelta(hours=7)
return output
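# Quick illustration (values are made up): simulation time 0.0 corresponds to
# 07:00:00 and 95.5 corresponds to 07:01:35.500000.
#   format_result(0.0)   # -> 7:00:00
#   format_result(95.5)  # -> 7:01:35.500000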
def distance_between(origin_x, origin_y, destination_x, destination_y):
"""Calculate the distance between two points. """
return ((origin_x - destination_x)**2 + (origin_y - destination_y)**2)**.5
def min_dist_to_avg_stopbar(group_row, stopline_avg_df_name):
"""Calculate the distance between each grouping min X,Y and the avg stopline X,Y for that link.
This distance is an approximation and depends on the direction and curvature of the links."""
row_stop_X, row_stop_Y = stopline_avg_df_name.loc[stopline_avg_df_name['Link_ID']==group_row['assigned_linkID'],['mean_X','mean_Y']].values[0]
direction = stopline_avg_df_name.loc[stopline_avg_df_name['Link_ID']==group_row['assigned_linkID'],['link_direction']].values[0]
# we have to do the opposite for N direction links
if direction =='N':
row_dist = distance_between(group_row['max_X'], group_row['max_Y'], row_stop_X, row_stop_Y)
else:
row_dist = distance_between(group_row['min_X'], group_row['min_Y'], row_stop_X, row_stop_Y)
return(row_dist)
def max_dist_to_avg_stopbar(group_row, stopline_avg_df_name):
"""Calculate the max distance between each grouping max X,Y and the avg stopline X,Y for that link."""
row_stop_X, row_stop_Y = stopline_avg_df_name.loc[stopline_avg_df_name['Link_ID']==group_row['assigned_linkID'],['mean_X','mean_Y']].values[0]
direction = stopline_avg_df_name.loc[stopline_avg_df_name['Link_ID']==group_row['assigned_linkID'],['link_direction']].values[0]
# Do the opposite for N direction links
if direction =='N':
row_dist = distance_between(group_row['min_X'], group_row['min_Y'], row_stop_X, row_stop_Y)
else:
row_dist = distance_between(group_row['max_X'], group_row['max_Y'], row_stop_X, row_stop_Y)
return(row_dist)
def to_seconds(s):
"""Convert the 30 second time string to a float."""
hr, minute, sec = [float(x) for x in s.split(':')]
total_seconds = hr*3600 + minute*60 + sec
return total_seconds
def join_veh_len_to_BSM_df(df_BSM_name, veh_len_df_name):
"""Join vehicle length column to main BSM df."""
df = df_BSM_name.merge(veh_len_df_name[['Type_ID','Length (ft)']], how='left', left_on='Type', right_on='Type_ID')
df = df.drop(['Type_ID'], axis=1)
#print(df.head())
return df
def feature_engineering(df_BSM_name, stopline_avg_df_name):
"""Create grouped df with new aggregated features based on BSMs."""
# Our main group by object (road link, 30 second time chunk)
gb_main = df_BSM_name.groupby(['transtime_30sec','assigned_linkID'])[['BSM_tmp_ID']].count()
# creating the base aggregated DF to add columns to
base_df = gb_main.add_suffix('_Count').reset_index()
gb = df_BSM_name.groupby(['transtime_30sec','assigned_linkID'])
# get the value of the average vehicle length across all BSMs
avg_veh_len = df_BSM_name["Length (ft)"].mean()
median_veh_len = df_BSM_name["Length (ft)"].median()
# count # of BSMs in 30 sec-link grouping with 0 speed
base_df['num_BSMs_0speed'] = gb['Speed'].apply(lambda x: (x==0).sum()).reset_index(name='sum').iloc[:,2]
# number of BSMs with speed between 0 and QUEUE_FOLLOWING_SPEED
base_df['num_BSMs_0_to_following_speed'] = gb['Speed'].apply(lambda x: ((x>0) & (x<=QUEUE_FOLLOWING_SPEED)).sum()).reset_index(name='sum').iloc[:,2]
# number of BSMs greater than QUEUE_FOLLOWING_SPEED
base_df['num_BSMs_above_following_speed'] = gb['Speed'].apply(lambda x: (x>QUEUE_FOLLOWING_SPEED).sum()).reset_index(name='sum').iloc[:,2]
# number of BSMs with vehicle length above average
base_df['num_BSMs_len_above_avg'] = gb["Length (ft)"].apply(lambda x: (x>avg_veh_len).sum()).reset_index(name='sum').iloc[:,2]
# number of BSMs with vehicle length equal to or below average
base_df['num_BSMs_len_below_avg'] = gb["Length (ft)"].apply(lambda x: (x<=avg_veh_len).sum()).reset_index(name='sum').iloc[:,2]
# get AVG vehicle length per grouping
base_df['veh_len_avg_in_group'] = gb["Length (ft)"].mean().reset_index(name='sum').iloc[:,2]
# get the MEDIAN vehicle length per grouping
base_df['veh_len_med_in_group'] = gb["Length (ft)"].median().reset_index(name='sum').iloc[:,2]
# speed standard deviation
base_df['speed_stddev'] = gb["Speed"].std().reset_index().iloc[:,2]
# max speed in grouping
base_df['speed_max'] = gb["Speed"].max().reset_index().iloc[:,2]
# acceleration standard deviation
# could be called "Instant_Acceleration" instead of "Avg_Acceleration"
base_df['accel_stddev'] = gb["Avg_Acceleration"].std().reset_index().iloc[:,2]
# number of BSMs with negative acceleration
base_df['num_BSMs_neg_accel'] = gb["Avg_Acceleration"].apply(lambda x: (x<=0).sum()).reset_index(name='sum').iloc[:,2]
# Max X per group
base_df['max_X'] = gb["X"].max().reset_index(name='max').iloc[:,2]
# Max Y per group
base_df['max_Y'] = gb["Y"].max().reset_index(name='max').iloc[:,2]
# Min X per group
base_df['min_X'] = gb["X"].min().reset_index(name='max').iloc[:,2]
# Min Y per group
base_df['min_Y'] = gb["Y"].min().reset_index(name='max').iloc[:,2]
# distance between Max X,Y and Min X,Y to indicate how far apart the BSMs are
base_df['max_distance_between_BSMs'] = base_df.apply(lambda row: distance_between(row['max_X'],row['max_Y'],row['min_X'],row['min_Y']), axis=1)
# direction matters here
base_df['min_dist_to_stopbar'] = base_df.apply(lambda row: min_dist_to_avg_stopbar(row, stopline_avg_df_name), axis=1)
base_df['max_dist_to_stopbar'] = base_df.apply(lambda row: max_dist_to_avg_stopbar(row, stopline_avg_df_name), axis=1)
# Create frequency of braking features
base_df['num_braking'] = gb["brakeStatus"].apply(lambda x: (x>0).sum()).reset_index(name='sum').iloc[:,2]
base_df['num_braking_hard'] = gb["hardBraking"].apply(lambda x: (x>0).sum()).reset_index(name='sum').iloc[:,2]
# change it to 1/0 yes/no hard braking occurred
base_df['hard_braking'] = 0
mask_hardBrake = (base_df['num_braking_hard']>0)
base_df.loc[mask_hardBrake,'hard_braking'] = 1
# convert timedelta to string
base_df['time_30'] = base_df['transtime_30sec'].astype(str).str[7:15]
# avoid dropping for creating the queue_count column for previous 30 secs
base_df.drop('transtime_30sec',axis='columns', inplace=True)
return base_df
def handle_missing_data(df_xy_name, df_BSM_name):
"""Since python's scikit-learn will not accept rows with NA, this function replaces NAs with 0 for most columns except the veh len avg and median.
Assumption: rows with NA for the BSM features did not see any BSMs sent from a CV in that link and time period.
Please note: Handling missing data is more an art than a science! You may want to handle NAs differently in your case."""
# explore missingness first
#print(df_xy_name.isna().sum(), "total NA")
## Handling NaN rows in df_xy
#replace NaN with 0
df_xy = df_xy_name.fillna(0)
# get the value of the average vehicle length across all BSMs
avg_veh_len = df_BSM_name["Length (ft)"].mean()
median_veh_len = df_BSM_name["Length (ft)"].median()
# replace 0 values for veh_len_avg_in_group with the average over all BSMs
mask_veh_avg = (df_xy['veh_len_avg_in_group']==0)
df_xy.loc[mask_veh_avg,'veh_len_avg_in_group'] = avg_veh_len
# replace 0 values for veh_len_med_in_group with the median over all BSMs
mask_veh_med = (df_xy['veh_len_med_in_group']==0)
df_xy.loc[mask_veh_med,'veh_len_med_in_group'] = median_veh_len
return df_xy
def label_encode_categorical_features(df_xy_name):
"""Label encode categorical features for Random Forest.
Please note: encoding is also more of an art than a science. You could try different methods."""
# label encode the link IDs
df_xy_name["link"] = df_xy_name["link"].astype('category')
df_xy_name["link_encoded"] = df_xy_name["link"].cat.codes
# now drop the original 'link' column (you don't need it anymore)
df_xy_name.drop(['link'],axis=1, inplace=True)
# label encode the roadway direction
df_xy_name["link_direction"] = df_xy_name["link_direction"].astype('category')
df_xy_name["link_direction_encoded"] = df_xy_name["link_direction"].cat.codes
# now drop the original 'link_direction' column (you don't need it anymore)
df_xy_name.drop(['link_direction'],axis=1, inplace=True)
# needs to be numeric to work in sklearn
df_xy_name['time_float'] = df_xy_name['time'].apply(to_seconds)
return df_xy_name
def feature_scaling_X(X_name):
"""Minmax scale the features X.
Please note: Feature scaling is not necessarily required for a Random Forest classifier, but other classifiers require it."""
min_max_scaler = preprocessing.MinMaxScaler()
#Minmax scaler
X_minmax = min_max_scaler.fit_transform(X_name)
return X_minmax
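# Hedged end-to-end sketch (file names are placeholders, not part of the
# original pipeline): how the helpers above are meant to chain together before
# training a classifier.
#
# bsm_df = read_BSMs_file("example_BSMs.csv")
# veh_len_df = read_veh_lengths_file("example_veh_lengths.csv")
# stopline_avg_df = create_avg_stoplines_df(read_stoplines_file("example_stoplines.csv"))
# link_points_df = read_link_corners_file("example_link_corners.csv")
# bsm_df = assign_BSMs_to_roadway_links(bsm_df, link_points_df)
# bsm_df = join_veh_len_to_BSM_df(bsm_df, veh_len_df)
# base_df = feature_engineering(bsm_df, stopline_avg_df)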
|
# -*- coding: utf-8 -*-
""" Utilities, specific to the PointDetector stuff. """
import os
import copy
import cv2
def write_annotated_image(input_image, ids, image_points, image_file_name):
"""
Takes an input image, copies it, annotates point IDs and writes
to the testing output folder.
"""
image = copy.deepcopy(input_image)
font = cv2.FONT_HERSHEY_SIMPLEX
for counter in range(ids.shape[0]):
cv2.putText(image,
str(ids[counter][0]),
(int(image_points[counter][0]),
int(image_points[counter][1])),
font, 0.5, (0, 255, 0), 2, cv2.LINE_AA)
split_path = os.path.splitext(image_file_name)
previous_dir = os.path.dirname(split_path[0])
previous_dir = os.path.basename(previous_dir)
base_name = os.path.basename(split_path[0])
output_file = os.path.join('tests/output', base_name
+ '_'
+ previous_dir
+ '_labelled.png')
cv2.imwrite(output_file, image)
|
# Copyright 2019 The Dreamer Authors. Copyright 2020 Plan2Explore Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import itertools
import os
import pickle
import six
import sys
import threading
import time
import traceback
import uuid
import numpy as np
import tensorflow as tf
class StopExperiment(Exception):
pass
class WorkerConflict(Exception):
pass
class SkipRun(Exception):
pass
class Experiment(object):
def __init__(
self, basedir, rolloutdir, process_fn, start_fn=None, resume_fn=None,
num_runs=None, worker_name=None, ping_every=30, resume_runs=True):
self._basedir = basedir
self._rolloutdir = rolloutdir
self._process_fn = process_fn
self._start_fn = start_fn
self._resume_fn = resume_fn
self._num_runs = num_runs
self._worker_name = worker_name or str(uuid.uuid4())
self._ping_every = ping_every
self._ping_stale = ping_every and 2 * ping_every
self._resume_runs = resume_runs
def __iter__(self):
for current_run in self._generate_run_numbers():
logdir = self._basedir and os.path.join(
self._basedir, '{:05}'.format(current_run))
rolloutdir = self._rolloutdir and os.path.join(
self._rolloutdir, '{:05}'.format(current_run))
try:
run = Run(
logdir, rolloutdir, self._process_fn, self._start_fn, self._resume_fn,
self._worker_name, self._ping_every, self._ping_stale,
self._resume_runs)
yield run
except SkipRun:
continue
except StopExperiment:
print('Stopping.')
break
print('All runs completed.')
def _generate_run_numbers(self):
if self._num_runs:
# Don't wait initially and see if there are runs that are already stale.
runs = np.random.permutation(range(self._num_runs))
for run in runs:
yield run + 1
# At the end, wait for all dead runs to become stale, and pick them up.
# This is necessary for complete runs of workers that died very recently.
if self._ping_stale:
time.sleep(self._ping_stale)
for run in runs:
yield run + 1
else:
# For infinite runs, we want to always finish started jobs first.
# Therefore, we need to wait for them to become stale in the beginning.
if self._ping_stale:
time.sleep(self._ping_stale)
for run in itertools.count():
yield run + 1
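# Hedged usage sketch (the process function below is a placeholder): Experiment
# yields Run objects, and iterating a Run drives process_fn one value at a time.
#
# def process(logdir, rolloutdir):
#     yield 'one unit of work'
#
# for run in Experiment('~/logs', '~/rollouts', process, num_runs=3):
#     for value in run:
#         pass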
class Run(object):
def __init__(
self, logdir, rolloutdir, process_fn, start_fn, resume_fn, worker_name,
ping_every=30, ping_stale=60, reuse_if_exists=True):
self._logdir = os.path.expanduser(logdir)
self._rolloutdir = os.path.expanduser(rolloutdir)
self._process_fn = process_fn
self._worker_name = worker_name
self._ping_every = ping_every
self._ping_stale = ping_stale
self._logger = self._create_logger()
try:
if self._should_start():
self._claim()
self._logger.info('Start.')
self._init_fn = start_fn
elif reuse_if_exists and self._should_resume():
self._claim()
self._logger.info('Resume.')
self._init_fn = resume_fn
else:
raise SkipRun
except WorkerConflict:
self._logger.info('Leave to other worker.')
raise SkipRun
self._thread = None
self._running = [True]
self._thread = threading.Thread(target=self._store_ping_thread)
self._thread.daemon = True # Terminate with main thread.
self._thread.start()
def __iter__(self):
try:
args = self._init_fn and self._init_fn(self._logdir)
if args is None:
args = ()
if not isinstance(args, tuple):
args = (args,)
for value in self._process_fn(self._logdir, self._rolloutdir, *args):
if not self._running[0]:
break
yield value
self._logger.info('Done.')
self._store_done()
except WorkerConflict:
      self._logger.warning('Unexpected takeover.')
raise SkipRun
except Exception as e:
exc_info = sys.exc_info()
self._handle_exception(e)
six.reraise(*exc_info)
finally:
self._running[0] = False
self._thread and self._thread.join()
def _should_start(self):
if not self._logdir:
return True
if tf.gfile.Exists(os.path.join(self._logdir, 'PING')):
return False
if tf.gfile.Exists(os.path.join(self._logdir, 'DONE')):
return False
return True
def _should_resume(self):
if not self._logdir:
return False
if tf.gfile.Exists(os.path.join(self._logdir, 'DONE')):
# self._logger.debug('Already done.')
return False
if not tf.gfile.Exists(os.path.join(self._logdir, 'PING')):
# self._logger.debug('Not started yet.')
return False
last_worker, last_ping = self._read_ping()
if last_worker != self._worker_name and last_ping < self._ping_stale:
# self._logger.debug('Already in progress.')
return False
return True
def _claim(self):
if not self._logdir:
return False
self._store_ping(overwrite=True)
if self._ping_every:
time.sleep(self._ping_every)
if self._read_ping()[0] != self._worker_name:
raise WorkerConflict
self._store_ping()
def _store_done(self):
if not self._logdir:
return
with tf.gfile.Open(os.path.join(self._logdir, 'DONE'), 'w') as file_:
file_.write('\n')
def _store_fail(self, message):
if not self._logdir:
return
with tf.gfile.Open(os.path.join(self._logdir, 'FAIL'), 'w') as file_:
file_.write(message + '\n')
def _read_ping(self):
if not tf.gfile.Exists(os.path.join(self._logdir, 'PING')):
return None, None
try:
with tf.gfile.Open(os.path.join(self._logdir, 'PING'), 'rb') as file_:
last_worker, last_ping = pickle.load(file_)
duration = (datetime.datetime.utcnow() - last_ping).total_seconds()
return last_worker, duration
except (EOFError, IOError, tf.errors.NotFoundError):
raise WorkerConflict
def _store_ping(self, overwrite=False):
if not self._logdir:
return
try:
last_worker, _ = self._read_ping()
if last_worker is None:
self._logger.info("Create directory '{}'.".format(self._logdir))
tf.gfile.MakeDirs(self._logdir)
elif last_worker != self._worker_name and not overwrite:
raise WorkerConflict
# self._logger.debug('Store ping.')
with tf.gfile.Open(os.path.join(self._logdir, 'PING'), 'wb') as file_:
pickle.dump((self._worker_name, datetime.datetime.utcnow()), file_)
except (EOFError, IOError, tf.errors.NotFoundError):
raise WorkerConflict
def _store_ping_thread(self):
if not self._ping_every:
return
try:
last_write = time.time()
self._store_ping(self._logdir)
while self._running[0]:
if time.time() >= last_write + self._ping_every:
last_write = time.time()
self._store_ping(self._logdir)
# Only wait short times to quickly react to abort.
time.sleep(0.01)
except WorkerConflict:
self._running[0] = False
def _handle_exception(self, exception):
message = ''.join(traceback.format_exception(*sys.exc_info()))
self._logger.warning('Exception:\n{}'.format(message))
self._logger.warning('Failed.')
try:
self._store_done()
self._store_fail(message)
except Exception:
message = ''.join(traceback.format_exception(*sys.exc_info()))
template = 'Exception in exception handler:\n{}'
self._logger.warning(template.format(message))
def _create_logger(self):
run_name = self._logdir and os.path.basename(self._logdir)
methods = {}
for name in 'debug info warning'.split():
methods[name] = lambda unused_self, message: print(
'Worker {} run {}: {}'.format(self._worker_name, run_name, message))
return type('PrefixedLogger', (object,), methods)()
|
#! /usr/bin/env python3
import os
import sys
import stat
import math
from mule_local.JobMule import *
jg = JobGeneration()
############################################################
# Settings that will stay the same for reference & test jobs
############################################################
# run simulation on sphere, not plane
jg.compile.plane_spectral_space = 'disable'
jg.compile.plane_spectral_dealiasing = 'disable'
jg.compile.sphere_spectral_space = 'enable'
jg.compile.sphere_spectral_dealiasing = 'enable'
# enable MPI
jg.compile.sweet_mpi = 'enable'
jg.compile.libsph = 'enable'
jg.compile.threading = 'off'
jg.compile.libfft = 'enable'
# Enable quad math per default for CI REXI method
jg.compile.quadmath = 'enable'
jg.runtime.output_file_mode = 'bin'
# Verbosity mode
jg.runtime.verbosity = 2
# Mode and Physical resolution
jg.runtime.space_res_spectral = 64
jg.runtime.space_res_physical = None
# Benchmark
jg.runtime.benchmark_name = "galewsky"
# Compute error
jg.runtime.compute_error = 0
jg.runtime.f_sphere = 0
jg.runtime.viscosity = 0.0
jg.unique_id_filter = ['compile', 'parallelization']
timestep_size_min = 16
jg.runtime.max_simulation_time = timestep_size_min*1024
jg.runtime.output_timestep_size = jg.runtime.max_simulation_time
#####################################
# Reference Job: swe_sphere with ERK4
#####################################
jg.compile.program = 'swe_sphere'
ref_ts_size = 8
ref_ts_method = 'ln_erk'
ref_ts_order = 4
jg.runtime.rexi_method = None
jg.runtime.timestepping_method = ref_ts_method
jg.runtime.timestepping_order = ref_ts_order
jg.runtime.timestepping_order2 = ref_ts_order
jg.runtime.timestep_size = ref_ts_size
jg.reference_job = True
jg.gen_jobscript_directory()
jg.reference_job = False
# Use this one as the reference solution!
jg.reference_job_unique_id = jg.job_unique_id
#################################
# Test Jobs: libpfasst_swe_sphere
#################################
jg.compile.program = 'libpfasst_swe_sphere_mlsdc'
jg.compile.libpfasst = 'enable'
# LibPFASST runtime parameters
# set them all explicitly to make sure we know what's happening
jg.runtime.libpfasst_nlevels = 1
jg.runtime.libpfasst_nnodes = 5
jg.runtime.libpfasst_nsweeps_coarse = 1
jg.runtime.libpfasst_nodes_type = 'GAUSS_LOBATTO'
jg.runtime.libpfasst_coarsening_multiplier = 0.5
jg.runtime.libpfasst_use_rexi = 0
jg.runtime.libpfasst_implicit_coriolis_force = 0
jg.runtime.libpfasst_use_rk_stepper = 0
#####################################################
#####################################################
#####################################################
timestep_sizes = [timestep_size_min*(2.0**i) for i in range(0, 6)]
#
# Create job scripts
#
#for jg.runtime.libpfasst_nnodes in [3,5]:
for jg.runtime.libpfasst_niters in range(1,3):
for jg.runtime.timestep_size in timestep_sizes:
if jg.runtime.max_simulation_time % jg.runtime.timestep_size != 0:
print("simtime: "+str(jg.runtime.max_simulation_time))
print("timestep_size: "+str(jg.runtime.timestep_size))
raise Exception("Invalid time step size (not remainder-less dividable)")
jg.runtime.timestepping_order = min(jg.runtime.libpfasst_niters, 2 * jg.runtime.libpfasst_nnodes - 3)
jg.gen_jobscript_directory()
|
# -*- coding: utf-8 -*-
import os
import glob
import subprocess
import unittest
# note: f.strip('.s') strips '.'/'s' characters from both ends and can mangle names ending in 's', so split the extension instead
testfile = [os.path.splitext(f)[0] for f in glob.glob('tests/*.s')]
def get_expect_output(testfile):
with open('%s.expected' % testfile, 'rb') as f:
return f.read()
def get_exec_output(testfile):
return subprocess.check_output(['./as_exec', '%s.s' % testfile])
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
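# Minimal usage sketch (names are illustrative): with_metaclass lets one class
# statement work with a custom metaclass on both Python 2 and Python 3.
#
# class MyMeta(type):
#     pass
#
# class MyClass(with_metaclass(MyMeta, object)):
#     pass
#
# type(MyClass) is MyMeta  # -> True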
class TestRunnerMeta(type):
def __new__(mcs, name, bases, dict):
def gen_test(got, expected):
def test(self):
self.assertEqual(got, expected)
return test
for f in testfile:
test_name = 'test_%s' % (os.path.basename(f))
got = get_exec_output(f)
expected = get_expect_output(f)
dict[test_name] = gen_test(got, expected)
return type.__new__(mcs, name, bases, dict)
class TestRunner(with_metaclass(TestRunnerMeta, unittest.TestCase)):
pass
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-07-17 17:01
# @Author : Iceyhuang
# @license : Copyright(C), Tencent
# @Contact : iceyhuang@tencent.com
# @File : visualize_cifar.py
# @Software: PyCharm
# @Version : Python 3.7.3
# Visualize the CIFAR-10 data as images
import os
import pickle
import numpy as np
from scipy.misc import imsave
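# note: scipy.misc.imsave was removed in SciPy >= 1.2; on newer environments
# imageio.imwrite is the usual replacement (assumption about the runtime, the
# original script targets an older SciPy)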
import matplotlib.image as plimg
from PIL import Image
def load_CIFAR_batch(filename):
""" load single batch of cifar """
with open(filename, 'rb')as f:
# datadict = pickle.load(f)
datadict = pickle.load(f, encoding='latin1')
X = datadict['data']
Y = datadict['labels']
X = X.reshape(10000, 3, 32, 32)
Y = np.array(Y)
return X, Y
def load_CIFAR_Labels(filename):
with open(filename, 'rb') as f:
lines = [x for x in f.readlines()]
print(lines)
def visualize1():
num = 5
load_CIFAR_Labels("CIFAR/train/batches.meta")
imgX, imgY = load_CIFAR_batch("CIFAR10/data_batch_{}".format(num))
print(imgX.shape)
print("正在保存图片:")
# for i in range(imgX.shape[0]):
for i in range(10): # 值输出10张图片,用来做演示
# imgs = imgX[i - 1]#?
imgs = imgX[i]
img0 = imgs[0]
img1 = imgs[1]
img2 = imgs[2]
        i0 = Image.fromarray(img0)  # build an Image object from the array data
i1 = Image.fromarray(img1)
i2 = Image.fromarray(img2)
img = Image.merge("RGB", (i0, i1, i2))
name = "img" + str(i) + '.png'
img.save("./cifar10_images/train" + name, "png") # 文件夹下是RGB融合后的图像
for j in range(0, imgs.shape[0]):
# img = imgs[j - 1]
img = imgs[j]
J = j
name = "img" + str(i) + str(J) + ".png"
print("正在保存图片" + name)
save_path = "./cifar10_images/train/{}/".format(num)
if not os.path.exists(save_path):
os.mkdir(save_path)
plimg.imsave(save_path + name, img) # 文件夹下是RGB分离的图像
print("保存完毕.")
def load_file(filename):
with open(filename, 'rb') as fo:
data = pickle.load(fo, encoding='latin1')
return data
# Unpickle the file and return the resulting dict
def unpickle(file):
fo = open(file, 'rb')
dict = pickle.load(fo, encoding='latin1')
fo.close()
return dict
def load_train():
    # Generate training-set images; to get PNG output, simply change the file extension.
save_path = 'cifar10'
train_path = os.path.join(save_path, 'train')
if not os.path.exists(train_path):
os.mkdir(train_path)
for j in range(1, 6):
dataName = "data_batch_" + str(j)
path = os.path.join('CIFAR10', dataName)
Xtr = unpickle(path)
print(dataName + " is loading...")
for i in range(0, 10000):
            img = np.reshape(Xtr['data'][i], (3, 32, 32))  # Xtr['data'] holds the raw image bytes
            img = img.transpose(1, 2, 0)  # reorder to (H, W, C)
            picName = train_path + '/' + str(Xtr['labels'][i]) + '_' + str(
                i + (j - 1) * 10000) + '.jpg'  # Xtr['labels'] holds the labels (0-9); the train folder must sit next to this script
imsave(picName, img)
print(dataName + " loaded.")
def load_test():
save_path = 'cifar10'
print("test_batch is loading...")
    # Generate test-set images
test_path = os.path.join(save_path, 'test')
if not os.path.exists(test_path):
os.mkdir(test_path)
path = os.path.join('CIFAR10', "test_batch")
testXtr = unpickle(path)
for i in range(0, 10000):
img = np.reshape(testXtr['data'][i], (3, 32, 32))
img = img.transpose(1, 2, 0)
picName = test_path + '/' + str(testXtr['labels'][i]) + '_' + str(i) + '.jpg'
imsave(picName, img)
print("test_batch loaded.")
def visualize2():
load_train()
load_test()
if __name__ == "__main__":
# visualize1()
    # Downloading/using the CIFAR-10 dataset and converting it to images: https://blog.csdn.net/ctwy291314/article/details/83864405
# data = load_file('CIFAR10/test_batch')
# print(data.keys())
visualize2()
|
import requests_mock
import requests
from os import path
import sys
import unittest
from pandas import DataFrame as df, Timestamp
from alpaca_trade_api.alpha_vantage import REST
import pytest
import os
cli = REST(os.getenv("ALPHAVANTAGE_API_KEY"))
# def endpoint(params=''):
# return 'https://www.alphavantage.co/query?{}&apikey=key-id'.format(params)
# def test_get_alpha_vantage():
# # Sample response
# # {
# # "Global Quote": {
# # "01. symbol": "TSLA",
# # "02. open": "####",
# # "03. high": "####",
# # "04. low": "####",
# # "05. price": "####",
# # "06. volume": "####",
# # "07. latest trading day": "yyyy-mm-dd",
# # "08. previous close": "####",
# # "09. change": "####",
# # "10. change percent": "####%"
# # }
# # }
# # Test Get Method
# tsla_quote = cli.get(params={'function': 'GLOBAL_QUOTE', 'symbol': 'TSLA'})
# assert tsla_quote['Global Quote']["01. symbol"] == "TSLA"
# assert '05. price' in str(tsla_quote['Global Quote'].keys())
# with pytest.raises(AttributeError):
# tsla_quote.foo
# def test_historical_quotes():
# # Sample Response
# # {
# # "Meta Data": {
# # "1. Information": "Daily Prices (open, high, low, close) and Volumes",
# # "2. Symbol": "TSLA",
# # "3. Last Refreshed": "yyyy-mm-dd",
# # "4. Output Size": "Compact",
# # "5. Time Zone": "US/Eastern"
# # },
# # "Time Series (Daily)": {
# # "yyyy-mm-dd": {
# # "1. open": "####",
# # "2. high": "####",
# # "3. low": "####",
# # "4. close": "####",
# # "5. volume": "####"
# # },
# # "yyyy-mm-dd": {
# # "1. open": "####",
# # "2. high": "####",
# # "3. low": "####",
# # "4. close": "####",
# # "5. volume": "####"
# # }
# # }
# historic_quotes = cli.historic_quotes(
# 'TSLA', adjusted=False, outputsize='full', cadence='daily', output_format=None)
# assert len(historic_quotes.keys()) > 0
# assert len(historic_quotes) > 0
# with pytest.raises(AttributeError):
# historic_quotes.foo
# def test_intraday_quotes():
# # Sample Response
# # {
# # "Meta Data": {
# # "1. Information": "Intraday (5min) open, high, low, close prices and volume",
# # "2. Symbol": "MSFT",
# # "3. Last Refreshed": "2020-01-13 16:00:00",
# # "4. Interval": "5min",
# # "5. Output Size": "Compact",
# # "6. Time Zone": "US/Eastern"
# # },
# # "Time Series (5min)": {
# # "2020-01-13 16:00:00": {
# # "1. open": "163.1300",
# # "2. high": "163.3200",
# # "3. low": "163.1100",
# # "4. close": "163.2800",
# # "5. volume": "1094345"
# # },
# # "2020-01-13 15:55:00": {
# # "1. open": "162.9200",
# # "2. high": "163.1500",
# # "3. low": "162.9000",
# # "4. close": "163.1351",
# # "5. volume": "522600"
# # }
# # }
# intraday_quotes = cli.intraday_quotes(
# 'TSLA', interval='5min', outputsize='full', output_format=None)
# assert len(intraday_quotes.keys()) > 0
# assert len(intraday_quotes) > 0
# with pytest.raises(AttributeError):
# intraday_quotes.foo
# def test_current_quote():
# # Sample response
# # {
# # "Global Quote": {
# # "01. symbol": "TSLA",
# # "02. open": "####",
# # "03. high": "####",
# # "04. low": "####",
# # "05. price": "####",
# # "06. volume": "####",
# # "07. latest trading day": "yyyy-mm-dd",
# # "08. previous close": "####",
# # "09. change": "####",
# # "10. change percent": "####%"
# # }
# # }
# # Test Get Method
# tsla_quote = cli.current_quote('TSLA')
# assert tsla_quote["01. symbol"] == "TSLA"
# assert '05. price' in str(tsla_quote.keys())
# with pytest.raises(AttributeError):
# tsla_quote.foo
# def test_search_endpoint():
# # Sample response
# # {
# # "bestMatches": [
# # {
# # "1. symbol": "BA",
# # "2. name": "The Boeing Company",
# # "3. type": "Equity",
# # "4. region": "United States",
# # "5. marketOpen": "09:30",
# # "6. marketClose": "16:00",
# # "7. timezone": "UTC-05",
# # "8. currency": "USD",
# # "9. matchScore": "1.0000"
# # },
# # {
# # "1. symbol": "BABA",
# # "2. name": "Alibaba Group Holding Limited",
# # "3. type": "Equity",
# # "4. region": "United States",
# # "5. marketOpen": "09:30",
# # "6. marketClose": "16:00",
# # "7. timezone": "UTC-05",
# # "8. currency": "USD",
# # "9. matchScore": "0.8000"
# # }
# # }
# search_endpoint = cli.search_endpoint(keywords='BA')
# assert search_endpoint["bestMatches"][0]["2. name"] == "The Boeing Company"
# assert "4. region" in str(search_endpoint["bestMatches"][0].keys())
# with pytest.raises(AttributeError):
# search_endpoint.foo
class TestAlphaVantage(unittest.TestCase):
_API_KEY_TEST = "test"
_API_EQ_NAME_TEST = 'MSFT'
@staticmethod
def get_file_from_url(url):
"""
Return the file name used for testing, found in the test data folder
formed using the original url
"""
tmp = url
for ch in [':', '/', '.', '?', '=', '&', ',']:
if ch in tmp:
tmp = tmp.replace(ch, '_')
path_dir = path.join(path.dirname(
path.abspath(__file__)), 'test_data/')
return path.join(path_dir, tmp)
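# Worked example of the substitution above (illustrative URL; the actual tests below
# pass plain names such as "mock_time_series" instead of full URLs):
#   'https://www.alphavantage.co/query?function=SECTOR&apikey=test'
#   -> '<test_data dir>/https___www_alphavantage_co_query_function_SECTOR_apikey_test'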
@requests_mock.Mocker()
def test_get_method(self, mock_request):
""" Test that api call returns a json file as requested
"""
cli = REST(TestAlphaVantage._API_KEY_TEST)
params = {'function': 'TIME_SERIES_INTRADAY',
'symbol': 'MSFT', 'interval': '1min'}
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&apikey=test'
path_file = self.get_file_from_url("mock_time_series")
with open(path_file) as f:
mock_request.get(url, text=f.read())
data = cli.get(params)
self.assertIsInstance(
data, dict, 'Result Data must be a dictionary')
@requests_mock.Mocker()
def test_historic_quotes(self, mock_request):
""" Test that api call returns a json file as requested
"""
cli = REST(TestAlphaVantage._API_KEY_TEST)
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=MSFT&outputsize=full&apikey=test&datatype=json'
path_file = self.get_file_from_url("mock_time_series_daily")
with open(path_file) as f:
mock_request.get(url, text=f.read())
data = cli.historic_quotes('MSFT', cadence='daily')
self.assertIsInstance(
data, dict, 'Result Data must be a dictionary')
@requests_mock.Mocker()
def test_intraday_quotes_pandas(self, mock_request):
""" Test that api call returns a json file as requested
"""
cli = REST(TestAlphaVantage._API_KEY_TEST)
url = "https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&outputsize=full&apikey=test&datatype=json"
path_file = self.get_file_from_url("mock_time_series")
with open(path_file) as f:
mock_request.get(url, text=f.read())
data = cli.intraday_quotes(
"MSFT", interval='1min', outputsize='full', output_format='pandas')
self.assertIsInstance(
data, df, 'Result Data must be a pandas data frame')
@requests_mock.Mocker()
def test_current_quote(self, mock_request):
""" Test that api call returns a json file as requested
"""
cli = REST(TestAlphaVantage._API_KEY_TEST)
url = "https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol=MSFT&apikey=test&datatype=json"
path_file = self.get_file_from_url("global_quote")
with open(path_file) as f:
mock_request.get(url, text=f.read())
data = cli.current_quote("MSFT")
self.assertIsInstance(
data, dict, 'Result Data must be a dict')
@requests_mock.Mocker()
def test_search_endpoint(self, mock_request):
""" Test that api call returns a json file as requested
"""
cli = REST(TestAlphaVantage._API_KEY_TEST)
url = "https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords=BA&datatype=json&apikey=test"
path_file = self.get_file_from_url("symbol_search")
with open(path_file) as f:
mock_request.get(url, text=f.read())
data = cli.search_endpoint("BA")
self.assertIsInstance(
data, dict, 'Result Data must be a dict')
@requests_mock.Mocker()
def test_historic_fx_quotes(self, mock_request):
""" Test that api call returns a json file as requested
"""
cli = REST(TestAlphaVantage._API_KEY_TEST)
url = "https://www.alphavantage.co/query?function=FX_DAILY&from_symbol=USD&to_symbol=EUR&outputsize=full&apikey=test"
path_file = self.get_file_from_url("mock_foreign_exchange_historical")
with open(path_file) as f:
mock_request.get(url, text=f.read())
data = cli.historic_fx_quotes('USD', 'EUR')
self.assertIsInstance(
data, dict, 'Result Data must be a dict')
@requests_mock.Mocker()
def test_intraday_fx_quotes(self, mock_request):
""" Test that api call returns a json file as requested
"""
cli = REST(TestAlphaVantage._API_KEY_TEST)
url = "https://www.alphavantage.co/query?function=FX_INTRADAY&from_symbol=USD&to_symbol=EUR&outputsize=full&apikey=test"
path_file = self.get_file_from_url("mock_intraday_fx")
with open(path_file) as f:
mock_request.get(url, text=f.read())
data = cli.intraday_fx_quotes('USD', 'EUR')
self.assertIsInstance(
data, dict, 'Result Data must be a dict')
@requests_mock.Mocker()
def test_exchange_rate(self, mock_request):
""" Test that api call returns a json file as requested
"""
cli = REST(TestAlphaVantage._API_KEY_TEST)
url = "https://www.alphavantage.co/query?function=CURRENCY_EXCHANGE_RATE&from_currency=USD&to_currency=EUR&apikey=test"
path_file = self.get_file_from_url("mock_foreign_exchange")
with open(path_file) as f:
mock_request.get(url, text=f.read())
data = cli.exchange_rate('USD', 'EUR')
self.assertIsInstance(
data, dict, 'Result Data must be a dict')
@requests_mock.Mocker()
def test_historic_cryptocurrency_quotes(self, mock_request):
""" Test that api call returns a json file as requested
"""
cli = REST(TestAlphaVantage._API_KEY_TEST)
url = "https://www.alphavantage.co/query?function=DIGITAL_CURRENCY_DAILY&symbol=BTC&market=CNY&apikey=test&datatype=json"
path_file = self.get_file_from_url("mock_crypto_currencies")
with open(path_file) as f:
mock_request.get(url, text=f.read())
data = cli.historic_cryptocurrency_quotes('BTC', 'CNY')
self.assertIsInstance(
data, dict, 'Result Data must be a dict')
@requests_mock.Mocker()
def test_techindicators(self, mock_request):
""" Test that api call returns a json file as requested
"""
cli = REST(TestAlphaVantage._API_KEY_TEST)
url = "https://www.alphavantage.co/query?function=SMA&interval=weekly&time_period=10&apikey=test"
path_file = self.get_file_from_url("mock_technical_indicator")
with open(path_file) as f:
mock_request.get(url, text=f.read())
data = cli.techindicators(
techindicator='SMA', interval='weekly', time_period=10)
self.assertIsInstance(
data, dict, 'Result Data must be a dict')
@requests_mock.Mocker()
def test_sector_pandas(self, mock_request):
""" Test that api call returns a json file as requested
"""
cli = REST(TestAlphaVantage._API_KEY_TEST)
url = "https://www.alphavantage.co/query?function=SECTOR&apikey=test"
path_file = self.get_file_from_url("mock_sector")
with open(path_file) as f:
mock_request.get(url, text=f.read())
data = cli.sector()
self.assertIsInstance(
data, dict, 'Result Data must be a dict')
|
import os
import shlex
import subprocess
from pt_helper import Command
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
REPLAY_SH = """
#!/bin/bash
# Replay the shell session recording that is in the same directory as this script
SCRIPT_DIR=$(dirname ${BASH_SOURCE[0]})
scriptreplay --log-out "${SCRIPT_DIR}/stdout" --log-timing "${SCRIPT_DIR}/stdout.time"
""".strip()
def create_replay_sh(output_dir: str) -> None:
file_name = os.path.join(output_dir, "replay.sh")
with open(file_name, "w") as f:
f.write(REPLAY_SH)
os.chmod(file_name, mode=0o755)
def create_pretty_run_command(cmd: Command) -> str:
pretty_run = os.path.join(SCRIPT_DIR, "..", "pretty-run.py")
pretty_run = os.path.realpath(pretty_run)
command_string = shlex.join([cmd.name, *cmd.args])
return shlex.join([pretty_run, command_string])
def run_with_script(cmd: Command, output_dir: str) -> int:
create_replay_sh(output_dir)
pretty_command_string = create_pretty_run_command(cmd)
stdout_file = os.path.join(output_dir, "stdout")
timing_file = os.path.join(output_dir, "stdout.time")
return subprocess.call(
[
"script",
"--return",
"--quiet",
"--log-out",
stdout_file,
"--log-timing",
timing_file,
"--command",
pretty_command_string,
]
)
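# Hypothetical usage sketch: the Command constructor is assumed here to take name/args
# keywords, matching the attributes read in create_pretty_run_command; adjust to
# pt_helper's real API if it differs.
if __name__ == "__main__":
    demo_dir = "/tmp/pt-session"
    os.makedirs(demo_dir, exist_ok=True)
    demo_cmd = Command(name="echo", args=["hello"])  # hypothetical constructor call
    print("script exited with code", run_with_script(demo_cmd, demo_dir))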
|
# -*- coding: utf-8 -*-
#
# Database upgrade script
#
# RLP Template Version 1.1.0 => 1.1.1
#
# Execute in web2py folder after code upgrade like:
# python web2py.py -S eden -M -R applications/eden/modules/templates/RLP/upgrade/1.1.0-1.1.1.py
#
import datetime
import sys
#from s3 import S3DateTime
#from gluon.storage import Storage
#from gluon.tools import callback
# Override auth (disables all permission checks)
auth.override = True
# Failed-flag
failed = False
# Info
def info(msg):
sys.stderr.write("%s" % msg)
def infoln(msg):
sys.stderr.write("%s\n" % msg)
# Load models for tables
dtable = s3db.hrm_delegation
IMPORT_XSLT_FOLDER = os.path.join(request.folder, "static", "formats", "s3csv")
TEMPLATE_FOLDER = os.path.join(request.folder, "modules", "templates", "RLP")
# -----------------------------------------------------------------------------
# Upgrade user roles
#
if not failed:
info("Upgrade user roles")
bi = s3base.S3BulkImporter()
filename = os.path.join(TEMPLATE_FOLDER, "auth_roles.csv")
with open(filename, "r") as File:
try:
bi.import_role(filename)
except Exception as e:
infoln("...failed")
infoln(sys.exc_info()[1])
failed = True
else:
infoln("...done")
# -----------------------------------------------------------------------------
if not failed:
info("Clean up duplicate person details")
dtable = s3db.pr_person_details
query = (dtable.deleted == False)
# Find all persons with duplicate person details
rows = db(query).select(dtable.person_id.min(),
groupby = dtable.person_id,
having = dtable.id.count() > 1,
)
updated = 0
deleted = 0
for row in rows:
person_id = row[dtable.person_id.min()]
query = (dtable.person_id == person_id) & \
(dtable.deleted == False)
details = db(query).select(dtable.id,
dtable.alias,
dtable.occupation,
orderby = dtable.modified_on,
)
data = {}
original = None
for index, subrow in enumerate(details):
if subrow.alias:
data["alias"] = subrow.alias
if subrow.occupation and "occupation" not in data:
data["occupation"] = subrow.occupation
if index == 0:
original = subrow
else:
subrow.delete_record()
deleted += 1
if original:
data["modified_on"] = dtable.modified_on
data["modified_by"] = dtable.modified_by
original.update_record(**data)
updated += 1
infoln("...done (%s records updated, %s record deleted)" % (updated, deleted))
# -----------------------------------------------------------------------------
# Finishing up
#
if failed:
db.rollback()
infoln("UPGRADE FAILED - Action rolled back.")
else:
db.commit()
infoln("UPGRADE SUCCESSFUL.")
|
from abc import abstractmethod
from typing import Any, Optional, List, Mapping, Sequence, Type, Union, Dict
from sqlalchemy.orm import DeclarativeMeta, ColumnProperty
from sqlalchemy.sql import insert, Insert, Select
from ..query import CRUDQuery
from .base import BaseProvider
from .select_provider import SelectProvider
class InsertProvider(SelectProvider, BaseProvider):
@abstractmethod
async def insert(self, *args, **kwargs):
pass
@abstractmethod
async def bulk_insert(self, *args, **kwargs):
pass
@abstractmethod
def make_insert_stmt(self, *args, **kwargs):
pass
@abstractmethod
def make_bulk_insert_stmt(self, *args, **kwargs):
pass
def _make_insert_stmt(
self,
query: CRUDQuery,
mapper: DeclarativeMeta,
returning: bool = True,
) -> Union[Insert, Select]:
insert_stmt = insert(mapper)
insertable_values = self.__make_insertable_values(
mapper=mapper,
values=query.dict,
)
insert_stmt = insert_stmt.values(**insertable_values)
if returning:
return self._make_select_from_insert(
query=query,
insert_stmt=insert_stmt,
mapper=mapper,
)
return insert_stmt
def _make_bulk_insert_stmt(
self,
query: Union[Type[CRUDQuery], CRUDQuery],
values_seq: Sequence[Dict[str, Any]],
mapper: DeclarativeMeta,
returning: bool = True,
) -> Union[Insert, Select]:
"""
"""
insertable_values_seq: List[Mapping[str, Any]] = []
for values in values_seq:
insertable_values = self.__make_insertable_values(
mapper=mapper,
values=values
)
insertable_values_seq.append(insertable_values)
insert_stmt = insert(mapper)
insert_stmt = insert_stmt.values(insertable_values_seq)
if returning:
return self._make_select_from_insert(
query=query,
insert_stmt=insert_stmt,
mapper=mapper,
)
return insert_stmt
async def _insert(
self,
query: CRUDQuery,
mapper: DeclarativeMeta,
returning: bool = True,
) -> Optional[CRUDQuery]:
"""
if returning is True, returns instance of passed query
"""
insert_stmt = self._make_insert_stmt(
query=query,
mapper=mapper,
returning=returning
)
scalar_result = await self._session.execute(insert_stmt)
if returning:
return query.from_selected_row(scalar_result.first())
async def _bulk_insert(
self,
query: Union[CRUDQuery, Type[CRUDQuery]],
values_seq: Sequence[Dict[str, Any]],
mapper: DeclarativeMeta,
returning: bool = True,
) -> Optional[Sequence[CRUDQuery]]:
"""
If returning is True, returns iterable object that contains
CRUDQuery
"""
if not values_seq:
return
insert_stmt = self._make_bulk_insert_stmt(
query=query,
values_seq=values_seq,
mapper=mapper,
returning=returning
)
scalar_result = await self._session.execute(insert_stmt)
if returning:
return query.from_selected_rows(scalar_result.all())
@staticmethod
def __make_insertable_values(
mapper: DeclarativeMeta,
values: Dict[str, Any],
) -> Mapping[str, Any]:
insertable_values = dict()
for field_name, value in values.items():
mapper_field = getattr(mapper, field_name, None)
if mapper_field is None:
continue
if not isinstance(mapper_field.property, ColumnProperty):
continue
insertable_values[field_name] = value
return insertable_values
|
"""
Main module that starts RabbitMQ connection and publishes ArtifactPublished
events
"""
import logging
import signal
import sys
import os
import json
from eiffelactory import artifactory
from eiffelactory import config
from eiffelactory import eiffel
from eiffelactory import rabbitmq
from eiffelactory import utils
if not os.path.exists('logs'):
os.makedirs('logs')
LOGGER_ARTIFACTS = utils.setup_event_logger('artifacts', 'artifacts.log',
logging.DEBUG)
LOGGER_PUBLISHED = utils.setup_event_logger('published', 'published.log',
logging.INFO)
LOGGER_RECEIVED = utils.setup_event_logger('received', 'received.log', logging.INFO)
LOGGER_APP = utils.setup_app_logger('app', 'eiffelactory.log', logging.DEBUG)
CFG = config.Config()
class App:
"""
Application that starts the RabbitMQ connection and publishes
ArtifactPublished events.
"""
def __init__(self):
self.rmq_connection = rabbitmq.RabbitMQConnection(CFG.rabbitmq,
self.on_event_received)
self.artifactory_connection = artifactory.ArtifactoryConnection(
CFG.artifactory)
signal.signal(signal.SIGINT, self._signal_handler)
signal.signal(signal.SIGTERM, self._signal_handler)
def on_event_received(self, event):
"""
Callback passed to RabbitMQConnection; processes each received message.
:param event: RabbitMQ message
:return:
"""
if not eiffel.is_artifact_created_event(event) or (
CFG.eiffelactory.event_sources and
not eiffel.is_sent_from_sources(event,
CFG.eiffelactory.event_sources)):
return
LOGGER_RECEIVED.info(event)
artc_meta_id = event['meta']['id']
artc_data_identity = event['data']['identity']
artifact = self.artifactory_connection.\
find_artifact_on_artifactory(*utils.parse_purl(artc_data_identity))
if artifact:
if len(artifact) > 1:
LOGGER_APP.error("AQL query returned '%d' artifacts",
len(artifact))
return
else:
self._publish_artp_event(artc_meta_id, artifact[0])
def _publish_artp_event(self, artc_meta_id, artifact):
"""
Creates an ArtifactPublished event and sends it to the RabbitMQ exchange.
:param artc_meta_id: the id of ArtifactCreated event
:param artifact: the results dictionary returned from Artifactory by the
AQL query.
"""
location = '{}/{}/{}/{}'.format(CFG.artifactory.url,
artifact['repo'],
artifact['path'],
artifact['name'])
artp_event = eiffel.create_artifact_published_event(
artc_meta_id, [eiffel.Location(location)])
artp_event_json = json.dumps(utils.remove_none_from_dict(artp_event))
self.rmq_connection.publish_message(json.loads(artp_event_json))
LOGGER_ARTIFACTS.info(artifact)
LOGGER_PUBLISHED.info(artp_event_json)
def run(self):
"""
Starts the app by starting to listen to RabbitMQ messages.
"""
self.rmq_connection.read_messages()
def _signal_handler(self, signal_received, frame):
"""
Handles SIGINT/SIGTERM (e.g. Ctrl-C). The two unused arguments are required
by the signal-handler calling convention.
:param signal_received:
:param frame:
:return:
"""
self.rmq_connection.close_connection()
sys.exit(0)
|
import datetime as dt
from mongoengine import *
from .user import User
class Scheduling(EmbeddedDocument):
DEFAULT_TYPE = 'daily'
TYPES = (DEFAULT_TYPE, 'weekdays', 'weekends', 'weekly', 'monthly')
user = ReferenceField(User, required=True)
type = StringField(required=True, default=DEFAULT_TYPE, choices=TYPES)
time = DateTimeField(required=True)
ends_at = DateTimeField()
ends_after = IntField()
created_at = DateTimeField(required=True, default=dt.datetime.now)  # pass the callable so the default is evaluated per document, not at import time
|
# Create a data set using features extracted by librosa.
import multiprocessing
from os import path
import pickle
import common
from dbispipeline.utils import prefix_path
import librosa
import numpy as np
import pandas as pd
DATA_PATH = prefix_path("audio_data", common.DEFAULT_PATH)
def extract_features(song_path):
# Load song.
song, sr = librosa.load(song_path)
# Extract BPM.
bpm, _ = librosa.beat.beat_track(y=song, sr=sr)
# Extract zero-crossing rate.
zcr = sum(librosa.zero_crossings(y=song)) / len(song)
# Extract spectral centroid.
spec_centroid = np.mean(
librosa.feature.spectral_centroid(y=song, sr=sr)[0])
spec_centroid_stddev = np.std(
librosa.feature.spectral_centroid(y=song, sr=sr)[0])
# Extract spectral rolloff.
spec_rolloff = np.mean(librosa.feature.spectral_rolloff(y=song, sr=sr)[0])
spec_rolloff_stddev = np.std(
librosa.feature.spectral_rolloff(y=song, sr=sr)[0])
# Extract spectral flatness.
spec_flat = np.mean(librosa.feature.spectral_flatness(y=song)[0])
spec_flat_stddev = np.std(librosa.feature.spectral_flatness(y=song)[0])
# Extract spectral contrast.
spec_contrast = np.mean(
librosa.feature.spectral_contrast(y=song, sr=sr)[0])
spec_contrast_stddev = np.std(
librosa.feature.spectral_contrast(y=song, sr=sr)[0])
# Extract MFCCs.
mfccs = librosa.feature.mfcc(y=song, sr=sr)
mfcc = [np.mean(c) for c in mfccs]
# Done.
features = [
bpm,
zcr,
spec_centroid,
spec_centroid_stddev,
spec_rolloff,
spec_rolloff_stddev,
spec_flat,
spec_flat_stddev,
spec_contrast,
spec_contrast_stddev,
]
for c in mfcc:
features.append(c)
columns = [
"bpm",
"zcr",
"spectral_centroid",
"spectral_centroid_stddev",
"spectral_rolloff",
"spectral_rolloff_std",
"spectral_flatness",
"spectral_flatness_std",
"spectral_contrast",
"spectral_contrast_std",
]
for i in range(len(mfcc)):
columns.append(f"mfcc{i + 1}")
return pd.DataFrame([features], columns=columns)
def process_line(line):
i, line = line[0], line[1]
print(f"{i}")
# Get audio path and tags for the current song.
fields = line.split("\t")
mp3_path = path.join(DATA_PATH, fields[3])
tags = [t.replace("\n", "") for t in fields[5:]]
# Extract audio features for the given song.
df_features = extract_features(mp3_path)
# Construct result data frame.
df_tags = pd.DataFrame([[tags]], columns=["tags"])
df_res = df_features.join(df_tags)
# Done.
return df_res
def generate_data_set(set_path, save_path):
# Process every song in the given set.
with open(set_path, "r") as f:
lines = enumerate(f.readlines()[1:])
pool = multiprocessing.pool.Pool()
frames = pool.map(process_line, lines)
# Combine data frames.
res = pd.concat(frames)
# Save.
pickle.dump(res, open(save_path, "wb"))
if __name__ == "__main__":
generate_data_set(
prefix_path("autotagging_moodtheme-train.tsv", common.DEFAULT_PATH),
prefix_path("autotagging_moodtheme-train-librosa.pickle",
common.DEFAULT_PATH),
)
generate_data_set(
prefix_path("autotagging_moodtheme-test.tsv", common.DEFAULT_PATH),
prefix_path("autotagging_moodtheme-test-librosa.pickle",
common.DEFAULT_PATH),
)
|
__author__ = 'matt'
from copy import deepcopy
from os.path import join, split, exists
import numpy as np
from .cvwrap import cv2
from chumpy.utils import row, col
from .utils import wget
def get_earthmesh(trans, rotation):
from .serialization import load_mesh
from copy import deepcopy
if not hasattr(get_earthmesh, 'mesh'):
def wg(url):
dest = join('/tmp', split(url)[1])
if not exists(dest):
wget(url, dest)
wg('http://files.is.tue.mpg.de/mloper/opendr/images/nasa_earth.obj')
wg('http://files.is.tue.mpg.de/mloper/opendr/images/nasa_earth.mtl')
wg('http://files.is.tue.mpg.de/mloper/opendr/images/nasa_earth.jpg')
fname = join('/tmp', 'nasa_earth.obj')
mesh = load_mesh(fname)
mesh.v = np.asarray(mesh.v, order='C')
mesh.vc = mesh.v*0 + 1
mesh.v -= row(np.mean(mesh.v, axis=0))
mesh.v /= np.max(mesh.v)
mesh.v *= 2.0
get_earthmesh.mesh = mesh
mesh = deepcopy(get_earthmesh.mesh)
mesh.v = mesh.v.dot(cv2.Rodrigues(np.asarray(np.array(rotation), np.float64))[0])
mesh.v = mesh.v + row(np.asarray(trans))
return mesh
def process(im, vmin, vmax):
shape = im.shape
im = deepcopy(im).flatten()
im[im>vmax] = vmax
im[im<vmin] = vmin
im -= vmin
im /= (vmax-vmin)
im = im.reshape(shape)
return im
|
load("@npm//@bazel/typescript:index.bzl", "ts_library")
load("@npm//jest-cli:index.bzl", _jest_test = "jest_test")
def ts_test(name, srcs, deps, data = [], jest_config = "//:jest.config.js", **kwargs):
"A macro around the autogenerated jest_test rule, by the incomparable @spacerat"
lib_name = name + "_lib"
src_name = name + "_src"
deps = deps + [
"@npm//@aws-cdk/assert",
"@npm//@types/jest",
]
# Compile the test and extract its js files
ts_library(
name = lib_name,
srcs = srcs,
deps = deps,
)
native.filegroup(
name = src_name,
srcs = [":" + lib_name],
output_group = "es5_sources",
)
src_label = ":" + src_name
# Run the test
args = [
"--no-cache",
"--no-watchman",
"--ci",
]
args.extend(["--config", "$(rootpath %s)" % jest_config])
args.extend(["--runTestsByPath", "$(rootpaths %s)" % src_label])
_jest_test(
name = name,
data = [jest_config, src_label] + deps + data,
templated_args = args,
**kwargs
)
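# Hypothetical BUILD-file usage of the macro above (the load path and target labels
# are made up for illustration):
#
#     load("//tools:ts_test.bzl", "ts_test")
#
#     ts_test(
#         name = "widget_test",
#         srcs = ["widget.test.ts"],
#         deps = ["//src:widget"],
#     )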
|
from cm.base import ObjectBase, StatusBase
from cm.db import Connection
from cm import config, error, cookie, i18n
import re, sha, time, binascii, urlparse
_password_generator = None
def get_password_generator():
global _password_generator
if _password_generator is None:
_password_generator = PasswordGenerator()
return _password_generator
class PasswordGenerator:
'''Instance attributes:
hash : any
'''
default_seed = 'iFfyAA3niLGN7ncn7vx2w' # some random secret data
def __init__(self, seed=default_seed):
self.hash = sha.new(seed)
self.hash.update(str(time.time()))
def generate(self, seed='', length=8):
'''Generate a password. Some effort is made to make it random.
seed: if supplied the str() of this argument is used to update the
generator state before generating the password.
length: the maximum length of the password to return.
'''
try:
# Try to use /dev/urandom.
self.hash.update(open('/dev/urandom', 'rb').read(length))
except IOError:
# No source of random data. This method will now only
# generate passwords that look random if seed is kept secret.
self.hash.update(str(time.time()))
self.hash.update(str(seed))
return binascii.b2a_base64(self.hash.digest())[:length]
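# Illustrative call (hypothetical arguments): get_password_generator().generate(seed=username, length=12)
# returns a 12-character string derived from the SHA-1 state mixed with /dev/urandom (or the clock).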
def hash_password(password):
'''Apply a one way hash function to a password
and return the result.
'''
return sha.new(password).hexdigest()
_conn = None
def open_connection(self):
'''A mixin method for returning a globally single
connection object.'''
global _conn
if _conn is None:
_conn = Connection(config.USR_DB_DATABASE,
config.USR_DB_HOST,
config.USR_DB_USER,
config.USR_DB_PASSWORD,
config.USR_DB_MAXCONN,
config.USR_DB_MINCONN)
return _conn
class UserManager(ObjectBase):
_get_connection = open_connection
def get(self, kw):
order_sql = self._get_sort_order(kw)
by = kw.get('sort_by')
if by == 'username':
by_sql = 'username'
elif by == 'name':
by_sql = 'name'
elif by == 'email':
by_sql = 'email'
elif by == 'role':
by_sql = 'role'
elif by == 'status':
by_sql = 'status'
elif by == 'description':
by_sql = 'description'
else:
by_sql = 'username'
sql = '''select u.username, u.name, u.email, u.description,
r.name as role, s.description as status
from usr_user u, usr_role r, usr_user_status s
where u.role_id = r.role_id
and u.status_id = s.status_id
order by %s %s;
''' % (by_sql, order_sql)
return self._query(sql)
class User(ObjectBase):
_get_connection = open_connection
def new(self, kw):
kw['status_id'] = UserStatus().get_id(kw['status'])
kw['role_id'] = UserRole().get_id(kw['role'])
sql = """insert into usr_user(username, name, email,
description, role_id, lang_code, status_id)
values(%(username)s, %(name)s, %(email)s,
%(description)s, %(role_id)s, %(lang_code)s,
%(status_id)s);
"""
self._query(sql, kw)
def set(self, kw):
if 'status' in kw.keys():
kw['status_id'] = UserStatus().get_id(kw['status'])
del kw['status']
if 'role' in kw.keys():
kw['role_id'] = UserRole().get_id(kw['role'])
del kw['role']
if 'password' in kw.keys():
del kw['password'] # use set_password instead
keys = kw.keys()
f = [key + " = " + "%(" + key + ")s" for key in keys]
sql = "update usr_user set " + " , ".join(f) \
+ " where username = %(username)s;"
self._query(sql, kw)
def set_password(self, kw):
if kw['new_password1'] == kw['new_password2']:
new_password = hash_password(kw['new_password1'])
sql = """update usr_user set password = %s
where username = %s;
"""
self._query(sql, new_password, kw['username'])
else:
raise ValueError('Your new password entries did not match.')
def change_password(self, kw):
if self.authenticate(kw['username'], kw['old_password']):
self.set_password({'username': kw['username'],
'new_password1': kw['new_password1'],
'new_password2': kw['new_password2']})
else:
raise ValueError('Wrong password.')
def delete(self, kw):
sql = 'delete from usr_user where username = %(username)s;'
self._query(sql, kw)
def get(self, kw):
sql = '''select u.username, u.name, u.email, u.description,
r.name as role, u.lang_code, s.description as status
from usr_user u, usr_user_status s, usr_role r
where u.username = %s
and u.role_id = r.role_id
and u.status_id = s.status_id;
'''
return self._query(sql, kw.get('username'))
def authenticate(self, username, password):
'''Return True if the username and password match,
otherwise return False.
'''
password = hash_password(password)
status_id = UserStatus().get_id('active')
sql = '''select r.username
from usr_user r
where r.username = %(username)s
and r.password = %(password)s
and r.status_id = %(status_id)s;
'''
rs = self._query(sql, vars())
for r in rs:
if r.username == username: return True
return False
login = authenticate
def register(self, request, username):
'''Register a user to the session manager. This usually
happens after the user has logged in/authenticated
successfully.
2 HTTP cookies are issued when registering:
cookie 1: contains the session_id assigned by the
server when user access a page
that has stored things into it.
It should expire at end of session (browser exit).
cookie 2: named username, contains the username of the
user who logs in (and successfully authenticate). It
should expire at end of session.
'''
# 2 sql queries in total, need optimization
rs = self.get({'username':username})
for r in rs:
role_name = r.role
lang_code = r.lang_code
request.session.set_role(role_name)
request.session.set_lang(lang_code)
request.session.set_user(username)
cookie.UserNameCookie().set(request, username)
def is_logged_in(self, request):
'''Return True if the user has logged in,
return False otherwise.
User has logged in if the username in
the server-side session is the same as
the username in the username cookie from
the user.
Therefore, we check:
1) if the session_id provided by the user
is correct (find a match on the server side)
2) if that session id is not just a guess
(make sure the username match)
'''
user = request.session.user
if user is None:
return False
elif user == cookie.UserNameCookie().get(request):
return True
else:
return False
def logout(self, request):
'''Remove/unregister the user session from the
session manager. This usually happens when
the user logout of the application.
'''
# remove the session on the server side
from cm.session import get_session_manager
get_session_manager().expire_session(request)
# expire the cookie on the client side
cookie.UserNameCookie().expire(request)
_user_status = None
class UserStatus(StatusBase):
'''Contains valid user statuses (name and id) for user.'''
_get_connection = open_connection
def __init__(self):
global _user_status
if _user_status is None:
sql = '''select status_id, description
from usr_user_status order by status_id asc;
'''
_user_status = self._query(sql)
self._data = _user_status
_user_role = None
class UserRole(StatusBase):
'''Contains valid user roles (name and id) for user.'''
_get_connection = open_connection
def __init__(self):
global _user_role
if _user_role is None:
sql = '''select role_id, name
from usr_role order by role_id asc;
'''
_user_role = self._query(sql)
self._data = _user_role
class RoleManager(ObjectBase):
_get_connection = open_connection
def get(self, kw):
order_sql = self._get_sort_order(kw)
by = kw.get('sort_by')
if by == 'name':
by_sql = 'name'
elif by == 'description':
by_sql = 'description'
else:
by_sql = 'name'
sql = '''select r.role_id, r.name, r.description
from usr_role r
order by %s %s;
''' % (by_sql, order_sql)
return self._query(sql)
class Role(ObjectBase):
seq_name = 'usr_role_id'
_get_connection = open_connection
def new(self, kw):
kw['role_id'] = self.get_id()
sql = '''insert into usr_role(role_id, name, description)
values(%(role_id)s, %(name)s, %(description)s);
'''
self._query(sql, kw)
def set(self, kw):
if 'name' in kw:
role_name = kw['name']
del kw['name']
keys = kw.keys()
kw['role_name'] = role_name
f = [key + ' = ' + '%(' + key + ')s' for key in keys]
sql = 'update usr_role set ' + ' , '.join(f) \
+ ' where name = %(role_name)s;'
self._query(sql, kw)
def set_urls(self, kw):
denylist = [(k,) for (k, v) in kw.iteritems() if v == 1]
cursor = self._get_connection().get_cursor()
try:
sql = 'select role_id from usr_role where name = %s;'
cursor.execute(sql, [kw.get('name')])
role_id = cursor.fetchone()[0]
sql = '''delete from usr_role_url
where role_id = %s;'''
cursor.execute(sql, [role_id])
sql = '''insert into usr_role_url(role_id, url_id, is_allow)
values (%s, %s, %s)
''' % (role_id, '%s', 0)
cursor.executemany(sql, denylist)
except:
cursor.rollback()
raise
cursor.commit()
def delete(self, kw):
cursor = self._get_connection().get_cursor()
try:
sql = 'select role_id from usr_role where name = %s;'
cursor.execute(sql, [kw.get('name')])
role_id = cursor.fetchone()[0]
sql = '''delete from usr_role_url
where role_id = %s;'''
cursor.execute(sql, [role_id])
sql = 'delete from usr_role where name = %s;'
cursor.execute(sql, [kw.get('name')])
except:
cursor.rollback()
raise
cursor.commit()
def get(self, kw):
sql = '''select role_id, name, description
from usr_role
where name = %s;
'''
return self._query(sql, kw.get('name'))
def get_urls(self, kw):
order_sql = self._get_sort_order(kw)
by = kw.get('sort_by')
if by == 'url_id':
by_sql = 'url_id'
elif by == 'url':
by_sql = 'url'
elif by == 'description':
by_sql = 'description'
elif by == 'deny':
by_sql = 'deny %s, url' % order_sql
else:
by_sql = 'url_id'
sql = '''select u.url_id, u.url, u.description, t.url_id as deny
from usr_url u left outer join
(select u.url_id
from usr_role r, usr_url u, usr_role_url ru
where r.name = %s
and r.role_id = ru.role_id
and ru.url_id = u.url_id) t
on u.url_id = t.url_id
order by %s %s;
''' % ('%s', by_sql, order_sql)
return self._query(sql, kw.get('name'))
def get_deny_urls(self, name):
cursor = self._get_connection().get_cursor()
sql = '''select u.url
from usr_role r, usr_url u, usr_role_url ru
where r.name = %s
and r.role_id = ru.role_id
and ru.url_id = u.url_id
order by url asc;
'''
cursor.execute(sql, [name])
# transform a list of tuple to a list of string
return [item[0] for item in cursor.fetchall()]
class RoleInstance:
'''A class for providing an instance for the logged
in users to store in their session. Therefore we
want it to be lightweight and contain as little code
as possible (because we pickle the instance).
'''
def __init__(self, name):
self.name = name
self.deny_urls = Role().get_deny_urls(name)
def check_access(self, url):
'''Being called at the beginning of each page.'''
if self.deny(url):
raise error.AccessNotAllowedError
def deny(self, url, base=None):
'''Being called when rendering links.'''
full = urlparse.urljoin(base, str(url))
url_com = urlparse.urlparse(full)[2].split('/')
for durl in self.deny_urls:
durl_com = durl.split('/')
if len(url_com) == len(durl_com):
if reduce(self._sum, map(self._same, url_com, durl_com)):
return True
return False
def _sum(self, x, y):
return x and y
def _same(self, com, dcom):
if com == dcom or dcom == '_':
return True
return False
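# A minimal standalone sketch of the deny-matching rule above (hypothetical pattern and
# URLs; unlike RoleInstance it does not load deny URLs from the database):
def _segments_match(url, pattern):
    url_com = url.split('/')
    pat_com = pattern.split('/')
    if len(url_com) != len(pat_com):
        return False
    return all(u == p or p == '_' for u, p in zip(url_com, pat_com))
# _segments_match('/report/42/edit', '/report/_/edit')  -> True  (denied)
# _segments_match('/report/42/view', '/report/_/edit')  -> False (allowed)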
class UrlManager(ObjectBase):
_get_connection = open_connection
def get(self, kw):
order_sql = self._get_sort_order(kw)
by = kw.get('sort_by')
if by == 'url_id':
by_sql = 'url_id'
elif by == 'url':
by_sql = 'url'
elif by == 'description':
by_sql = 'description'
else:
by_sql = 'url'
sql = '''select url_id, url, description
from usr_url
order by %s %s;
''' % (by_sql, order_sql)
return self._query(sql)
class Url(ObjectBase):
seq_name = 'usr_url_id'
_get_connection = open_connection
def new(self, kw):
kw['url_id'] = self.get_id()
sql = '''insert into usr_url(url_id, url, description)
values(%(url_id)s, %(url)s, %(description)s);
'''
self._query(sql, kw)
def set(self, kw):
if 'url' in kw:
url_name = kw['url']
del kw['url']
keys = kw.keys()
kw['url_name'] = url_name
f = [key + ' = ' + '%(' + key + ')s' for key in keys]
sql = 'update usr_url set ' + ' , '.join(f) \
+ ' where url = %(url_name)s;'
self._query(sql, kw)
def delete(self, kw):
cursor = self._get_connection().get_cursor()
try:
sql = 'select url_id from usr_url where url = %s;'
cursor.execute(sql, [kw.get('url')])
role_id = cursor.fetchone()[0]
sql = '''delete from usr_role_url
where role_id = %s;'''
cursor.execute(sql, [role_id])
sql = 'delete from usr_url where url = %s;'
cursor.execute(sql, [kw.get('url')])
except:
cursor.rollback()
raise
cursor.commit()
def get(self, kw):
sql = '''select url_id, url, description
from usr_url
where url = %s;
'''
return self._query(sql, kw.get('url'))
|
__source__ = 'https://leetcode.com/problems/sequence-reconstruction/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/sequence-reconstruction.py
# Time: O(n * s), n is the size of org, s is the size of seqs
# Space: O(n)
#
# Description: 444. Sequence Reconstruction
#
# Check whether the original sequence org can be uniquely reconstructed from the sequences in seqs.
# The org sequence is a permutation of the integers from 1 to n, with 1 <= n <= 104.
# Reconstruction means building a shortest common supersequence of the sequences i
# n seqs (i.e., a shortest sequence so that all sequences in seqs are subsequences of it).
# Determine whether there is only one sequence that can be reconstructed from seqs and
# it is the org sequence.
#
# Example 1:
#
# Input:
# org: [1,2,3], seqs: [[1,2],[1,3]]
#
# Output:
# false
#
# Explanation:
# [1,2,3] is not the only one sequence that can be reconstructed,
# because [1,3,2] is also a valid sequence that can be reconstructed.
# Example 2:
#
# Input:
# org: [1,2,3], seqs: [[1,2]]
#
# Output:
# false
#
# Explanation:
# The reconstructed sequence can only be [1,2].
# Example 3:
#
# Input:
# org: [1,2,3], seqs: [[1,2],[1,3],[2,3]]
#
# Output:
# true
#
# Explanation:
# The sequences [1,2], [1,3], and [2,3] can uniquely reconstruct the original sequence [1,2,3].
# Example 4:
#
# Input:
# org: [4,1,5,2,6,3], seqs: [[5,2,6,3],[4,1,5,2]]
#
# Output:
# true
# UPDATE (2017/1/8):
# The seqs parameter had been changed to a list of list of strings (instead of a 2d array of strings).
# Please reload the code definition to get the latest changes.
#
# Hide Company Tags Google
# Hide Tags Graph Topological Sort
# Hide Similar Problems (M) Course Schedule II
import unittest
import collections
# Thoughts:
# For org to be uniquely reconstructible from seqs we need to satisfy 2 conditions:
#
# Every sequence in seqs should be a subsequence in org. This part is obvious.
# Every 2 consecutive elements in org should be consecutive elements in some sequence from seqs.
# Why is that? Well, suppose condition 1 is satisfied.
# Then for any 2 consecutive elements x and y in org we have 2 options:
# 1) Both x and y appear in some sequence from seqs. Then (as condition 1 is satisfied)
#    they must be consecutive elements in that sequence.
# 2) No sequence in seqs contains both x and y. In this case we cannot uniquely
#    reconstruct org from seqs, as the sequence with x and y swapped would also be a
#    valid original sequence for seqs.
# So these are 2 necessary criteria. It is pretty easy to see that they are also
# sufficient for org to be uniquely reconstructible (there is only 1 way to reconstruct
# the sequence once we know condition 2 is satisfied).
#
# To implement this idea, keep an idxs hash that maps each item to its index in org to
# check condition 1, and a pairs set that holds all consecutive element pairs from the
# sequences in seqs to check condition 2 (first elements are also paired with an
# undefined "previous" element; this is necessary to check as well).
# Time: O(|V| + |E|)
# Space: O(|E|)
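# A compact sketch of the idxs/pairs approach described above (an alternative to the
# topological-sort Solution below; assumes org is a permutation, as the problem states):
def reconstructible_sketch(org, seqs):
    idxs = {x: i for i, x in enumerate(org)}   # item -> index in org (condition 1)
    pairs = set()                              # consecutive pairs observed in seqs (condition 2)
    for seq in seqs:
        prev = None
        for cur in seq:
            if cur not in idxs:                # element does not belong to org
                return False
            if prev is not None and idxs[prev] >= idxs[cur]:
                return False                   # seq is not a subsequence of org
            pairs.add((prev, cur))
            prev = cur
    # every consecutive pair of org (including the "start" pair) must be covered by seqs
    return all((a, b) in pairs for a, b in zip([None] + list(org), org))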
# 216ms 48.84%
class Solution(object):
def sequenceReconstruction(self, org, seqs):
"""
:type org: List[int]
:type seqs: List[List[int]]
:rtype: bool
"""
graph = collections.defaultdict(set)
indegree = collections.defaultdict(int)
integer_set = set()
for seq in seqs:
for i in seq:
integer_set.add(i)
if len(seq) == 1:
if seq[0] not in indegree:
indegree[seq[0]] = 0
continue
for i in xrange(len(seq)-1):
if seq[i] not in indegree:
indegree[seq[i]] = 0
if seq[i+1] not in graph[seq[i]]:
graph[seq[i]].add(seq[i+1])
indegree[seq[i+1]] += 1
cnt_of_zero_indegree = 0
res = []
q = []
for i in indegree:
if indegree[i] == 0:
cnt_of_zero_indegree += 1
if cnt_of_zero_indegree > 1:
return False
q.append(i)
while q:
i = q.pop()
res.append(i)
cnt_of_zero_indegree = 0
for j in graph[i]:
indegree[j] -= 1
if indegree[j] == 0:
cnt_of_zero_indegree += 1
if cnt_of_zero_indegree > 1:
return False
q.append(j)
return res == org and len(org) == len(integer_set)
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought:
# BFS Topological Sort
# 91ms 74.745%
class Solution {
public boolean sequenceReconstruction(int[] org, List<List<Integer>> seqs) {
Map<Integer, Set<Integer>> map = new HashMap<>();
Map<Integer, Integer> indegree = new HashMap<>();
for(List<Integer> seq: seqs) {
if(seq.size() == 1) {
if(!map.containsKey(seq.get(0))) {
map.put(seq.get(0), new HashSet<>());
indegree.put(seq.get(0), 0);
}
} else {
for(int i = 0; i < seq.size() - 1; i++) {
if(!map.containsKey(seq.get(i))) {
map.put(seq.get(i), new HashSet<>());
indegree.put(seq.get(i), 0);
}
if(!map.containsKey(seq.get(i + 1))) {
map.put(seq.get(i + 1), new HashSet<>());
indegree.put(seq.get(i + 1), 0);
}
if(map.get(seq.get(i)).add(seq.get(i + 1))) {
indegree.put(seq.get(i + 1), indegree.get(seq.get(i + 1)) + 1);
}
}
}
}
Queue<Integer> queue = new LinkedList<>();
for(Map.Entry<Integer, Integer> entry: indegree.entrySet()) {
if(entry.getValue() == 0) queue.offer(entry.getKey());
}
int index = 0;
while(!queue.isEmpty()) {
int size = queue.size();
if(size > 1) return false;
int curr = queue.poll();
if(index == org.length || curr != org[index++]) return false;
for(int next: map.get(curr)) {
indegree.put(next, indegree.get(next) - 1);
if(indegree.get(next) == 0) queue.offer(next);
}
}
return index == org.length && index == map.size();
}
}
2.
Java O(n) time,O(n) space AC solution 14ms like count sort
The basic idea is to count how many numbers are smaller(self include) than the current number.
We then compare this count to the org.
It is pretty like the idea of count sort.
# 14ms 96.57%
class Solution {
public boolean sequenceReconstruction(int[] org, List<List<Integer>> seqs) {
int len = org.length;
int[] map = new int[len + 1];//map number to its index
Arrays.fill(map, -1);
int[] memo = new int[org.length];//count how many numbers are smaller(on the right)
for (int i = 0; i < len; i++) {
map[org[i]] = i;
}
for (List<Integer> seq : seqs) {
if (seq.size() == 0) continue;
int prev = seq.get(0);
if (prev <= 0 || prev > len || map[prev] == -1) return false;
for (int i = 1; i < seq.size(); i++) {
int curr = seq.get(i);
if (curr <= 0 || curr > len || map[curr] == -1) return false;
memo[map[prev]] = Math.max(memo[map[prev]], len - map[curr] + 1);
prev = curr;
}
memo[map[prev]] = Math.max(memo[map[prev]], 1);
}
for (int i = 0; i < memo.length; i++) {
if (memo[i] != len - i) return false;
}
return true;
}
}
'''
|
import json
import FulcrumApplicationToSalesforceObject as fts
from fulcrum import Fulcrum
_sfdcPrefix = 'f_'
_sfdcUsername = "your.salesforce@username.com"
_sfdcPassword = "yourSalesforcePassword"
_sfdcToken = "yourSalesforceSecurityToken"
_sfdcSandbox = True
_fulcrumXApiToken = "yourFulcrumAPIToken"
#The specific Fulcrum Application Form you would like to create in salesforce
_fulcrumFormId = "yourFulcumApplicationID"
fulcrum = Fulcrum(key=_fulcrumXApiToken)
fulcrumToSalesforce = fts.FulcrumApplicationToSalesforceObject ()
# Get Individual Fulcrum Form Fulcrum Applications
fulcrumForm = fulcrum.form.find(_fulcrumFormId)
# Create Salesforce Object From Fulcrum Form
fulcrumToSalesforce.construct_fulcrum_sfdc_object (fulcrumForm, 'create')
|
import inspect
import sys
import argparse
import shlex
import string
class OptionError(Exception):
pass
def prompt_str(prompt, regex=None, default=None, loop=True):
return input(prompt)
def prompt_list(prompt, sep=',', loop=True):
pass
def prompt_bool(prompt='y/n? ', default=None, loop=True):
q = input(prompt)
while q not in ['y', 'n', 'yes', 'no', '']:
q = input(prompt)
return q in ['y', 'yes', '']
def prompt_int(prompt, min=None, max=None, default=None, loop=True):
pass
def prompt_choice(prompt, options, default=None, normcase=True, loop=True):
letters = string.ascii_lowercase[:len(options)]
if prompt != None:
print (prompt)
print ('') # indent
for opt in zip(letters, options):
print ('(%s) %s' % opt)
print ('')
q = input()
while q not in letters and q != '':
print ('Please enter only letters from one of the options above, or a blank line to break the loop')
q = input()
return q
def main(fn):
"""Call fn with command line arguments. Used as a decorator.
The main decorator marks the function that starts a program. For example,
@main
def my_run_function():
# function body
Use this instead of the typical __name__ == "__main__" predicate.
"""
if inspect.stack()[1][0].f_locals['__name__'] == '__main__':
args = sys.argv[1:] # Discard the script name from command line
print (fn(*args)) # Call the main function
return fn
def get_choice_opt(options, optname, allowed, default=None, normcase=False):
string = options.get(optname, default)
if normcase:
string = string.lower()
if string not in allowed:
raise OptionError('Value for option %s must be one of %s' %
(optname, ', '.join(map(str, allowed))))
return string
def get_bool_opt(options, optname, default=None):
string = options.get(optname, default)
if isinstance(string, bool):
return string
elif isinstance(string, int):
return bool(string)
elif not isinstance(string, str):
raise OptionError('Invalid type %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
elif string.lower() in ('1', 'yes', 'true', 'on'):
return True
elif string.lower() in ('0', 'no', 'false', 'off'):
return False
else:
raise OptionError('Invalid value %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
def get_int_opt(options, optname, default=None):
string = options.get(optname, default)
try:
return int(string)
except TypeError:
raise OptionError('Invalid type %r for option %s; you '
'must give an integer value' % (
string, optname))
except ValueError:
raise OptionError('Invalid value %r for option %s; you '
'must give an integer value' % (
string, optname))
def get_list_opt(options, optname, default=None):
val = options.get(optname, default)
if isinstance(val, str):
return val.split()
elif isinstance(val, (list, tuple)):
return list(val)
else:
raise OptionError('Invalid type %r for option %s; you '
'must give a list value' % (
val, optname))
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
p = prompt
while True:
s = input(p)
p = prompt
if not s and default:
s = default
if s:
c = s[0].lower()
if c in allowed_chars:
break
if error_prompt:
p = '%c: %s\n%s' % (c, error_prompt, prompt)
return c
def pause():
"""
Pauses the output stream awaiting user feedback.
"""
print ('<Press enter/return to continue>')
input()
class DefaultArguments(argparse.ArgumentParser):
def error(self, message):
raise RuntimeError(message)
class Arguments:
def __init__(self, posix: bool = False, allow_abbrev: bool = False, **kwargs):
self.parser = DefaultArguments(allow_abbrev=allow_abbrev, add_help=False, **kwargs)
self.posix = posix
def add_argument(self, *inputs, **kwargs):
""" Shortcut to argparse.add_argument """
self.parser.add_argument(*inputs, **kwargs)
def parse_args(self, text):
""" Shortcut to argparse.parse_args with shlex implemented """
try:
args = self.parser.parse_args(
shlex.split(text if text else "", posix=self.posix)
)
except Exception as e:
return (f"ArgumentError: {e}", False)
return (args, True)
|
from d6tflow.tasks import TaskData
from d6tflow.targets.torch import PyTorchModel
class PyTorch(TaskData):
"""
Task which saves to .pt models
"""
target_class = PyTorchModel
target_ext = '.pt'
|
import unittest
from prob_distrs import Bernoulli
from prob_distrs import Binomial
class TestBernoulliClass(unittest.TestCase):
def setUp(self):
self.bernoulli = Bernoulli(0.4)
def test_initialization(self):
self.assertEqual(self.bernoulli.p, 0.4, 'p value incorrect')
self.assertEqual(self.bernoulli.n, 1, 'n value incorrect')
def test_calculate_mean(self):
mean = self.bernoulli.calculate_mean()
self.assertEqual(mean, .4)
def test_calculate_stdev(self):
stdev = self.bernoulli.calculate_stdev()
self.assertEqual(round(stdev,2), .49)
def test_pmf(self):
self.assertEqual(round(self.bernoulli.pmf(0), 5), .6)
self.assertEqual(round(self.bernoulli.pmf(1), 5), .4)
def test_cdf(self):
self.assertEqual(self.bernoulli.cdf(-1), 0)
self.assertEqual(self.bernoulli.cdf(0), .6)
self.assertEqual(self.bernoulli.cdf(1), 1)
def test_pmf_assertion_error(self):
with self.assertRaises(AssertionError):
self.bernoulli.pmf(2)
def test_add(self):
bernoulli_one = Bernoulli(.4)
bernoulli_two = Bernoulli(.4)
bernoulli_sum = bernoulli_one + bernoulli_two
self.assertEqual(bernoulli_sum.p, .4)
self.assertEqual(bernoulli_sum.n, 2)
def test_add_with_binomial(self):
bernoulli = Bernoulli(.4)
binomial = Binomial(.4, 5)
bernoulli_sum = bernoulli + binomial
self.assertEqual(bernoulli_sum.p, .4)
self.assertEqual(bernoulli_sum.n, 6)
def test_add_assertion_error(self):
bernoulli_one = Bernoulli(.5)
bernoulli_two = Bernoulli(.4)
with self.assertRaises(AssertionError):
bernoulli_sum = bernoulli_one + bernoulli_two
if __name__ == '__main__':
unittest.main()
|
import unittest
from pathlib import Path
from .. import *
class GameTestCase(unittest.TestCase):
def test_generate_turn(self):
pass #TODO
def test_init01(self):
g = game.Game(num_systems=100)
self.assertEqual(len(g.systems), 100)
def test_init02(self):
g = game.Game(x=0, y=0, z=0, num_systems=100)
self.assertEqual(len(g.systems), 100)
def test_init03(self):
races = []
for i in range(10):
races.append(race.Race())
g = game.Game(races=races, num_systems=0)
self.assertEqual(len(g.systems), 10)
|
compras = ('Rice', 10.89, 'Beans', 8.76, 'Tomato', 4.32,
'Salt', 3.50, 'Flour', 6.43, 'Guarana', 3.43,
'Potato', 5.40, 'Detergent', 2.30, 'Mayonnaise', 4.60)
print('=-=' * 13)
print(f'{"PRICE LIST":^37}')
print('=-=' * 13)
for c in range(0, len(compras)):
if c % 2 == 0:
print(f'{compras[c]:.<30}R$ {compras[c+1]:5.2f}')
|
import numpy as np
import pandas as pd
from . import gpabase as b
class TohokuGPA(b.GPACalcModule):
'''
GPA calculation method (Cumulative) of Tohoku University since March 3, 2020.
For details: https://www.tohoku.ac.jp/japanese/studentinfo/education/01/education0110/015_2.pdf (Japanese)
'''
def _calc_score(self, score_col: pd.Series):
score_col.update(score_col // 10 - 5)
score_col.replace(5, 4, inplace=True)
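# A minimal worked example of the rule in _calc_score, applied to a standalone Series
# (hypothetical raw scores; avoids instantiating the class, whose base lives in gpabase):
_demo = pd.Series([100, 89, 74, 60, 59])
_demo = (_demo // 10 - 5).replace(5, 4)
# _demo.tolist() == [4, 3, 2, 1, 0]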
|
import abc
import numpy as np
import mujoco_py
from rllab.core.serializable import Serializable
from sandbox.ours.envs.sawyer.mujoco_env import MujocoEnv
import copy
class SawyerMocapBase(MujocoEnv, Serializable, metaclass=abc.ABCMeta):
"""
Provides some commonly-shared functions for Sawyer Mujoco envs that use
mocap for XYZ control.
"""
mocap_low = np.array([-0.2, 0.5, 0.06])
mocap_high = np.array([0.2, 0.7, 0.6])
def __init__(self, model_name, frame_skip=10):
MujocoEnv.__init__(self, model_name, frame_skip=frame_skip)
# Resets the mocap welds that we use for actuation.
sim = self.sim
if sim.model.nmocap > 0 and sim.model.eq_data is not None:
for i in range(sim.model.eq_data.shape[0]):
if sim.model.eq_type[i] == mujoco_py.const.EQ_WELD:
# Define the xyz + quat of the mocap relative to the hand
sim.model.eq_data[i, :] = np.array(
[0., 0., 0., 1., 0., 0., 0.]
)
def reset_mocap2body_xpos(self):
# move mocap to weld joint
self.data.set_mocap_pos(
'mocap',
np.array([self.data.get_body_xpos('hand')]),
)
self.data.set_mocap_quat(
'mocap',
np.array([self.data.get_body_quat('hand')]),
)
def get_endeff_pos(self):
return self.data.get_body_xpos('hand').copy()
def get_gripper_state(self):
joint_id = self.model.joint_names.index('rc_close')
return self.sim.get_state().qpos[joint_id]
def get_env_state(self):
joint_state = self.sim.get_state()
mocap_state = self.data.mocap_pos, self.data.mocap_quat
state = joint_state, mocap_state
return copy.deepcopy(state)
def set_env_state(self, state):
joint_state, mocap_state = state
self.sim.set_state(joint_state)
mocap_pos, mocap_quat = mocap_state
self.data.set_mocap_pos('mocap', mocap_pos)
self.data.set_mocap_quat('mocap', mocap_quat)
self.sim.forward()
class SawyerXYZEnv(SawyerMocapBase, metaclass=abc.ABCMeta):
def __init__(
self,
*args,
hand_low=(-0.2, 0.55, 0.05),
hand_high=(0.2, 0.75, 0.3),
action_scale=1./100,
**kwargs
):
super().__init__(*args, **kwargs)
self.action_scale = action_scale
self.hand_low = np.array(hand_low)
self.hand_high = np.array(hand_high)
self.mocap_low = np.hstack(hand_low)
self.mocap_high = np.hstack(hand_high)
def set_xyz_action(self, action):
action = np.clip(action, -1, 1)
pos_delta = action * self.action_scale
new_mocap_pos = self.data.mocap_pos + pos_delta[None]
new_mocap_pos[0, :] = np.clip(
new_mocap_pos[0, :],
self.mocap_low,
self.mocap_high,
)
self.data.set_mocap_pos('mocap', new_mocap_pos)
self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
|