content stringlengths 5 1.05M |
|---|
"""
Master class for the AIRC
"""
import asyncio
import logging
from . import server
from .abstracts import Messageable
__all__ = ("User", "Channel", "DefaultClient")
log = logging.getLogger("airc.client")
async def empty_handler(event): pass  # no-op async event handler (default placeholder)
def empty_handler_sync(event): pass  # no-op sync event handler (default placeholder)
class User(Messageable):
    """An IRC user that can be messaged directly via PRIVMSG."""

    def __init__(self, server, name):
        self.server, self.name = server, name

    def message(self, message):
        """Deliver *message* to this user through the owning server."""
        self.server.privmsg(self.name, message)
class Channel(Messageable):
    """An IRC channel that can be messaged via PRIVMSG."""

    def __init__(self, server, name):
        self.server, self.name = server, name

    def message(self, message):
        """Deliver *message* to this channel through the owning server."""
        self.server.privmsg(self.name, message)
class DefaultClient:
    """Manages one or more IRC server connections and dispatches events.

    Handlers are looked up by attribute name (``on_<command>``); an optional
    ``on_all_events`` coroutine sees every event first.
    """

    __slots__ = ("loop", "server_type", "connections", "handlers")

    def __init__(self, uris=None, *, server_type=server.DefaultServer, loop=None):
        self.loop = loop or asyncio.get_event_loop()
        self.server_type = server_type
        self.connections = []
        self.handlers = {}
        # self.add_handler("ping", _ponger)
        if not isinstance(uris, (list, tuple)):
            # NOTE(review): a bare ``uris=None`` still registers one connection
            # with uri=None here — presumably callers always pass a uri; confirm.
            uris = (uris,)
        for uri in uris:
            self.server(uri)

    def server(self, uri):
        """Create, register and return a new server connection for *uri*."""
        server = self.server_type(uri, self, loop=self.loop)
        self.connections.append(server)
        return server

    def run(self, *args, **kwargs):
        """Blocking entry point: schedule :meth:`start` and drive the loop if idle."""
        task = self.loop.create_task(self.start(*args, **kwargs))
        if not self.loop.is_running():
            self.loop.run_until_complete(task)

    async def start(self, *args, **kwargs):
        """Connect every unconnected server and supervise their reader tasks.

        ``names`` / ``passwds`` keyword lists are consumed positionally, one
        entry per not-yet-connected server. Finished reader tasks are
        restarted; cancelled ones are dropped.
        """
        tasks = []
        names = kwargs.get("names", [])
        passwds = kwargs.get("passwds", [])
        for server in self.connections:
            if not server.connected:
                # TODO: handle failed connection?
                await server.connect(names.pop(0), password=passwds.pop(0))
                tasks.append(self.loop.create_task(server.process_data()))
        while len(tasks) > 0:
            # Fix: iterate over a snapshot. The original removed items from
            # ``tasks`` while iterating it, which silently skips the element
            # following every removal.
            for task in list(tasks):
                if task.done():
                    tasks.remove(task)
                    server = await task
                    tasks.append(self.loop.create_task(server.process_data()))
                elif task.cancelled():
                    tasks.remove(task)
            await asyncio.sleep(1)

    async def _dispatch(self, event):
        """Invoke ``on_all_events`` (if present), then the ``on_<command>`` handler."""
        all_handler = getattr(self, "on_all_events", None)
        if all_handler is not None:
            await all_handler(event)
        event_handler = getattr(self, "on_" + event.command, None)
        if event_handler is not None:
            await event_handler(event)

    async def on_ping(self, event):
        # Answer server PINGs so the connection is kept alive.
        await event.server.pong(event.target)
|
import inspect
from core import reify, run_goal, variables, gen_name
from goals import conj
from stream import SuspendIteration
def run(*args):
    """miniKanren-style ``run``: reify *var* over the solutions of the goals.

    An optional leading integer limits the number of results; the remaining
    arguments are the query variable followed by the goals to conjoin.
    """
    limit = None
    if isinstance(args[0], int):
        limit, args = args[0], args[1:]
    var = args[0]
    goals = args[1:]
    if isinstance(var, tuple):
        raise NotImplementedError("run with multiple fresh variables")
    goal = conj(*goals)
    if limit is None:
        return map(reify(var), run_goal(goal))
    return map(reify(var), run_goal(limit, goal))
def fresh(body):
    """Instantiate *body* with one fresh logic variable per parameter.

    Returns the conjunction of the subgoals *body* produces, or the single
    goal itself when *body* returns a non-iterable goal.
    """
    params = inspect.signature(body).parameters
    fresh_vars = [variables(gen_name(param)) for param in params.keys()]
    subgoals = body(*fresh_vars)
    try:
        return conj(*subgoals)
    except TypeError:
        # body returned a single (non-iterable) goal — pass it through as-is
        return subgoals
def defrel(func):
    """Decorator that defers evaluation of a relation until it is stepped.

    Calling the wrapped relation builds a generator-based goal; advancing
    that generator raises SuspendIteration carrying the real goal's stream,
    so recursive relations do not diverge at construction time.
    """
    # return func
    # return lambda *args: suspend(func(*args))
    # return lambda *args: lambda s: func(*args)(s)
    # tmp = lambda s, *args: func(*args, s)
    def wrapper(*args):
        def goal(s):
            # Raised on the first step of the generator, not when goal(s) is called.
            raise SuspendIteration(func(*args)(s))
            yield  # unreachable; present only to make ``goal`` a generator function
        return goal
    return wrapper
|
#!/usr/bin/python
try: import simplejson as json
except ImportError: import json
import couchdb
import httplib
import urllib
import common
import unittest
HOST = "localhost:5984"
SET_NAME = "test_suite_set_view"
# Can't use something higher than 96 on Mac OS X, start getting
# system_limit errors due to too many file descriptors used
NUM_PARTS = 96
NUM_DOCS = 2000
DDOC = {
"_id": "_design/test",
"language": "javascript",
"views": {
"mapview": {
"map": "function(doc) { emit(doc.integer, doc.string); }"
}
}
}
class TestManyPartitions(unittest.TestCase):
    """Exercises a set view across NUM_PARTS partitions end to end:
    initial indexing, incremental doc additions, marking half the
    partitions passive, then cleanup + compaction.

    NOTE: Python 2 code — relies on xrange, integer division with ``/``,
    and ``range()`` returning a list (compared against JSON arrays).
    """
    def setUp(self):
        # Shared parameter dict consumed by every helper in ``common``.
        self._params = {
            "host": HOST,
            "ddoc": DDOC,
            "nparts": NUM_PARTS,
            "ndocs": NUM_DOCS,
            "setname": SET_NAME,
            "server": couchdb.Server(url = "http://" + HOST)
        }
        # print "Creating databases"
        common.create_dbs(self._params)
        common.populate(self._params)
        # print "Databases created"
        active_parts = range(self._params["nparts"])
        common.define_set_view(self._params, active_parts, [])
    def tearDown(self):
        # print "Deleting test data"
        # The second argument makes create_dbs recreate (i.e. wipe) the databases.
        common.create_dbs(self._params, True)
    def test_many_partitions(self):
        total_doc_count = common.set_doc_count(self._params, range(self._params["nparts"]))
        # print "Querying view"
        (resp, view_result) = common.query(self._params, "mapview")
        self.assertEqual(len(view_result["rows"]), total_doc_count, "number of received rows is %d" % total_doc_count)
        self.assertEqual(view_result["total_rows"], total_doc_count, "total_rows is %d" % total_doc_count)
        common.test_keys_sorted(view_result)
        # print "Verifying group info"
        info = common.get_set_view_info(self._params)
        self.assertEqual(info["active_partitions"], range(self._params["nparts"]), "right active partitions list")
        self.assertEqual(info["passive_partitions"], [], "right passive partitions list")
        self.assertEqual(info["cleanup_partitions"], [], "right cleanup partitions list")
        for i in xrange(self._params["nparts"]):
            expected_seq = common.partition_update_seq(self._params, i)
            self.assertEqual(info["update_seqs"][str(i)], expected_seq,
                "right update seq number (%d) for partition %d" % (expected_seq, i + 1))
        # print "Adding 1 new document to each partition"
        new_docs = []
        for i in xrange(self._params["nparts"]):
            server = self._params["server"]
            db = self._params["server"][self._params["setname"] + "/" + str(i)]
            value = total_doc_count + i + 1
            new_doc = {
                "_id": str(value),
                "integer": value,
                "string": str(value)
            }
            new_docs.append(new_doc)
            db.save(new_doc)
        new_total_doc_count = common.set_doc_count(self._params, range(self._params["nparts"]))
        self.assertEqual(new_total_doc_count, (total_doc_count + len(new_docs)), "N documents were added")
        total_doc_count = new_total_doc_count
        # print "Querying view again"
        (resp, view_result) = common.query(self._params, "mapview")
        self.assertEqual(len(view_result["rows"]), total_doc_count, "number of received rows is %d" % total_doc_count)
        self.assertEqual(view_result["total_rows"], total_doc_count, "total_rows is %d" % total_doc_count)
        common.test_keys_sorted(view_result)
        # print "Verifying group info"
        info = common.get_set_view_info(self._params)
        self.assertEqual(info["active_partitions"], range(self._params["nparts"]), "right active partitions list")
        self.assertEqual(info["passive_partitions"], [], "right passive partitions list")
        self.assertEqual(info["cleanup_partitions"], [], "right cleanup partitions list")
        for i in xrange(self._params["nparts"]):
            expected_seq = common.partition_update_seq(self._params, i)
            self.assertEqual(info["update_seqs"][str(i)], expected_seq,
                "right update seq number (%d) for partition %d" % (expected_seq, i + 1))
        # print "Marking half of the partitions as passive"
        # Python 2 integer division: upper half becomes passive, lower half stays active.
        passive = range(self._params["nparts"] / 2, self._params["nparts"])
        active = range(self._params["nparts"] / 2)
        common.set_partition_states(
            self._params,
            passive = range(self._params["nparts"] / 2, self._params["nparts"])
        )
        # print "Verifying group info"
        info = common.get_set_view_info(self._params)
        self.assertEqual(info["active_partitions"], active, "right active partitions list")
        self.assertEqual(info["passive_partitions"], passive, "right passive partitions list")
        self.assertEqual(info["cleanup_partitions"], [], "right cleanup partitions list")
        for i in xrange(self._params["nparts"]):
            expected_seq = common.partition_update_seq(self._params, i)
            self.assertEqual(info["update_seqs"][str(i)], expected_seq,
                "right update seq number (%d) for partition %d" % (expected_seq, i + 1))
        # print "Querying view again"
        (resp, view_result) = common.query(self._params, "mapview")
        expected_row_count = common.set_doc_count(self._params, active)
        self.assertEqual(len(view_result["rows"]), expected_row_count, "number of received rows is %d" % expected_row_count)
        common.test_keys_sorted(view_result)
        # Map each returned key back to its partition and ensure it is active.
        # Keys >= 2001 are the per-partition documents added above (offset 2000).
        for row in view_result['rows']:
            if row["key"] >= 2001:
                key_part = ((row["key"] - 2000) % self._params["nparts"]) - 1
            else:
                key_part = (row["key"] % self._params["nparts"]) - 1
            self.assertTrue(key_part in active, "Key %d from passive partition not in result set" % row["key"])
        # print "Marking half of the partitions for cleanup"
        common.set_partition_states(
            self._params,
            cleanup = passive
        )
        cleanup = passive
        common.compact_set_view(self._params)
        # print "Verifying group info"
        info = common.get_set_view_info(self._params)
        self.assertEqual(info["active_partitions"], active, "right active partitions list")
        self.assertEqual(info["passive_partitions"], [], "right passive partitions list")
        self.assertEqual(info["cleanup_partitions"], [], "right cleanup partitions list")
        self.assertEqual(info["stats"]["compactions"], 1, "1 compaction")
        self.assertEqual(info["stats"]["cleanups"], 1, "1 full cleanup")
        for i in active:
            expected_seq = common.partition_update_seq(self._params, i)
            self.assertEqual(info["update_seqs"][str(i)], expected_seq,
                "right update seq number (%d) for partition %d" % (expected_seq, i + 1))
|
# -*- coding: utf-8 -*-
import os
from unittest import TestCase
import json
import requests
import responses
from odata import ODataService
from odata.entity import EntityBase
# Load the demo OData $metadata document that the mocked service will serve.
path = os.path.join(os.path.dirname(__file__), 'demo_metadata.xml')
with open(path, mode='rb') as f:
    metadata_xml = f.read()
class TestMetadataImport(TestCase):
    """Tests reflecting entity classes from an OData $metadata document
    (served via a mocked HTTP endpoint — no real network access)."""
    def test_read(self):
        """Entity sets, navigation properties and actions are reflected."""
        with responses.RequestsMock() as rsps:
            rsps.add(rsps.GET, 'http://demo.local/odata/$metadata/',
                     body=metadata_xml, content_type='text/xml')
            Service = ODataService('http://demo.local/odata/', reflect_entities=True)
        self.assertIn('Product', Service.entities)
        # non-entityset things should not be listed in entities
        expected_keys = {'Product', 'ProductWithNavigation', 'Manufacturer',
                         'ProductManufacturerSales'}
        self.assertEqual(set(Service.entities.keys()), expected_keys)
        Product = Service.entities['Product']
        ProductWithNavigation = Service.entities['ProductWithNavigation']
        assert issubclass(Product, EntityBase)
        assert hasattr(Product, 'DemoCollectionAction')
        test_product = Product()
        # shortcut for saving the entity
        test_product.__odata__.persisted = True
        assert hasattr(test_product, 'DemoActionWithParameters')
        assert hasattr(ProductWithNavigation, 'Manufacturer')
        self.assertIn('Manufacturer', Service.entities)
        self.assertIn('DemoUnboundAction', Service.actions)
    def test_computed_value_in_insert(self):
        """A computed property must not be sent in the insert payload."""
        with responses.RequestsMock() as rsps:
            rsps.add(rsps.GET, 'http://demo.local/odata/$metadata/',
                     body=metadata_xml, content_type='text/xml')
            Service = ODataService('http://demo.local/odata/', reflect_entities=True)
        Product = Service.entities['Product']
        test_product = Product()
        def request_callback_part(request):
            # Inspect the POST body the client produced for the insert.
            payload = json.loads(request.body)
            self.assertNotIn('ExampleComputed', payload)
            headers = {}
            return requests.codes.created, headers, json.dumps(payload)
        with responses.RequestsMock() as rsps:
            rsps.add_callback(
                rsps.POST, Product.__odata_url__(),
                callback=request_callback_part,
                content_type='application/json',
            )
            Service.save(test_product)
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from .test_artifact_manager import TestArtifactManager
from .test_builder_manager import TestBuilderManager
from .test_extension_manager import TestExtensionManager
from .test_organization_manager import TestOrganizationManager
from .test_pool_manager import TestPoolManager
from .test_project_manager import TestProjectManager
from .test_release_manager import TestReleaseManager
from .test_repository_manager import TestRepositoryManager
from .test_service_endpoint_manager import TestServiceEndpointManager
from .test_yaml_manager import TestYamlManager
if __name__ == '__main__':
    unittest.main()  # allow running this test package directly
|
# Generated by Django 2.0.5 on 2018-06-06 04:39
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: renames Category.subdirs to Category.parent.
    # Generated files should not be hand-edited beyond comments.
    dependencies = [
        ('archives_app', '0003_auto_20180606_0822'),
    ]
    operations = [
        migrations.RenameField(
            model_name='category',
            old_name='subdirs',
            new_name='parent',
        ),
    ]
|
from os import getenv
class Config:
    """Runtime configuration read from environment variables with defaults."""

    def __init__(self):
        self.database = getenv("DB_PATH", "mangaloid.db")
        self.http_port = int(getenv("HTTP_PORT", "1337"))
        # Fix: an unset or empty ADMIN_IPS previously yielded [''] because
        # "".split(",") == ['']; skip empty entries so the list is truly empty.
        self.admin_ips = [i.strip() for i in getenv("ADMIN_IPS", "").split(",") if i.strip()]
        self.instance_name = getenv("INSTANCE_NAME")
        self.instance_address = getenv("INSTANCE_ADDRESS")
        self.instance_operator = getenv("INSTANCE_OPERATOR")
        self.instance_icon = getenv("INSTANCE_ICON")
        self.instance_description = getenv("INSTANCE_DESCRIPTION")
        self.upload_ipfs_node = getenv("UPLOAD_IPFS", "http://127.0.0.1:5001")
        # NOTE(review): '~' is not expanded here — presumably consumers call
        # os.path.expanduser on this path; confirm.
        self.thumbnail_path = getenv("THUMBNAIL_PATH", "~/mangaloid_thumbnails")
        self.max_tags = int(getenv("MAX_TAGS", "5"))
        # String default for consistency with the other integer settings.
        self.max_results = int(getenv("MAX_RESULTS", "50"))

config = Config()
import os
from .model import (
Resnet50,
Vgg16,
)
# Registry mapping env-selectable model names to their classes.
MODEL = {
    "resnet50": Resnet50,
    "vgg16": Vgg16
}
### Export model
# NOTE(review): os.getenv returns None when "visual_model.model" is unset,
# which would raise KeyError on the lookup below — presumably these env
# variables are always provided by the deployment; confirm.
model = MODEL[os.getenv("visual_model.model")]
model = model().to(os.getenv("visual_model.device")).eval()
#! /usr/bin/env python3
import numpy as np
import tkinter as tk
def coords2xbm(coords, returnOffset=False, joinstr=', '):
    """
    Draw an XBM-formatted image from coordinates.

    :param coords: The coordinates to be set as image foreground
    :type coords: (N,2)-shaped numpy array with x-values in first and y-values in second column
    :param returnOffset: Flag whether to return the offset or not
    :type returnOffset: bool
    :param joinstr: The string to be used for joining the byte values; defaults to ", "
    :type joinstr: str
    :return: if ``returnOffset``, a tuple of a tuple of the x- and y-offset and the image string, else only the image string
    """
    # Bounding box of the input coordinates.
    xs, ys = coords[:, 0], coords[:, 1]
    x_min, x_max = xs.min(), xs.max()
    y_min, y_max = ys.min(), ys.max()
    width = np.ceil(x_max - x_min + 1).astype(np.uint)
    height = np.ceil(y_max - y_min + 1).astype(np.uint)
    bytes_per_row = np.ceil(width / 8).astype(np.uint)
    # Shift coordinates so the bounding box starts at the origin.
    shifted = np.round(coords - np.array([[x_min, y_min]])).astype(np.uint)
    # Set one bit per foreground pixel (LSB-first within each byte, as XBM expects).
    bitmap = np.zeros([height, bytes_per_row], dtype=np.uint8)
    for x, y in shifted:
        bitmap[y, np.floor(x / 8).astype(np.uint)] |= 1 << (x % 8).astype(np.uint8)
    # Serialize the bytes and wrap them in the XBM C-source boilerplate.
    hex_bytes = joinstr.join("{:#x}".format(b) for b in bitmap.flat)
    xbm = "#define im_width {:d}\n#define im_height {:d}\nstatic char im_bits[] = {{\n{}\n}};".format(
        width, height, hex_bytes)
    if returnOffset:
        return (x_min, y_min), xbm
    return xbm
if __name__ == "__main__":
coords = np.array([[0,0], [2,0], [4,0], [6,0], [1,2], [2,2], [4,2], [5,2], [1,3], [2,3], [4,3], [5,3], [3,5], [3,6], [1,7], [5,7], [2,8], [4,8], [3,8]], dtype=np.uint)
xbm = coords2xbm(coords)
print(xbm)
with open("out.xbm", "w") as f:
f.write(xbm)
root = tk.Tk()
canvas = tk.Canvas(root, highlightthickness=0)
canvas.pack()
bitmap = tk.BitmapImage(data=xbm, master=root)
canvas.bm = bitmap
canvas.create_image(0, 0, image=bitmap, anchor=tk.NW)
root.mainloop()
|
import os
import subprocess
import glob
from .env import IS_CONDA, IS_LINUX, IS_WINDOWS, CONDA_DIR, check_env_flag, check_negative_env_flag, gather_paths
from .cuda import USE_CUDA
# On ROCm, RCCL development isn't complete. https://github.com/ROCmSoftwarePlatform/rccl
# Distributed is enabled by default except on Windows and ROCm builds.
USE_DISTRIBUTED = not check_negative_env_flag("USE_DISTRIBUTED") and not IS_WINDOWS and not check_env_flag("USE_ROCM")
USE_DISTRIBUTED_MW = USE_DISTRIBUTED and check_env_flag("USE_DISTRIBUTED_MW")
USE_GLOO_IBVERBS = False  # may be overridden below based on env/IB detection
USE_C10D = USE_DISTRIBUTED and USE_CUDA and IS_LINUX
IB_DEVINFO_CMD = "ibv_devinfo"  # InfiniBand utility probed to detect IB support
def get_command_path(command):
    """
    Helper function that checks if the command exists in the path and gets the
    full path of a given linux command if it exists.
    """
    def is_executable(candidate):
        # A usable command must be a regular file with the execute bit set.
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory, command)
        if is_executable(candidate):
            return candidate
    return None
def should_build_ib():
    """
    Helper function that detects the system's IB support and returns if we
    should build with IB support.

    Requires all three of: the ibv_devinfo utility runs successfully, a
    libibverbs library is present, and the infiniband/verbs.h header exists.
    """
    ib_util_found = False
    ib_lib_found = False
    ib_header_found = False
    try:
        # If the command doesn't exist, we can directly return instead of
        # making a subprocess call.
        # Fix: the original only set ib_util_found = False and fell through
        # to check_output([None, ...]); return early instead.
        full_cmd_path = get_command_path(IB_DEVINFO_CMD)
        if not full_cmd_path:
            return False
        subprocess.check_output([full_cmd_path, "--list"])
        # Here we just would like to simply run the command to test if IB
        # related tools / lib are installed without parsing the output. We
        # will enable IB build as long as the command runs successfully.
        #
        # The output should look like either:
        #
        # > ibv_devinfo --list
        # 0 HCAs founds:
        #
        # or
        #
        # > ibv_devinfo --list
        # 4 HCAs found:
        #     mlx5_3
        #     mlx5_2
        #     mlx5_1
        #     mlx5_0
        ib_util_found = True
    except Exception:
        # We just take all the exceptions here without affecting the build
        ib_util_found = False
    # Candidate directories to search for libibverbs.
    lib_paths = list(filter(bool, [
        "/usr/lib/",
        "/usr/lib/x86_64-linux-gnu/",
        "/usr/lib/powerpc64le-linux-gnu/",
        "/usr/lib/aarch64-linux-gnu/",
    ] + gather_paths([
        "LIBRARY_PATH",
    ]) + gather_paths([
        "LD_LIBRARY_PATH",
    ])))
    include_paths = [
        "/usr/include/",
    ]
    if IS_CONDA:
        lib_paths.append(os.path.join(CONDA_DIR, "lib"))
        include_paths.append(os.path.join(CONDA_DIR, "include"))
    for path in lib_paths:
        if path is None or not os.path.exists(path):
            continue
        ib_libraries = sorted(glob.glob(os.path.join(path, "libibverbs*")))
        if ib_libraries:
            ib_lib_found = True
            break
    for path in include_paths:
        if path is None or not os.path.exists(path):
            continue
        if os.path.exists(os.path.join(path, "infiniband/verbs.h")):
            ib_header_found = True
            break
    # Fix: the original returned `... and ib_lib_found and ib_lib_found`,
    # checking the library twice and never consulting ib_header_found, so a
    # missing verbs.h header did not disable the IB build.
    return ib_util_found and ib_lib_found and ib_header_found
if USE_DISTRIBUTED:
    # If the env variable is specified, use the value,
    # otherwise only build with IB when IB support is detected on the system
    if "USE_GLOO_IBVERBS" in os.environ:
        USE_GLOO_IBVERBS = check_env_flag("USE_GLOO_IBVERBS")
    else:
        USE_GLOO_IBVERBS = should_build_ib()
|
import re
def text_match(text):
    """Report whether *text* is exactly two lowercase words joined by '_'."""
    pattern = '^[a-z]+_[a-z]+$'
    matched = re.search(pattern, text)
    return 'Found a match!' if matched else 'Not matched!'
print(text_match("aab_cbbbc"))
print(text_match("aab_Abbbc"))
print(text_match("Aaab_abbbc")) |
import dbc.ast as ast
from dbc.visit import Visitor
from dbc.errors import CheckError
from collections import OrderedDict
class TypeChecker(Visitor):
    """ This class extends Visitor and is responsible for checking for any type errors like for example:
    - wrong type of arguments for function calls.
    - using the result of a function without return-values in calculations
    Additionaly it tags all expression Nodes with a type.
    """

    def __init__(self):
        # Points at the root node of the programm
        self.rootnode = None
        # Points at the function node that is currently processed
        self.currentfunc = None
        super().__init__()

    def check(self, node):
        """ Main method for the checker. Checks the given programm for type errors"""
        self.rootnode = node
        return self.visitProgramm(node)

    def visitUnary(self, node):
        self.visit(node.val)
        # currently there is only one unary operation and it can only be applied to INTs
        if node.val.type != "INT":
            raise CheckError(
                "Unary operation '-' can only be performed on INTs and not on "+node.val.type, node)
        # unary operations inherit the type of their operand
        node.type = node.val.type

    def visitBinary(self, node):
        self.visit(node.val1)
        self.visit(node.val2)
        # operations can only be performed if both operands have the same type.
        # Fix: the original compared node.val1.type with itself, so mismatched
        # operand types were never reported.
        if node.val1.type != node.val2.type:
            raise CheckError(
                "Both operands of a binary operation need to have the same type", node)
        if node.op in ["+", "-", "*", "/", ">=", "<=", ">", "<"] and (node.val1.type != "INT"):
            raise CheckError(
                "Both operands of operations +,-,*,/,>,<,>=,<= must be of type INT", node)
        # at this point it is guaranteed that val1 and val2 have the same type.
        # Fix: the original tested ``node.val1 == None`` (the node object itself,
        # which is never None here) instead of its type.
        if node.val1.type is None:
            raise CheckError(
                "Cannot perform binary operation on None-type", node)
        # comparisons always return BOOL. Everything else inherits the type of the operands
        if node.op in ["==", "!=", ">=", "<=", "<", ">"]:
            node.type = "BOOL"
        else:
            node.type = node.val1.type

    def visitVar(self, node):
        # get the type from the declaration of the variable (local shadows global)
        if node.name in self.currentfunc.localvartypes:
            node.type = self.currentfunc.localvartypes[node.name]
        else:
            node.type = self.rootnode.globalvartypes[node.name]

    def visitConst(self, node):
        # consts are getting their type set by the parser. (Yes, this is a strange exception...)
        pass

    def visitStr(self, node):
        # type for string-constants
        node.type = "CONSTSTR"

    def visitReturn(self, node):
        # a return inherits the type of it's expression, or none if it does not have an expression
        if node.expression:
            self.visit(node.expression)
            node.type = node.expression.type
        else:
            node.type = None
        # can not return a value from a 'void' function (or void from an INT function)
        if node.type != self.currentfunc.returntype:
            raise CheckError(
                "The type of the value to return ({}) must match the type of the function ({})".format(node.type, self.currentfunc.returntype), node)

    def visitFuncdef(self, node):
        self.currentfunc = node
        for statement in node.statements:
            self.visit(statement)
        # the entry point has a fixed signature: INT main()
        if node.name == "main":
            if node.returntype != "INT":
                raise CheckError("Main-method must return INT", node)
            if len(node.args) != 0:
                raise CheckError(
                    "Main-method does not take any arguments", node)

    def visitAssign(self, node):
        self.visit(node.value)
        # find the type of the variable. Could be a global or a local variable
        if self.currentfunc is not None and node.name in self.currentfunc.localvartypes:
            vartype = self.currentfunc.localvartypes[node.name]
        else:
            vartype = self.rootnode.globalvartypes[node.name]
        # make sure the assigned value has the same type as the variable
        if node.value.type != vartype:
            raise CheckError(
                "Cannot assign {} type value to a {}-Variable".format(node.value.type, vartype), node)

    def visitLocaldef(self, node):
        self.visit(node.value)
        # a defintion is always also an assignment. pass it on.
        self.visitAssign(node)

    def visitGlobaldef(self, node):
        self.visit(node.value)
        # a defintion is always also an assignment. pass it on.
        self.visitAssign(node)

    def visitIf(self, node):
        self.visit(node.exp)
        if node.exp.type != "BOOL":
            raise CheckError(
                "IF condition has to return a BOOL. Instead found: "+node.exp.type, node)
        for statement in node.statements:
            self.visit(statement)
        if node.elsestatements:
            for statement in node.elsestatements:
                self.visit(statement)

    def visitWhile(self, node):
        self.visit(node.exp)
        if node.exp.type != "BOOL":
            raise CheckError(
                "WHILE condition has to return a BOOL. Instead found: "+node.exp.type, node)
        for statement in node.statements:
            self.visit(statement)

    def visitCall(self, node):
        # special treatment for builtin functions
        if node.name == "input":
            if len(node.args) > 0:
                raise CheckError(
                    "input() does not take any arguments", node)
            node.type = "INT"
        elif node.name == "print":
            if len(node.args) < 1:
                raise CheckError(
                    "print() needs at least one argument", node)
            self.visit(node.args[0])
            if node.args[0].type != "CONSTSTR":
                raise CheckError(
                    "First argument to print must be a string", node)
            node.type = None
        # every other function
        else:
            # find the definition of the function
            funcdef = None
            for f in self.rootnode.funcdefs:
                if f.name == node.name:
                    funcdef = f
                    break
            if not funcdef:
                # we did not find a definition for this function. It is probably an extern function
                # there is no type checking to do
                node.type = None
                return
            # the type of the call's resut is the return-type of the function
            node.type = funcdef.returntype
            if len(node.args) != len(funcdef.args):
                raise CheckError("Function {} expects {} args. Found: {}".format(
                    node.name, len(funcdef.args), len(node.args)), node)
            for i, arg in enumerate(node.args):
                self.visit(arg)
                # Make sure we do not pass a None-type to a function
                if arg.type != funcdef.argtypes[i]:
                    raise CheckError(
                        "Argument number {} for function {} needs to be of type {}, not {}".format(i, node.name, funcdef.argtypes[i], arg.type), node)
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ScaNN utils.
Originally from the //third_party/py/language/orqa codebase, but converted to
TF 2 and modified for the needs of the project.
"""
import scann
import tensorflow as tf
builder = scann.scann_ops_pybind.builder  # alias for the ScaNN searcher builder factory
def load_scann_searcher(var_name,
                        checkpoint_path,
                        num_neighbors,
                        dimensions_per_block=2,
                        num_leaves=1000,
                        num_leaves_to_search=100,
                        training_sample_size=100000,
                        reordering_num_neighbors=0):
  """Load scann searcher from checkpoint.

  Reads the database tensor ``var_name`` from ``checkpoint_path`` and builds
  a dot-product ScaNN searcher (tree partitioning + asymmetric hashing, with
  optional exact reordering). Returns ``(np_db, searcher)``.
  """
  with tf.device("/cpu:0"):
    ckpt = tf.train.load_checkpoint(checkpoint_path)
    try:
      np_db = ckpt.get_tensor(var_name)
    except tf.errors.NotFoundError:
      # TF2 object-based checkpoints store variables under this suffix.
      np_db = ckpt.get_tensor(var_name + "/.ATTRIBUTES/VARIABLE_VALUE")
    scann_builder = builder(
        db=np_db,
        num_neighbors=num_neighbors,
        distance_measure="dot_product").tree(
            num_leaves=num_leaves,
            num_leaves_to_search=num_leaves_to_search,
            training_sample_size=training_sample_size).score_ah(
                dimensions_per_block=dimensions_per_block)
    if reordering_num_neighbors:
      scann_builder = scann_builder.reorder(
          reordering_num_neighbors=reordering_num_neighbors)
    searcher = scann_builder.build()
  return np_db, searcher
|
from nose.tools import istest, assert_equal
from mammoth.docx.xmlparser import element as xml_element
from mammoth.docx.numbering_xml import read_numbering_xml_element
from mammoth.docx.styles_xml import NumberingStyle, Styles
@istest
def find_level_returns_none_if_num_with_id_cannot_be_found():
    # An empty w:numbering element has no nums, so any lookup misses.
    numbering = _read_numbering_xml_element(xml_element("w:numbering"))
    assert_equal(None, numbering.find_level("47", "0"))
# Shared fixture: num 47 references abstract numbering 42, which defines
# level 0 as a bullet (unordered) list and level 1 as a decimal (ordered) list.
_sample_numbering_xml = xml_element("w:numbering", {}, [
    xml_element("w:abstractNum", {"w:abstractNumId": "42"}, [
        xml_element("w:lvl", {"w:ilvl": "0"}, [
            xml_element("w:numFmt", {"w:val": "bullet"})
        ]),
        xml_element("w:lvl", {"w:ilvl": "1"}, [
            xml_element("w:numFmt", {"w:val": "decimal"})
        ])
    ]),
    xml_element("w:num", {"w:numId": "47"}, [
        xml_element("w:abstractNumId", {"w:val": "42"})
    ])
])
@istest
def level_includes_level_index():
    numbering = _read_numbering_xml_element(_sample_numbering_xml)
    assert_equal("0", numbering.find_level("47", "0").level_index)
    assert_equal("1", numbering.find_level("47", "1").level_index)
@istest
def list_is_not_ordered_if_formatted_as_bullet():
    # numFmt "bullet" => unordered list
    numbering = _read_numbering_xml_element(_sample_numbering_xml)
    assert_equal(False, numbering.find_level("47", "0").is_ordered)
@istest
def list_is_ordered_if_formatted_as_decimal():
    # numFmt "decimal" => ordered list
    numbering = _read_numbering_xml_element(_sample_numbering_xml)
    assert_equal(True, numbering.find_level("47", "1").is_ordered)
@istest
def find_level_returns_none_if_level_cannot_be_found():
    # The fixture only defines levels 0 and 1.
    numbering = _read_numbering_xml_element(_sample_numbering_xml)
    assert_equal(None, numbering.find_level("47", "2"))
@istest
def when_abstract_num_has_num_style_link_then_style_is_used_to_find_num():
    # Abstract num 101 has no levels of its own; its w:numStyleLink points to
    # style "List1", whose numbering style resolves to num 200 -> abstract 100.
    numbering = _read_numbering_xml_element(
        xml_element("w:numbering", {}, [
            xml_element("w:abstractNum", {"w:abstractNumId": "100"}, [
                xml_element("w:lvl", {"w:ilvl": "0"}, [
                    xml_element("w:numFmt", {"w:val": "decimal"}),
                ]),
            ]),
            xml_element("w:abstractNum", {"w:abstractNumId": "101"}, [
                xml_element("w:numStyleLink", {"w:val": "List1"}),
            ]),
            xml_element("w:num", {"w:numId": "200"}, [
                xml_element("w:abstractNumId", {"w:val": "100"}),
            ]),
            xml_element("w:num", {"w:numId": "201"}, [
                xml_element("w:abstractNumId", {"w:val": "101"}),
            ])
        ]),
        styles=Styles.create(numbering_styles={"List1": NumberingStyle(num_id="200")}),
    )
    assert_equal(True, numbering.find_level("201", "0").is_ordered)
# See: 17.9.23 pStyle (Paragraph Style's Associated Numbering Level) in ECMA-376, 4th Edition
@istest
def numbering_level_can_be_found_by_paragraph_style_id():
    # Only abstract num 43 carries a w:pStyle, so only "List" is resolvable.
    numbering = _read_numbering_xml_element(
        xml_element("w:numbering", {}, [
            xml_element("w:abstractNum", {"w:abstractNumId": "42"}, [
                xml_element("w:lvl", {"w:ilvl": "0"}, [
                    xml_element("w:numFmt", {"w:val": "bullet"}),
                ]),
            ]),
            xml_element("w:abstractNum", {"w:abstractNumId": "43"}, [
                xml_element("w:lvl", {"w:ilvl": "0"}, [
                    xml_element("w:pStyle", {"w:val": "List"}),
                    xml_element("w:numFmt", {"w:val": "decimal"}),
                ]),
            ]),
        ]),
    )
    assert_equal(True, numbering.find_level_by_paragraph_style_id("List").is_ordered)
    assert_equal(None, numbering.find_level_by_paragraph_style_id("Paragraph"))
def _read_numbering_xml_element(element, styles=None):
    # Convenience wrapper: default to an empty Styles collection.
    if styles is None:
        styles = Styles.EMPTY
    return read_numbering_xml_element(element, styles=styles)
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from bs4 import BeautifulSoup
import requests
from itertools import chain
from tqdm import tqdm_notebook as tqdm
import re
# In[51]:
def parse_bat_table(tbl):
    """Parse an ESPNcricinfo batting <table> (BeautifulSoup tag) into a
    cleaned DataFrame.

    Rows whose first cell carries the "batsman-cell" class are batsmen who
    batted; players listed in the "Did not bat:" footer are appended as
    not-out with empty stats. Adds a boolean ``is_not_out`` column derived
    from the unnamed dismissal column.
    """
    columns = [col.text for col in tbl.find_all('th')]
    rows = tbl.find('tbody').find_all('tr')
    data = []
    for row in rows:
        cols = row.find_all('td')
        classes = cols[0].get("class")
        if classes is not None and "batsman-cell" in classes:
            data.append([col.text for col in cols])
    # Append tfoot and check for did not bat.
    # Fix: the original called .strip('') (a no-op) and raised IndexError
    # whenever the scorecard had no "Did not bat:" footer row.
    foot_rows = tbl.find('tfoot').find_all('tr')
    if len(foot_rows) > 1 and 'Did not bat:' in foot_rows[1].text:
        did_not_bat = foot_rows[1].text.split('Did not bat:')[1].strip().split(',')
        for bat in did_not_bat:
            data.append([bat, 'not out', None, None, None, None, None, None])
    # After parsing into a dataframe, clean and return it.
    df = pd.DataFrame(data=data, columns=columns)
    df = df.apply(lambda x: clean_column(x), axis=0)
    # '\xa0' is the unnamed dismissal column; a non-null "not out" entry marks
    # a not-out batsman.
    df['is_not_out'] = df['\xa0'].apply(lambda x: True if pd.isna(x)!=True and 'not out' in x else False)
    del df['\xa0']
    return df
# In[57]:
def parse_bowl_table(tbl, player_list):
    """Parse an ESPNcricinfo bowling <table> (BeautifulSoup tag) into a
    cleaned DataFrame, padding players from *player_list* who did not bowl
    with empty rows so every player appears exactly once."""
    columns = tbl.find_all('th')
    columns = [col.text for col in columns]
    rows = tbl.find('tbody').find_all('tr')
    data = []
    for row in rows:
        cols = row.find_all('td')
        data.append([col.text for col in cols])
    bowl_df = pd.DataFrame(data=data, columns=columns)
    # Players in the team who are absent from the bowling card did not bowl.
    diff_players = list(set(player_list).difference(set(bowl_df['BOWLING'])))
    no_bowl = []
    [no_bowl.append([player, None, None, None, None, None, None, None, None, None, None]) for player in diff_players]
    no_bowl = pd.DataFrame(data = no_bowl, columns = columns)
    df = pd.concat([bowl_df, no_bowl])
    df = df.apply(lambda x: clean_column(x), axis=0)
    return df
# In[79]:
def clean_column(col):
    """Normalize one scorecard column: drop '(...)' suffixes, strip characters
    outside [A-Za-z0-9. ], and fill missing values with '0'."""
    def _clean(value):
        if pd.isna(value):
            return value
        value = value.split('(')[0].strip()
        return re.sub('[^A-Za-z0-9. ]+', '', value)
    return col.apply(_clean).fillna('0')
# In[92]:
def scrape_score_table(url):
    """Scrape a full-scorecard page into {player_name: stats dict}."""
    page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
    batsman_tables = soup.find_all('table', class_='batsman')
    bowling_tables = soup.find_all('table', class_='bowler')

    def _merge_team(bat_tbl, bowl_tbl):
        # A team's bowling figures live in the opposition innings'
        # bowler table, hence the cross-pairing by the caller.
        bat = parse_bat_table(bat_tbl)
        bowl = parse_bowl_table(bowl_tbl, bat['BATTING'])
        merged = bat.merge(bowl, left_on='BATTING', right_on='BOWLING',
                           how='inner', suffixes=('_bat', '_bowl'))
        del merged['BOWLING']
        merged.rename(columns={'BATTING': 'player_name'}, inplace=True)
        return merged

    team1_merged = _merge_team(batsman_tables[0], bowling_tables[1])
    team2_merged = _merge_team(batsman_tables[1], bowling_tables[0])
    concated_df = pd.concat([team1_merged, team2_merged])
    concated_df.set_index('player_name', inplace=True)
    return concated_df.T.to_dict()
# In[6]:
# Sample IPL 2021 full-scorecard pages used to exercise the scraper.
URL1 = 'https://www.espncricinfo.com/series/ipl-2021-1249214/punjab-kings-vs-delhi-capitals-29th-match-1254086/full-scorecard'
URL2 = 'https://www.espncricinfo.com/series/ipl-2021-1249214/rajasthan-royals-vs-sunrisers-hyderabad-28th-match-1254085/full-scorecard'
URL3 = 'https://www.espncricinfo.com/series/ipl-2021-1249214/punjab-kings-vs-royal-challengers-bangalore-26th-match-1254083/full-scorecard'

if __name__ == "__main__":
    # Guard the network calls so importing this module has no side
    # effects; the bare expressions were notebook display artifacts.
    print(scrape_score_table(URL1))
    print(scrape_score_table(URL2))
    print(scrape_score_table(URL3))
|
"""Unit test package for dji_asdk_to_python."""
|
import numpy as np
import pandas as pd
import logging
import plotly.graph_objects as go
from sklearn.preprocessing import MinMaxScaler
from datetime import datetime, timedelta
import scipy.io
# Relative location of the randomized-usage .mat files under the base path.
DATA_PATH = 'data/nasa-randomized/'
# Rated cell capacity in Ah; used as the first capacity target before any
# reference discharge has been measured.
NOMINAL_CAPACITY = 2.2
class NasaRandomizedData():
    """Loader for the NASA randomized battery usage dataset (.mat files).

    Extracts every discharge cycle's voltage/current/temperature series,
    pads them to a common length, and derives a per-cycle capacity target
    by measuring reference discharges and linearly interpolating between
    them.
    """
    def __init__(self, base_path="./"):
        # base_path is prepended to the fixed dataset subdirectory.
        self.path = base_path + DATA_PATH
        self.logger = logging.getLogger()
    def get_discharge_whole_cycle_future(self, train_names, test_names):
        """Load train and test file lists and return their arrays.

        :param train_names: .mat file names relative to ``self.path``
        :param test_names: .mat file names for the test split
        :return: (train_x, train_y, test_x, test_y,
                  battery_n_cycle_train, battery_n_cycle_test,
                  time_train, time_test, current_train, current_test)
        """
        self.logger.info("Loading train data...")
        (train_x, train_y, battery_n_cycle_train,
            time_train, current_train) = self._get_data(train_names)
        self.logger.info("Loading test data...")
        (test_x, test_y, battery_n_cycle_test,
            time_test, current_test) = self._get_data(test_names)
        self.logger.info('''Train x: %s, train y soh: %s | Test x: %s, test y soh: %s |
battery n cycle train: %s, battery n cycle test: %s,
time train: %s, time test: %s |
raw current train: %s, raw current test: %s |
''' %
            (train_x.shape, train_y.shape, test_x.shape, test_y.shape,
             battery_n_cycle_train.shape, battery_n_cycle_test.shape,
             time_train.shape, time_test.shape,
             current_train.shape, current_test.shape))
        return (train_x, train_y, test_x, test_y,
                battery_n_cycle_train, battery_n_cycle_test,
                time_train, time_test,
                current_train, current_test)
    def _get_data(self, names):
        """Parse each .mat file and collect per-discharge-cycle arrays.

        :return: (cycle_x, cycle_y, battery_n_cycle, time, current);
            cycle_x is (n_cycles, max_step, 3) zero-padded on the right.
        """
        cycle_x = []
        cycle_y = []
        first_y = True          # True until a file's first capacity target is set
        y_between_count = 0     # discharge cycles seen since the last reference discharge
        battery_n_cycle = []    # cumulative discharge-cycle count after each file
        time = []
        current = []
        n_cycles = 0
        max_step = 0
        for name in names:
            self.logger.info("Processing file %s" % name)
            # Nested indexing mirrors the .mat structure emitted by MATLAB.
            raw_data = scipy.io.loadmat(self.path + name)['data'][0][0][0][0]
            cycle = pd.DataFrame(raw_data)
            cycle_num = 0
            cycle['cycle'] = cycle_num
            current_type = cycle.loc[0, 'type']
            # Assign an increasing cycle id at every C(harge)/D(ischarge)/
            # R(est) transition so each contiguous run of one type forms
            # a cycle.
            for index in range(1, len(cycle.index)):
                if ((current_type == "C" and cycle.loc[index, 'type'] == "D") or
                        (current_type == "D" and cycle.loc[index, 'type'] == "C") or
                        (current_type == "R" and cycle.loc[index, 'type'] != "R")):
                    current_type = cycle.loc[index, 'type']
                    cycle_num += 1
                cycle.loc[index, 'cycle'] = cycle_num
            for x in set(cycle["cycle"]):
                # Keep discharge cycles only.
                if cycle.loc[cycle["cycle"] == x, "type"].iloc[0] != "D":
                    continue
                cycle_x.append(np.column_stack([
                    np.hstack(cycle.loc[cycle["cycle"] == x, "voltage"].to_numpy().flatten()).flatten(),
                    np.hstack(cycle.loc[cycle["cycle"] == x, "current"].to_numpy().flatten()).flatten(),
                    np.hstack(cycle.loc[cycle["cycle"] == x, "temperature"].to_numpy().flatten()).flatten()]))
                n_cycles += 1
                step_time = np.hstack(cycle.loc[cycle["cycle"] == x, "time"].to_numpy().flatten()).flatten()
                # Convert seconds to hours.
                time.append(step_time / 3600)
                current.append(np.hstack(cycle.loc[cycle["cycle"] == x, "current"].to_numpy().flatten()).flatten())
                max_step = max([max_step, cycle_x[-1].shape[0]])
                # A reference discharge yields a measured capacity; the
                # x-2 check skips the second of back-to-back references.
                if (cycle.loc[cycle["cycle"] == x, "comment"].iloc[0] == "reference discharge" and
                        (x < 2 or cycle.loc[cycle["cycle"] == x-2, "comment"].iloc[0] != "reference discharge")):
                    # Capacity in Ah: integrate current over time, A*s -> Ah.
                    current_y = np.trapz(current[-1], np.hstack(cycle.loc[cycle["cycle"] == x, "time"].to_numpy().flatten()).flatten())/3600
                    if y_between_count > 0:
                        # Linearly interpolate targets for cycles between
                        # the previous and this reference discharge.
                        step_y = (cycle_y[-1] - current_y)/y_between_count
                        while y_between_count > 0:
                            cycle_y.append(cycle_y[-1]-step_y)
                            y_between_count -=1
                    cycle_y.append(current_y)
                elif first_y is True:
                    # No reference yet: assume the nominal capacity.
                    cycle_y.append(NOMINAL_CAPACITY)
                else:
                    # Target unknown until the next reference discharge.
                    y_between_count += 1
                first_y = False
            # Trailing cycles after the last reference keep its capacity.
            while y_between_count > 0:
                cycle_y.append(cycle_y[-1])
                y_between_count -=1
            first_y = True
            battery_n_cycle.append(n_cycles)
        cycle_x = self._to_padded_numpy(cycle_x, [len(cycle_x), max_step, len(cycle_x[0][0])])
        cycle_y = np.array(cycle_y)
        battery_n_cycle = np.array(battery_n_cycle)
        time = self._to_padded_numpy(time, [len(time), max_step])
        current = self._to_padded_numpy(current, [len(current), max_step])
        return cycle_x, cycle_y, battery_n_cycle, time, current
    def _to_padded_numpy(self, l, shape):
        """Copy the ragged list *l* into a zero array of *shape*,
        left-aligning each row."""
        padded_array = np.zeros(shape)
        for i,j in enumerate(l):
            padded_array[i][0:len(j)] = j
        return padded_array
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # One file per split keeps the demo fast; uncomment entries for real runs.
    train_names = [
        'Battery_Uniform_Distribution_Variable_Charge_Room_Temp_DataSet_2Post/data/Matlab/RW1',
        #'Battery_Uniform_Distribution_Variable_Charge_Room_Temp_DataSet_2Post/data/Matlab/RW2',
        #'Battery_Uniform_Distribution_Variable_Charge_Room_Temp_DataSet_2Post/data/Matlab/RW7',
        #'Battery_Uniform_Distribution_Discharge_Room_Temp_DataSet_2Post/data/Matlab/RW3',
        #'Battery_Uniform_Distribution_Discharge_Room_Temp_DataSet_2Post/data/Matlab/RW4',
        #'Battery_Uniform_Distribution_Discharge_Room_Temp_DataSet_2Post/data/Matlab/RW5',
        #'Battery_Uniform_Distribution_Charge_Discharge_DataSet_2Post/data/Matlab/RW9',
        #'Battery_Uniform_Distribution_Charge_Discharge_DataSet_2Post/data/Matlab/RW10',
        #'Battery_Uniform_Distribution_Charge_Discharge_DataSet_2Post/data/Matlab/RW11',
        #'RW_Skewed_Low_Room_Temp_DataSet_2Post/data/Matlab/RW13',
        #'RW_Skewed_Low_Room_Temp_DataSet_2Post/data/Matlab/RW14',
        #'RW_Skewed_Low_Room_Temp_DataSet_2Post/data/Matlab/RW15',
        #'RW_Skewed_High_Room_Temp_DataSet_2Post/data/Matlab/RW17',
        #'RW_Skewed_High_Room_Temp_DataSet_2Post/data/Matlab/RW18',
        #'RW_Skewed_High_Room_Temp_DataSet_2Post/data/Matlab/RW19',
        #'RW_Skewed_Low_40C_DataSet_2Post/data/Matlab/RW21',
        #'RW_Skewed_Low_40C_DataSet_2Post/data/Matlab/RW22',
        #'RW_Skewed_Low_40C_DataSet_2Post/data/Matlab/RW23',
        #'RW_Skewed_High_40C_DataSet_2Post/data/Matlab/RW25',
        #'RW_Skewed_High_40C_DataSet_2Post/data/Matlab/RW26',
        #'RW_Skewed_High_40C_DataSet_2Post/data/Matlab/RW27',
    ]
    test_names = [
        'Battery_Uniform_Distribution_Variable_Charge_Room_Temp_DataSet_2Post/data/Matlab/RW8',
        #'Battery_Uniform_Distribution_Discharge_Room_Temp_DataSet_2Post/data/Matlab/RW6',
        #'Battery_Uniform_Distribution_Charge_Discharge_DataSet_2Post/data/Matlab/RW12',
        #'RW_Skewed_Low_Room_Temp_DataSet_2Post/data/Matlab/RW16',
        #'RW_Skewed_High_Room_Temp_DataSet_2Post/data/Matlab/RW20',
        #'RW_Skewed_Low_40C_DataSet_2Post/data/Matlab/RW24',
        #'RW_Skewed_High_40C_DataSet_2Post/data/Matlab/RW28',
    ]
    data = NasaRandomizedData()
    (train_x, train_y, test_x, test_y,
     battery_name_cycle_train, battery_name_cycle_test,
     time_train, time_test,
     current_train, current_test) = data.get_discharge_whole_cycle_future(train_names, test_names)
    VISUALIZATION_START = 0
    VISUALIZATION_END = 100000

    def _plot_series(values, title):
        """Render one line plot with the layout shared by all figures."""
        fig = go.Figure()
        fig.add_trace(go.Scatter(y=values, mode='lines', name=title))
        fig.update_layout(title=title,
                          xaxis_title='Step',
                          yaxis_title=title,
                          width=1400,
                          height=600)
        fig.show()

    # Flatten (cycle, step, feature) into one long step axis for display.
    display_x = train_x.reshape(train_x.shape[0]*train_x.shape[1], train_x.shape[2])
    _plot_series(display_x[VISUALIZATION_START:VISUALIZATION_END, 0], 'Voltage')
    _plot_series(display_x[VISUALIZATION_START:VISUALIZATION_END, 1], 'Current')
    _plot_series(display_x[VISUALIZATION_START:VISUALIZATION_END, 2], 'Temperature')
    _plot_series(time_train.flatten()[VISUALIZATION_START:VISUALIZATION_END], 'Time')
    _plot_series(train_y.flatten()[VISUALIZATION_START:VISUALIZATION_END], 'Capacity')
import os
import torch
import cv2
import torch.nn as nn
import torchvision.datasets as datasets
import torch.utils.data as data
from PIL import Image
from torchvision import transforms
from glob import glob
from torch.utils.data import DataLoader, Dataset
import glob
import numpy as np
from skimage import io
from torch.utils.data import DataLoader, Dataset
import json
import cv2
import matplotlib.pyplot as plt
import pandas as pd
import PIL
from PIL import Image
from torchvision.transforms import transforms
from torchvision.transforms import ToTensor, Resize
from self_supervised.apply import config
import mbv_config
class MBV_Dataset_SSL(nn.Module):
    """Self-supervised dataset yielding augmented view pairs built from
    paired raw and reflectance images.

    NOTE(review): this class implements the Dataset protocol
    (__len__/__getitem__) yet subclasses nn.Module and never calls
    super().__init__() — presumably it should derive from
    torch.utils.data.Dataset; confirm before changing.
    """
    def __init__(self, raw_train_file_path, reflectance_train_file_path, training_method=None, transform = None, augmentation_strategy = None, image_type = None):
        # The *_file_path arguments are .npy files holding arrays of
        # image paths, loaded eagerly below.
        self.raw_image_path_list = []
        self.reflectance_image_path_list = []
        self.transform = transform                      # indexable pair of per-view transforms
        self.label_dict = mbv_config.label_dict
        #SSL specific: the same augmentation object serves both views.
        self.augmentation_strategy_1 = augmentation_strategy
        self.augmentation_strategy_2 = augmentation_strategy
        self.training_method = training_method          # e.g. config.dc or config.BYOL
        self.image_type = image_type                    # selects the branch in __getitem__
        self.raw_image_path_list = np.load (raw_train_file_path)
        self.reflectance_image_path_list = np.load(reflectance_train_file_path)
    def __len__(self):
        # Dataset length = number of raw images (assumed to match the
        # reflectance list — TODO confirm both .npy files align).
        return len(self.raw_image_path_list)
    def __getitem__(self, index):
        """Return augmented view(s) for the sample at *index*.

        The branch taken depends on (training_method, image_type); if no
        branch matches, None is returned implicitly.
        """
        raw_image_path = self.raw_image_path_list[index]
        raw_image = PIL.Image.open(raw_image_path)
        reflectance_image_path = self.reflectance_image_path_list[index]
        reflectance_image = PIL.Image.open(reflectance_image_path)
        if self.training_method == config.dc:
            if self.image_type == mbv_config.image_seprately:
                raw_image = np.asarray(raw_image)
                reflectance_image = np.asarray(reflectance_image)
                # Stack raw + two reflectance copies into a 3-channel image.
                raw_ref_ref_image = cv2.merge((raw_image, reflectance_image, reflectance_image))
                view1_view2_view2 = self.augmentation_strategy_1(image = raw_ref_ref_image)
                transformed_view1, transformed_view2 = None, None
                if self.transform != None:
                    transformed_view1 = self.transform[0](image = view1_view2_view2)
                    #transformed_view2 = self.transform[1](image = view1_view2_view2[:,:,1])
                # NOTE(review): transformed_view2 remains None here (its
                # assignment is commented out), so the second vstack below
                # stacks Nones — confirm this branch is unused or fix it.
                transformed_view1_3ch = np.vstack([transformed_view1, transformed_view1, transformed_view1])
                transformed_view2_3ch = np.vstack([transformed_view2, transformed_view2, transformed_view2])
                return transformed_view1_3ch, transformed_view2_3ch
            if self.image_type == mbv_config.image_both_seprately:
                '''using state for uniform random cropp on both ref and raw image'''
                # Restoring the RNG state makes both augmentations use the
                # same randomness (e.g. identical crops) on raw and
                # reflectance images.
                state = torch.get_rng_state()
                view1 = self.augmentation_strategy_1(raw_image)
                torch.set_rng_state(state)
                view2 = self.augmentation_strategy_1(reflectance_image)
                transformed_view1, transformed_view2 = None, None
                if self.transform != None:
                    transformed_view1 = self.transform[0](view1)
                    transformed_view2 = self.transform[1](view2)
                # Replicate each single-channel view to 3 channels.
                transformed_view1_3ch = np.vstack([transformed_view1, transformed_view1, transformed_view1])
                transformed_view2_3ch = np.vstack([transformed_view2, transformed_view2, transformed_view2])
                return transformed_view1_3ch, transformed_view2_3ch
            elif self.image_type == mbv_config.image_both:
                transformed_view1, transformed_view2 = None, None
                if self.transform != None:
                    transformed_view1 = self.transform[0](raw_image)
                    transformed_view2 = self.transform[1](reflectance_image)
                # NOTE(review): transformed_view1/2 above are computed but
                # never returned in this branch — confirm intent.
                raw_image = np.asarray(raw_image)
                reflectance_image = np.asarray(reflectance_image)
                combined_image = cv2.merge((raw_image, reflectance_image, raw_image))
                #combined_image = Image.fromarray(combined_image).astype('float32')
                # Augmentation is called keyword-style here (presumably an
                # albumentations-style callable) but positionally in other
                # branches — TODO confirm the strategy's interface.
                view1 = self.augmentation_strategy_1(image = combined_image)
                view2 = self.augmentation_strategy_2(image = combined_image)
                return view1, view2
            elif self.image_type == mbv_config.image_raw:
                # Two independently augmented views of the raw image only.
                view1 = self.augmentation_strategy_1(raw_image)
                view2 = self.augmentation_strategy_2(raw_image)
                transformed_view1, transformed_view2 = None, None
                if self.transform != None:
                    transformed_view1 = self.transform[0](view1)
                    transformed_view2 = self.transform[0](view2)
                return transformed_view1, transformed_view2
            elif self.image_type == mbv_config.image_ref:
                # Two independently augmented views of the reflectance image.
                view1 = self.augmentation_strategy_1(reflectance_image)
                view2 = self.augmentation_strategy_2(reflectance_image)
                transformed_view1, transformed_view2 = None, None
                if self.transform != None:
                    transformed_view1 = self.transform[0](view1)
                    transformed_view2 = self.transform[0](view2)
                return transformed_view1, transformed_view2
        elif self.training_method == config.BYOL:
            # BYOL branches return a single view per sample.
            if self.image_type == mbv_config.image_both:
                transformed_view1, transformed_view2 = None, None
                if self.transform != None:
                    transformed_view1 = self.transform[0](raw_image)
                    transformed_view2 = self.transform[1](reflectance_image)
                combined_image = cv2.merge((raw_image, reflectance_image, raw_image))
                view1 = self.augmentation_strategy_1(combined_image)
                return view1
            elif self.image_type == mbv_config.image_raw:
                view1 = self.augmentation_strategy_1(raw_image)
                transformed_view1 = None
                if self.transform != None:
                    transformed_view1 = self.transform[0](view1)
                return transformed_view1
            elif self.image_type == mbv_config.image_ref:
                view1 = self.augmentation_strategy_1(reflectance_image)
                transformed_view1 = None
                if self.transform != None:
                    transformed_view1 = self.transform[0](view1)
                return transformed_view1
def get_MBV_trainset_loader(raw_train_file_path, reflectance_train_file_path, batch_size, training_method=None, transform = None, augmentation_strategy = None, image_type = None):
    """Build a shuffling DataLoader over the SSL raw/reflectance dataset."""
    ssl_dataset = MBV_Dataset_SSL(
        raw_train_file_path,
        reflectance_train_file_path,
        training_method,
        transform,
        augmentation_strategy,
        image_type,
    )
    return DataLoader(ssl_dataset, batch_size=batch_size, shuffle=True,
                      num_workers=1)
from django.contrib import admin
# Expose the Experiment Model on the Admin Site
from tp.models import Experiment
# TODO: register more models? Review access control / security of the admin panel.
admin.site.register(Experiment)
|
# -*- coding: utf-8 -*-
# @Time : 3/31/2021 3:08 PM
# @Author : Rex Yu
# @Mail : jiafish@outlook.com
# @Github : https://github.com/Rexyyj
from common.MyMQTT import *
from common.RegManager import *
import json
import time
class MotorConnector():
    """MQTT-driven door-motor device connector.

    Loads its configuration, registers the motor with the HomeCat
    catalog, and reacts to switch (on/off) and motor (door command)
    messages.
    """
    def __init__(self, confAddr):
        """Read config, create the MQTT client, and register the device.

        Exits the process when the configuration cannot be read or when
        the catalog rejects the registration.
        """
        try:
            # Context manager closes the handle; the original
            # json.load(open(...)) leaked it. The bare except is
            # narrowed to file/parse errors.
            with open(confAddr) as conf_file:
                self.conf = json.load(conf_file)
        except (OSError, json.JSONDecodeError):
            print("Configuration file not found")
            exit()
        self.deviceId = self.conf["deviceId"]
        self.client = MyMQTT(self.deviceId, self.conf["broker"], int(self.conf["port"]), self)
        self.workingStatus = "on"      # commands are honored only while "on"
        self.doorStatus = "close"
        self.motorTopic = self.conf["motorTopic"]
        self.switchTopic = self.conf["switchTopic"]
        regMsg = {"registerType": "device",
                  "id": self.deviceId,
                  "type": "motor",
                  "topic": self.motorTopic,
                  "attribute": {"floor": self.conf["floor"],
                                "enterZone": self.conf["enterZone"],
                                "leavingZone": self.conf["leavingZone"],
                                "entranceId": self.conf["entrance"],
                                "currentStatus": self.doorStatus}
                  }
        self.Reg = RegManager(self.conf["homeCatAddress"])
        self.museumSetting = self.Reg.register(regMsg)
        if self.museumSetting == "":
            # Empty response means registration failed; nothing to run.
            exit()
    def start(self):
        """Start the MQTT client and subscribe to both command topics."""
        self.client.start()
        self.client.mySubscribe(self.switchTopic)
        self.client.mySubscribe(self.motorTopic)
    def stop(self):
        """Stop the MQTT client and deregister from the catalog."""
        self.client.stop()
        self.Reg.delete("device", self.conf["deviceId"])
    def notify(self, topic, msg):
        """MQTT callback: handle switch toggles and motor commands."""
        data = json.loads(msg)
        if topic == self.switchTopic:
            # Switch messages may target everything, all motors, or this
            # specific device.
            if data["target"]=="ALL" or data["target"]=="motor" or data["target"]==self.deviceId:
                self.workingStatus = data["switchTo"]
                print(str(self.deviceId)+" switch to "+data["switchTo"])
        elif topic == self.motorTopic:
            # Motor commands are honored only while switched on and when
            # addressed to this device.
            if self.workingStatus=="on" and data["id"]==self.deviceId:
                targetStatus = data["targetStatus"]
                self.setDoorStatus(targetStatus)
    def setDoorStatus(self, status):
        """Drive the door to *status*; the sleep simulates motor travel."""
        self.doorStatus = status
        time.sleep(1)
        print("Door " + self.deviceId + " status:" + self.doorStatus)
if __name__ == "__main__":
    configFile = input("Enter the location of configuration file: ")
    if len(configFile) == 0:
        # Fall back to the default configuration shipped with the project.
        configFile = "./configs/motorConfig.json"
    motorConnector = MotorConnector(configFile)
    mode = input("Enter the mode of motor:(default is normal)")
    # NOTE(review): 'mode' only changes the banner below; it is never
    # passed to the connector — confirm whether Enter/Exit should alter
    # behavior.
    if mode == "Exit" or mode=="Enter":
        print("Motor set to "+mode+" mode...")
    else:
        print("Motor set to normal mode...")
    motorConnector.start()
    print("Motor connector is running...")
    # Block until the operator asks to quit, then clean up.
    while True:
        c = input("Enter q if want to stop motor connector")
        if c == "q":
            break
    motorConnector.stop()
|
'''https://leetcode.com/problems/jump-game/
55. Jump Game
Medium
8538
507
Add to List
Share
You are given an integer array nums. You are initially positioned at the array's first index, and each element in the array represents your maximum jump length at that position.
Return true if you can reach the last index, or false otherwise.
Example 1:
Input: nums = [2,3,1,1,4]
Output: true
Explanation: Jump 1 step from index 0 to 1, then 3 steps to the last index.
Example 2:
Input: nums = [3,2,1,0,4]
Output: false
Explanation: You will always arrive at index 3 no matter what. Its maximum jump length is 0, which makes it impossible to reach the last index.
Constraints:
1 <= nums.length <= 104
0 <= nums[i] <= 105'''
# Time: O(n)
# Space: O(1)
from typing import List
class Solution(object):
    """Greedy jump-game check: track the furthest reachable index."""

    def canJump(self, A):
        """Return True when the last index of *A* is reachable from 0."""
        furthest = 0
        i = 0
        n = len(A)
        # Stop as soon as the scan passes the furthest reachable index.
        while i < n and i <= furthest:
            candidate = i + A[i]
            if candidate > furthest:
                furthest = candidate
            i += 1
        return furthest >= n - 1
class Solution:
    """Type-annotated greedy variants of the jump-game check."""

    def canJump(self, nums: List[int]) -> bool:
        """Return True when the last index is reachable from index 0."""
        reach = 0
        for idx, step in enumerate(nums):
            # An index beyond the current reach can never be visited.
            if idx > reach:
                return False
            reach = max(reach, idx + step)
        return True

    def funcname(self, nums):
        """Equivalent greedy check (duplicate kept for compatibility)."""
        best = 0
        for idx in range(len(nums)):
            if idx > best:
                return False
            candidate = idx + nums[idx]
            if candidate > best:
                best = candidate
        return True
if __name__ == "__main__":
    # Quick manual check of the greedy solver.
    solver = Solution()
    print(solver.canJump([2, 0, 0]))
|
# mode: python; py-indent-offset: 4; tab-width: 8; coding:utf-8
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from sos_trades_core.execution_engine.data_connector.abstract_data_connector import AbstractDataConnector
import trino
import re
import copy
class TrinoDataConnector(AbstractDataConnector):
    """
    Specific data connector for Trino
    """
    # Keys a connection-info dictionary must provide.
    data_connection_list = ['hostname',
                            'port', 'username', 'catalog', 'schema']
    NAME = 'TRINO'
    CONNECTOR_TYPE = 'connector_type'
    CONNECTOR_DATA = 'connector_data'
    CONNECTOR_TABLE = 'connector_table'
    CONNECTOR_CONDITION = 'connector_condition'
    # Column positions in a 'SHOW COLUMNS' result row.
    COLUMN_NAME = 0
    COLUMN_TYPE = 1
    COLUMN_UNKNOWN_1 = 2
    COLUMN_UNKNOWN_2 = 3
    # Matches composite 'row(...)' column types so they can be recursed into.
    COLUMN_REGEXP = "^row\\((.*)\\)$"
    # Class-level cache of table -> column definitions, shared by all instances.
    table_columns_definition = {}

    def __init__(self):
        """
        Constructor for Trino data connector
        """
        # Fixed: the original called bare super(), which is a no-op —
        # super().__init__() actually initialises the base connector.
        # TODO(review): confirm AbstractDataConnector.__init__ takes no args.
        super().__init__()
        self.hostname = None
        self.port = None
        self.username = None
        self.catalog = None
        self.schema = None

    def __extract_connection_info(self, connection_data):
        """
        Convert structure with connection data given as parameter into member variable
        :param connection_data: dictionary regarding connection information, must map TrinoDataConnector.data_connection_list
        """
        self.hostname = connection_data['hostname']
        self.port = connection_data['port']
        self.username = connection_data['username']
        self.catalog = connection_data['catalog']
        self.schema = connection_data['schema']

    def load_data(self, connection_data):
        """
        Method to load a data from Trino regarding input information
        :param: connection_data_dict, contains the necessary information to connect to Trino API with request
        :type: dict
        """
        self.__extract_connection_info(connection_data)
        # Connect to Trino api
        trino_connection = trino.dbapi.connect(
            host=self.hostname,
            port=self.port,
            user=self.username,
            catalog=self.catalog,
            schema=self.schema,
            http_scheme='http')
        # Build the SELECT; table/condition are set by set_connector_request.
        table = connection_data[self.CONNECTOR_TABLE]
        condition = connection_data[self.CONNECTOR_CONDITION]
        sql = f'SELECT * FROM {table}'
        if condition:
            sql = f'{sql} WHERE {condition}'
        connection_cursor = trino_connection.cursor()
        connection_cursor.execute(sql)
        rows = connection_cursor.fetchall()
        # Ensure the column layout for this table is cached, then map rows.
        self.__update_table_column(connection_cursor, table)
        return self.__map_data_with_table_column(rows, table)

    def write_data(self, connection_data):
        # Writing through Trino is not supported by this connector.
        raise Exception("method not implemented")

    def set_connector_request(self, connector_info, table, condition):
        """
        Update connector dictionary with request information
        :param connector_info: dictionary regarding connection information, must map TrinoDataConnector.data_connection_list
        :param table: target table name for the request
        :param condition: condition to implement in SQL format
        :return: the updated connector_info dictionary
        """
        connector_info[TrinoDataConnector.CONNECTOR_TABLE] = table
        connector_info[TrinoDataConnector.CONNECTOR_CONDITION] = condition
        return connector_info

    def __update_table_column(self, connection_cursor, table):
        """
        Send a request to Trino in order to get the column name of the table use for the request.
        Columns definition will be stored in a static dictionary in order to improve subsequent treatment
        :param connection_cursor: current connection cursor to use
        :param table: table to get the definition
        :type table: str
        """
        if table not in TrinoDataConnector.table_columns_definition:
            connection_cursor.execute(f'SHOW COLUMNS FROM {table}')
            rows = connection_cursor.fetchall()
            TrinoDataConnector.table_columns_definition[table] = self.__get_column_from_rows(rows)

    def __get_column_from_rows(self, request_rows):
        """
        translate request result from Trino into a dictionary structure with table columns as key
        :param request_rows: Trino show columns for table request
        :return: organize dictionary with column structure
        """
        columns_definition = {}
        for row in request_rows:
            column_name = row[TrinoDataConnector.COLUMN_NAME]
            column_type = row[TrinoDataConnector.COLUMN_TYPE]
            # Composite 'row(...)' types are expanded recursively.
            sub_object = re.findall(TrinoDataConnector.COLUMN_REGEXP, column_type)
            if len(sub_object) > 0:
                # Split sub string which in the form
                # 'name1 type_name1, name2 type_name2, ....
                split_by_comma = sub_object[0].split(',')
                for index in range(len(split_by_comma)):
                    split_by_comma[index] = split_by_comma[index].strip().split(' ')
                sub_definition = self.__get_column_from_rows(split_by_comma)
                columns_definition[column_name] = sub_definition
            else:
                columns_definition[column_name] = column_type
        return columns_definition

    def __map_data_with_table_column(self, request_rows, table_name):
        """
        Using column definition build dictionary that map attribute name with their values
        :param request_rows: Trino request result
        :param table_name: corresponding table
        :return: dictionary list that map attribute with their value
        """
        results = []
        for one_result in request_rows:
            # Deep copy so each result row fills its own definition tree.
            table_definition = copy.deepcopy(TrinoDataConnector.table_columns_definition[table_name])
            self.__insert_list_value(table_definition, one_result)
            results.append(table_definition)
        return results

    def __insert_list_value(self, dictionary_to_update, list_to_insert):
        """
        Manage mapping for a value list regarding dictionary key
        :param dictionary_to_update: Current dictionary to update
        :param list_to_insert: Datalist to insert
        """
        values_index = 0
        for key in dictionary_to_update.keys():
            value = list_to_insert[values_index]
            values_index += 1
            if isinstance(value, list):
                # Nested list maps onto the nested column definition.
                self.__insert_list_value(dictionary_to_update[key], value)
            else:
                dictionary_to_update[key] = value
if __name__ == '__main__':
    # Ad-hoc smoke test against an internal Trino deployment.
    trino_connector = TrinoDataConnector()
    data_connection = {
        'hostname': 'idlvsrv201.eu.airbus.corp',
        'port': 30300,
        'username': 'sostrades',
        'catalog': 'mongodb',
        'schema': 'world'
    }
    # Query the whole TECHNO table (no WHERE condition).
    trino_connector.set_connector_request(data_connection, 'TECHNO', None)
    result = trino_connector.load_data(data_connection)
    print(result)
|
from .admin_feedback_list_view import AdminFeedbackListView
from .region_feedback_list_view import RegionFeedbackListView
from .admin_feedback_actions import (
mark_admin_feedback_as_read,
mark_admin_feedback_as_unread,
delete_admin_feedback,
)
from .region_feedback_actions import (
mark_region_feedback_as_read,
mark_region_feedback_as_unread,
delete_region_feedback,
)
|
from django.conf import settings
from django.contrib import admin
from django.conf.urls import include, url
from django.conf.urls.static import static
# URL routes. The regexes are anchored with '^': Django matches with
# re.search, so an unanchored 'admin/' would also match any URL merely
# containing that fragment (e.g. 'foo/admin/').
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^', include('home.urls')),
    url(r'^db/', include(('database.urls', 'database'), namespace='db'))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # Debug-toolbar routes are prepended so they win over the catch-all
    # home include; only wired up when DEBUG is enabled.
    import debug_toolbar
    urlpatterns = [
        url(r'^__debug__/', include(debug_toolbar.urls)),
    ] + urlpatterns
# File: awssystemsmanager_consts.py
#
# Copyright (c) 2019-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
#
# Define your constants here
# Asset-configuration JSON keys.
SSM_JSON_ACCESS_KEY = "access_key"
SSM_JSON_SECRET_KEY = "secret_key"
SSM_JSON_REGION = "region"
SSM_JSON_DEFAULT_S3_BUCKET = "default_s3_bucket"
SSM_JSON_BAD_ASSET_CONFIG_MSG = "Please provide access keys or select assume role check box in asset configuration"
# Human-readable region label -> AWS region code.
SSM_REGION_DICT = {
    "US East (N. Virginia)": "us-east-1",
    "US East (Ohio)": "us-east-2",
    "US West (N. California)": "us-west-1",
    "US West (Oregon)": "us-west-2",
    "Asia Pacific (Hong Kong)": "ap-east-1",
    "Asia Pacific (Mumbai)": "ap-south-1",
    "Asia Pacific (Seoul)": "ap-northeast-2",
    "Asia Pacific (Singapore)": "ap-southeast-1",
    "Asia Pacific (Sydney)": "ap-southeast-2",
    "Asia Pacific (Tokyo)": "ap-northeast-1",
    "Canada (Central)": "ca-central-1",
    "China (Beijing)": "cn-north-1",
    "China (Ningxia)": "cn-northwest-1",
    "EU (Frankfurt)": "eu-central-1",
    "EU (Ireland)": "eu-west-1",
    "EU (London)": "eu-west-2",
    # Fixed: the value previously had a leading space (" eu-west-3"),
    # which is not a valid region code.
    "EU (Paris)": "eu-west-3",
    "EU (Stockholm)": "eu-north-1",
    "South America (Sao Paulo)": "sa-east-1",
    "AWS GovCloud (US-East)": "us-gov-east-1",
    "AWS GovCloud (US)": "us-gov-west-1"
}
# SSM run-command documents and their expected content hashes.
POWERSHELL_DOCUMENT = 'AWS-RunPowerShellScript'
POWERSHELL_DOC_HASH = '2142e42a19e0955cc09e43600bf2e633df1917b69d2be9693737dfd62e0fdf61'
LINUX_DOCUMENT = 'AWS-RunShellScript'
LINUX_DOC_HASH = '99749de5e62f71e5ebe9a55c2321e2c394796afe7208cff048696541e6f6771e'
DEFAULT_REQUEST_TIMEOUT = 30  # in seconds
|
# Normal libraries
from enum import IntEnum
class DefaultErrorType(IntEnum):
    """
    An enumerable object for the default errors that occur with interaction creation.
    .. note::
        This is a port from v3's errors, which basically delegate errors to a unique error code.
        This enum's purpose is to help remember error codes. Importing this class is not required.
        i.e. : raise InteractionException(1) == raise InteractionException(REQUEST_FAILURE)
    """
    BASE = 0                      # generic/base error
    REQUEST_FAILURE = 1           # an HTTP request to the API failed
    INCORRECT_FORMAT = 2
    DUPLICATE_COMMAND = 3
    DUPLICATE_CALLBACK = 4
    DUPLICATE_SLASH_CLIENT = 5
    CHECK_FAILURE = 6
    INCORRECT_TYPE = 7
    INCORRECT_GUILD_ID_TYPE = 8
    INCORRECT_COMMAND_DATA = 9
    ALREADY_RESPONDED = 10        # interaction was already acknowledged
class OpCodeType(IntEnum):
    """
    An enumerable object for the Gateway's OPCODE result state.
    This is representative of the OPCodes generated by the WebSocket.
    .. note::
        Equivalent of `Gateway Opcodes <https://discord.com/developers/docs/topics/opcodes-and-status-codes#gateway-opcodes>`_ in the Discord API.
    """
    DISPATCH = 0            # an event was dispatched (receive)
    HEARTBEAT = 1           # keep-alive ping (send/receive)
    IDENTIFY = 2            # start a new session (send)
    PRESENCE = 3            # presence update (send)
    VOICE_STATE = 4         # join/leave/move voice channels (send)
    VOICE_PING = 5
    RESUME = 6              # resume a dropped session (send)
    RECONNECT = 7           # server asks the client to reconnect (receive)
    REQUEST_MEMBERS = 8     # request guild member chunks (send)
    INVALIDATE_SESSION = 9  # session invalidated (receive)
    HELLO = 10              # sent right after connecting (receive)
    HEARTBEAT_ACK = 11      # heartbeat acknowledged (receive)
    GUILD_SYNC = 12
class WSCloseCodeType(IntEnum):
    """
    An enumerable object for the Gateway's closing connection codes.
    This is representative of the Gateway responses generated by the WebSocket.
    .. note::
        Equivalent of `Gateway Close Event Codes <https://discord.com/developers/docs/topics/opcodes-and-status-codes#gateway-gateway-close-event-codes>`_ in the Discord API.
    """
    UNKNOWN_ERROR = 4000
    UNKNOWN_OPCODE = 4001
    DECODE_ERROR = 4002
    NOT_AUTHENTICATED = 4003
    AUTHENTICATION_FAILED = 4004
    ALREADY_AUTHENTICATED = 4005
    # NOTE: 4006 is intentionally absent here (not part of this mapping).
    INVALID_SEQ = 4007
    RATE_LIMITED = 4008
    SESSION_TIMED_OUT = 4009
    INVALID_SHARD = 4010
    SHARDING_REQUIRED = 4011
    INVALID_API_VERSION = 4012
    INVALID_INTENTS = 4013
    DISALLOWED_INTENTS = 4014
class HTTPResponseType(IntEnum):
    """
    An enumerable object for the HTTP response codes Discord gives out.
    .. note::
        This enumerable does not list the documented "5xx", as it may vary.
    """
    OK = 200
    CREATED = 201
    NO_CONTENT = 204
    NOT_MODIFIED = 304
    BAD_REQUEST = 400
    UNAUTHORIZED = 401
    FORBIDDEN = 403
    NOT_FOUND = 404
    METHOD_NOT_ALLOWED = 405
    TOO_MANY_REQUESTS = 429     # rate limited
    GATEWAY_UNAVAILABLE = 502
class JSONResponseType(IntEnum):
    """
    An enumerable object for the JSON error response codes Discord gives out.

    .. note::
        Equivalent of `JSON Error Codes <https://discord.com/developers/docs/topics/opcodes-and-status-codes#json-json-error-codes>`_ in the Discord API.
    """

    GENERIC_ERROR = 0
    # 10xxx — unknown-entity errors
    UNKNOWN_ACCOUNT = 10001
    UNKNOWN_APPLICATION = 10002
    UNKNOWN_CHANNEL = 10003
    UNKNOWN_GUILD = 10004
    UNKNOWN_INTEGRATION = 10005
    UNKNOWN_INVITE = 10006
    UNKNOWN_MEMBER = 10007
    UNKNOWN_MESSAGE = 10008
    UNKNOWN_OVERWRITE = 10009
    UNKNOWN_PROVIDER = 10010
    UNKNOWN_ROLE = 10011
    UNKNOWN_TOKEN = 10012
    UNKNOWN_USER = 10013
    UNKNOWN_EMOJI = 10014
    UNKNOWN_WEBHOOK = 10015
    UNKNOWN_WEBHOOK_SERVICE = 10016
    UNKNOWN_SESSION = 10020
    UNKNOWN_BAN = 10026
    UNKNOWN_SKU = 10027
    UNKNOWN_STORE_LISTING = 10028
    UNKNOWN_ENTITLEMENT = 10029
    UNKNOWN_TEAM = 10030
    UNKNOWN_LOBBY = 10031
    UNKNOWN_BRANCH = 10032
    UNKNOWN_STORE_DIRECTORY_LAYOUT = 10033
    UNKNOWN_REDISTRIBUTABLE = 10036
    UNKNOWN_GIFT_CODE = 10038
    UNKNOWN_STREAM = 10049
    UNKNOWN_GUILD_BOOST_COOLDOWN = 10050
    UNKNOWN_GUILD_TEMPLATE = 10057
    UNKNOWN_DISCOVERY_CATEGORY = 10059
    UNKNOWN_STICKER = 10060
    UNKNOWN_INTERACTION = 10062
    UNKNOWN_APPLICATION_COMMAND = 10063
    UNKNOWN_APPLICATION_COMMAND_PERMISSIONS = 10066
    UNKNOWN_STAGE = 10067
    UNKNOWN_GUILD_MEMBER_VERIFICATION_FORM = 10068
    UNKNOWN_GUILD_WELCOME_SCREEN = 10069
    UNKNOWN_SCHEDULED_EVENT = 10070
    UNKNOWN_SCHEDULED_EVENT_USER = 10071
    # 20xxx — action-not-allowed errors
    BOTS_NOT_ALLOWED = 20001
    ONLY_BOTS_ALLOWED = 20002
    EXPLICIT_CONTENT = 20009
    USER_NOT_AUTHORIZED_FOR_APPLICATION = 20012
    ACCOUNT_DISABLED = 20013
    RATE_LIMIT_SLOWMODE = 20016
    TEAM_OWNERSHIP_REQUIRED = 20018
    RATE_LIMIT_ANNOUNCEMENT_MESSAGE_EDIT = 20022
    RATE_LIMIT_CHANNEL_WRITE = 20028
    NAME_CONTAINS_DISALLOWED_WORD = 20031
    GUILD_SUBSCRIPTION_LEVEL_TOO_LOW = 20035
    # 30xxx — limit-reached errors
    MAX_GUILDS = 30001
    MAX_FRIENDS = 30002
    MAX_PINS = 30003
    MAX_RECIPIENTS = 30004
    MAX_ROLES = 30005
    MAX_WEBHOOKS = 30007
    MAX_EMOJIS = 30008
    MAX_REACTIONS = 30010
    MAX_CHANNELS = 30013
    MAX_ATTACHMENTS = 30015
    MAX_INVITES = 30016
    MAX_ANIMATED_EMOJIS = 30018
    MAX_GUILD_MEMBERS = 30019
    MAX_GUILD_DISCOVERY_CATEGORY = 30030
    GUILD_HAS_TEMPLATE = 30031
    MAX_THREAD_PARTICIPANTS = 30033
    MAX_BANS = 30035
    MAX_BAN_FETCHES = 30037
    MAX_STICKERS = 30039
    RATE_LIMIT_PRUNE = 30040
    # 40xxx — request/authorization errors
    UNAUTHORIZED = 40001
    EMAIL_VERIFICATION_REQUIRED = 40002
    RATE_LIMIT_PRIVATE_CHANNEL_OPENING = 40003
    REQUEST_TOO_LARGE = 40005
    FEATURE_DISABLED = 40006
    USER_BANNED = 40007
    USER_NOT_CONNECTED_TO_VOICE = 40032
    MESSAGE_CROSSPOSTED = 40033
    USER_IDENTITY_VERIFICATION_PROCESSING = 40035
    USER_IDENTITY_VERIFICATION_SUCCEEDED = 40036
    APPLICATION_NAME_USED = 40041
    # 50xxx — permission/validation errors
    MISSING_ACCESS = 50001
    INVALID_ACCOUNT_TYPE = 50002
    INVALID_ACTION_FOR_PRIVATE_CHANNEL = 50003
    WIDGET_DISABLED = 50004
    CANNOT_EDIT_MESSAGE_OF_OTHER_USER = 50005
    CANNOT_CREATE_EMPTY_MESSAGE = 50006
    CANNOT_MESSAGE_USER = 50007
    CANNOT_SEND_MESSAGE_TO_NON_TEXT_CHANNEL = 50008
    CHANNEL_VERIFICATION_LEVEL_TOO_HIGH = 50009
    OAUTH2_APPLICATION_HAS_NO_BOT = 50010
    OAUTH2_APPLICATION_LIMIT_REACHED = 50011
    INVALID_OAUTH2_STATE = 50012
    MISSING_PERMISSIONS = 50013
    INVALID_TOKEN = 50014
    NOTE_TOO_LONG = 50015
    BULK_DELETE_AMOUNT_OUT_OF_RANGE = 50016
    CANNOT_PIN_MESSAGE_IN_DIFFERENT_CHANNEL = 50019
    INVITE_CODE_INVALID_OR_TAKEN = 50020
    INVALID_ACTION_FOR_SYSTEM_MESSAGE = 50021
    INVALID_ACTION_FOR_THIS_CHANNEL_TYPE = 50024
    INVALID_OAUTH2_ACCESS_TOKEN = 50025
    MISSING_OAUTH2_SCOPE = 50026
    INVALID_WEBHOOK_TOKEN = 50027
    INVALID_ROLE = 50028
    INVALID_RECIPIENTS = 50033
    BULK_DELETE_MESSAGE_TOO_OLD = 50034
    INVALID_FORM_BODY = 50035
    CANNOT_ADD_USER_TO_GUILD_WHERE_BOT_IS_NOT_IN = 50036
    INVALID_API_VERSION = 50041
    ASSET_SIZE_TOO_LARGE = 50045
    INVALID_ASSET = 50046
    CANNOT_SELF_REDEEM_THIS_GIFT = 50054
    PAYMENT_SOURCE_REQUIRED_TO_REDEEM_GIFT = 50070
    CANNOT_DELETE_COMMUNITY_CHANNEL = 50074
    INVALID_STICKER_SENT = 50081
    INVALID_ACTION_FOR_ARCHIVED_THREAD = 50083
    INVALID_THREAD_NOTIFICATION_SETTING = 50084
    BEFORE_VALUE_EARLIER_THAN_CREATION_TIME = 50085
    INVALID_COUNTRY_CODE = 50095
    GUILD_MONETIZATION_REQUIRED = 50097
    MORE_BOOSTS_REQUIRED = 50101
    # 60xxx and above — miscellaneous feature-specific errors
    MFA_REQUIRED = 60003
    NO_USERS_WITH_TAG_EXISTS = 80004
    REACTION_BLOCKED = 90001
    RESOURCE_OVERLOADED = 130000
    STAGE_ALREADY_OPEN = 150006
    MESSAGE_HAS_THREAD = 160004
    THREAD_LOCKED = 160005
    MAX_ACTIVE_THREADS = 160006
    MAX_ACTIVE_ANNOUNCEMENT_THREADS = 160007
    INVALID_LOTTIE_JSON = 170001
    NO_RASTERIZED_IMAGES_IN_LOTTIE = 170002
    STICKER_MAXIMUM_FRAMERATE_EXCEEDED = 170003
    STICKER_FRAME_COUNT_OVER_1000 = 170004
    STICKER_MAXIMUM_DIMENSIONS_EXCEEDED = 170005
    STICKER_FRAME_RATE_OUT_OF_EXPECTED_RANGE = 170006
    STICKER_ANIMATION_DURATION_EXCEEDS_5_SECOND = 170007
|
##############################
# author : qianqiu
# email : qianqiu@tencent.com
# time : 2022.1.7
##############################
import os
import tvm
import tvm.relay as relay
from tvm.driver import tvmc
from tvm.contrib import graph_executor
from tvm import auto_scheduler
import onnx
import onnx_graphsurgeon as gs
import onnxruntime as ort
from onnx import shape_inference
import numpy as np
class CudaKernel(object):
    """Use Tvm AutoScheduler to generate an efficient cuda kernel and the
    params needed by a trt-plugin.

    Parameters
    ----------
    model_path : str or onnx.ModelProto
        The onnx model — either a path on disk or an already-loaded model.
    tuning_node : onnx-graphsurgeon node
        The operator in the onnx model for which a plugin must be generated.
    plugin_name : str
        Name given to the generated TensorRT plugin.
    one_node_model : str
        Path where the extracted single-node submodel is written.
    """

    def __init__(
        self,
        model_path,
        tuning_node,
        plugin_name,
        one_node_model="submodel.onnx",
    ):
        self._model_path = model_path
        self._one_node_model = one_node_model
        self._tuning_name = tuning_node.name
        self._target = tvm.target.Target("cuda")
        # Auto-scheduler tuning records are appended here by tune() and
        # replayed by run() via ApplyHistoryBest.
        self._log_file = "/tmp/tuning.log"
        # Accept either a file path or an already-loaded ModelProto.
        if isinstance(model_path, str):
            self._onnx_model = onnx.load(model_path)
        else:
            self._onnx_model = model_path
        self._tuning_node = tuning_node
        self._onnx_op_type = tuning_node.op
        self._plugin_name = plugin_name
        # self._plugin_name = 'tpat_' + tuning_name

    def run(self, opt_level=3, input_data=None, opset=None):
        """
        Tvm Auto Scheduler: extract the target node, tune it, compile the
        best schedule and execute it once on the local CUDA device.

        Parameters
        ----------
        opt_level : int
            Relay optimization level passed to the PassContext and task
            extraction.
        input_data : array-like or list of array-like, optional
            Sample inputs; when given, their shapes seed the relay importer.
        opset : int, optional
            ONNX opset forwarded to ``relay.frontend.from_onnx``.
        """
        graph_def = self.extract_target_onnx_node(self._onnx_model)
        # Normalize a single sample input into a one-element list.
        if not isinstance(input_data, list) and input_data is not None:
            input_data = [input_data]
        if input_data is not None:
            _, shape_dict = self.get_input_data_shape_dict(graph_def, input_data)
        if input_data is not None:
            mod, params = relay.frontend.from_onnx(graph_def, shape_dict, opset=opset)
        else:
            mod, params = relay.frontend.from_onnx(graph_def)
        tasks, weights = tvmc.autotuner.autoscheduler_get_tuning_tasks(
            mod, params, self._target, include_simple_tasks=True, opt_level=opt_level
        )
        # Some graphs extract no tunable tasks; then we compile directly with
        # whatever records already exist in the log file.
        if len(tasks) != 0:
            self.tune(tasks, weights)
        # Compile with the history best
        print("Compile...", flush=True)
        with auto_scheduler.ApplyHistoryBest(self._log_file):
            with tvm.transform.PassContext(
                opt_level=opt_level, config={"relay.backend.use_auto_scheduler": True}
            ):
                self._lib = relay.build(mod, self._target, params=params)
        dev = tvm.device(str(self._target), 0)
        self._module = graph_executor.create(
            self._lib.get_graph_json(), self._lib.get_lib(), dev
        )
        print("Running...", flush=True)
        self._module.run()

    def check_plugin(self, onnx_node):
        """
        Check whether this operator's plugin had been generated.
        (For multiple ops which have the same type.)

        Returns True only when op type, attributes, and every input/output's
        class, shape and dtype (plus constant values) match exactly.
        """
        print(
            "Check onnx node {}\n with plugin: {}".format(self._tuning_node, onnx_node)
        )
        if (
            self._tuning_node.op != onnx_node.op
            or self._tuning_node.attrs != onnx_node.attrs
        ):
            return False
        if len(self._tuning_node.inputs) != len(onnx_node.inputs) or len(
            self._tuning_node.outputs
        ) != len(onnx_node.outputs):
            return False
        for inp, onnx_node_inp in zip(self._tuning_node.inputs, onnx_node.inputs):
            if (
                inp.__class__ != onnx_node_inp.__class__
                or inp.shape != onnx_node_inp.shape
                or inp.dtype != onnx_node_inp.dtype
            ):
                return False
            # Constant inputs (e.g. weights) must also match by value.
            if isinstance(inp, gs.ir.tensor.Constant):
                if not np.array_equal(inp.values, onnx_node_inp.values):
                    return False
        for out, onnx_node_out in zip(self._tuning_node.outputs, onnx_node.outputs):
            if (
                out.__class__ != onnx_node_out.__class__
                or out.shape != onnx_node_out.shape
                or out.dtype != onnx_node_out.dtype
            ):
                return False
        return True

    def check_existing_plugins(self, trt_plugin_mapping_onnx_node):
        """Return the name of an already-generated, equivalent plugin, or None."""
        for trt_plugin, onnx_node in trt_plugin_mapping_onnx_node.items():
            if self.check_plugin(onnx_node):
                return trt_plugin
        return None

    def compute_tensor_shape(self, input_model_path):
        """
        Get output shape through onnx-runtime and shape_inference.

        Builds a temporary "half model" whose outputs are the tuning node's
        (variable) inputs followed by its outputs, runs it once on random
        data, and returns the concrete shapes in that order.
        """
        inferred_model = shape_inference.infer_shapes(onnx.load(input_model_path))
        graph = gs.import_onnx(inferred_model)
        tuning_nodes = [node for node in graph.nodes if node.name == self._tuning_name]
        tuning_node = tuning_nodes[0]
        # Only Variable inputs are exported; Constants have known values.
        tuning_node_inputs = [
            graph.tensors()[inp.name].to_variable(dtype=inp.dtype, shape=inp.shape)
            for inp in tuning_node.inputs
            if inp.__class__ == gs.Variable
        ]
        tuning_node_outputs = [
            graph.tensors()[oup.name].to_variable(dtype=oup.dtype, shape=oup.shape)
            for oup in tuning_node.outputs
        ]
        graph.outputs = []
        graph.outputs.extend(tuning_node_inputs)
        graph.outputs.extend(tuning_node_outputs)
        # print("half graph: \n", graph.outputs)
        graph.cleanup()
        half_model = gs.export_onnx(graph)
        half_model_path = "half_model.onnx"
        onnx.save(half_model, half_model_path)
        session = ort.InferenceSession(half_model_path)
        outname = [output.name for output in session.get_outputs()]
        dummy_input = {}
        # Random values shifted into (1, 2) to avoid degenerate zeros.
        # NOTE(review): assumes every graph input has a fully static shape —
        # dynamic dimensions would break np.random.random(gi.shape).
        for gi in graph.inputs:
            dummy_input[gi.name] = (1 + np.random.random(gi.shape)).astype(gi.dtype)
        dummy_output = session.run(outname, dummy_input)
        computed_tensor_shapes = []
        for i in range(len(tuning_node_inputs)):
            assert tuning_node_inputs[i].name == outname[i]
            computed_tensor_shapes.append(dummy_output[i].shape)
            # print(f"node output {tuning_node_inputs[i].name} with shape {dummy_output[i].shape}")
        for i in range(len(tuning_node_outputs)):
            assert tuning_node_outputs[i].name == outname[len(tuning_node_inputs) + i]
            computed_tensor_shapes.append(
                dummy_output[len(tuning_node_inputs) + i].shape
            )
        # Clean up the temporary model file.
        os.remove(half_model_path)
        return computed_tensor_shapes

    def extract_target_onnx_node(self, model):
        """
        Extract the target operator from the onnx model into a single-node
        submodel (saved to ``self._one_node_model``) and return it.
        """
        inferred_model = shape_inference.infer_shapes(model)
        graph = gs.import_onnx(inferred_model)
        nodes = graph.nodes
        tensors = graph.tensors()
        tuning_node_list = [node for node in nodes if node.name == self._tuning_name]
        assert (
            len(tuning_node_list) != 0
        ), "Not get tuning node in onnx model, please check op name or onnx model"
        tuning_node = tuning_node_list[0]
        # self._tuning_node = tuning_node
        # self._onnx_op_type = tuning_node.op
        tuning_node_inputs = [
            tensors[inp.name].to_variable(dtype=inp.dtype, shape=inp.shape)
            for inp in tuning_node.inputs
            if inp.__class__ == gs.Variable
        ]
        tuning_node_outputs = [
            tensors[oup.name].to_variable(dtype=oup.dtype, shape=oup.shape)
            for oup in tuning_node.outputs
        ]
        computed_tensor_shapes = self.compute_tensor_shape(
            self._model_path
        )
        # enhanced shape calculation: overwrite possibly-symbolic shapes with
        # the concrete ones observed at runtime.
        for i in range(len(tuning_node_inputs)):
            tuning_node_inputs[i].shape = computed_tensor_shapes[i]
        for i in range(len(tuning_node_outputs)):
            tuning_node_outputs[i].shape = computed_tensor_shapes[
                len(tuning_node_inputs) + i
            ]
        graph.inputs = tuning_node_inputs
        graph.outputs = tuning_node_outputs
        graph.cleanup()
        submodel = gs.export_onnx(graph)
        onnx.save(submodel, self._one_node_model)
        return submodel

    def tune(self, tasks, task_weights):
        """
        The Search Config for Tvm AutoScheduler.

        Tuning records are appended to ``self._log_file``.
        """
        print("Begin tuning...", flush=True)
        measure_ctx = auto_scheduler.LocalRPCMeasureContext(
            repeat=10, min_repeat_ms=300, timeout=10
        )
        tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
        tune_option = auto_scheduler.TuningOptions(
            num_measure_trials=200,  # change this to 20000 to achieve the best performance
            runner=measure_ctx.runner,
            measure_callbacks=[auto_scheduler.RecordToFile(self._log_file)],
        )
        tuner.tune(tune_option)

    def get_input_data_shape_dict(self, graph_def, input_data):
        """
        Get shape of input data.

        Returns (input_names, shape_dict); for a list input, input_names is
        an index->name dict, otherwise the single input's name.
        """
        if isinstance(input_data, list):
            input_names = {}
            shape_dict = {}
            for i, _ in enumerate(input_data):
                input_names[i] = graph_def.graph.input[i].name
                shape_dict[input_names[i]] = input_data[i].shape
        else:
            input_names = graph_def.graph.input[0].name
            shape_dict = {input_names: input_data.shape}
        return input_names, shape_dict

    # Cuda Kernel generated by tvm.
    @property
    def cuda_source_code(self):
        try:
            source_code = self._lib.get_lib().imported_modules[0].get_source()
            # Type rewrites so the emitted source matches the plugin's
            # expected int/float ABI.
            # NOTE(review): plain string replacement also rewrites these type
            # names inside identifiers/comments — confirm acceptable for the
            # generated code.
            source_code = source_code.replace("signed char*", "int*")
            source_code = source_code.replace("uint64_t*", "int*")
            source_code = source_code.replace("long long", "int")
            source_code = source_code.replace("double", "float")
        except IndexError:
            # No imported CUDA module present.
            return None
        return source_code

    # Tvm runtime Module.
    @property
    def runtime_module(self):
        return self._lib

    # Tvm Graph executor
    @property
    def graph_module(self):
        return self._module

    # Constant params in operator. such as weight in Matmul operator.
    @property
    def constant_param(self):
        return self._lib.get_constant_params()

    # Tvm executor the order of device functions.
    @property
    def device_funcs_inorder(self):
        return self._lib.get_device_function_list()

    # The config of Grid. Block. Thread.
    @property
    def device_funcs_thread_config(self):
        return self._lib.get_grid_block_thread_config()

    # Independently allocated memory on the device side.
    @property
    def device_allocate_global_memory(self):
        return self._lib.get_device_memory_size()

    # The number of inputs.
    @property
    def num_inputs(self):
        return self._module.get_num_inputs()

    # The number of output.
    @property
    def num_outputs(self):
        return self._module.get_num_outputs()

    # The dtype of variables which are stored in memory.
    @property
    def workspace_dtype(self):
        return self._module.get_workspace_dtype()

    # The size of variables which are stored in memory.
    @property
    def workspace_size(self):
        return self._module.get_workspace_size()

    # Tvm executor the order of host functions.
    @property
    def func_inorder(self):
        return self._module.get_func_inorder()

    # The storage index in memory for each variable.
    @property
    def storageid(self):
        return self._module.get_storageid()

    # Generated plugin name
    @property
    def plugin_name(self):
        return self._plugin_name

    # Tuning op type.
    @property
    def onnx_op_type(self):
        return self._onnx_op_type

    # Tuning op name.
    @property
    def tuning_name(self):
        return self._tuning_name
|
import glob
import settings as settings
import zipfile
import arrow
import os
import shutil
import logging
"""
look in unpacked_renamed_ejp_files
look for all matching pdf and xml files
elife_poa_e000213.xml
elife_poa_e000213.pdf
If there is an xml or pdf file that is not matched, log an error
for the day of delivery take these files and put them into a zip file named
elife_poa_YYYYMMDD.zip
put that zip file into `ftp-to-hw`
move processed pdf and xml files into
made_ftp_ready_on/YYYMMDD
GOTCHAS
When run multiple times it may possibly corrupt existing zip files, worthy of investigation.
"""
## Setup logging
# local logger: per-script events, written to this script's own log file
logger = logging.getLogger('prepPdfXMLforFTP')
hdlr = logging.FileHandler('prepPdfXMLforFTP.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)

# global logger: workflow-level milestones shared across the ejp-to-hw scripts
workflow_logger = logging.getLogger('ejp_to_hw_workflow')
hdlr = logging.FileHandler('ejp_to_hw_workflow.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
workflow_logger.addHandler(hdlr)
workflow_logger.setLevel(logging.INFO)
def zip(src, dst):
    """Zip the whole directory tree under ``src`` into ``<dst>.zip``.

    Arcnames are taken relative to ``src``. Note this function shadows the
    ``zip`` builtin; the name is kept for backward compatibility with callers.
    """
    abs_src = os.path.abspath(src)
    # Context manager guarantees the archive is closed (and its central
    # directory flushed) even if a write fails part-way through; the old
    # explicit close() leaked a half-written file on error.
    with zipfile.ZipFile("%s.zip" % (dst), "w") as zf:
        for dirname, subdirs, files in os.walk(src):
            for filename in files:
                absname = os.path.abspath(os.path.join(dirname, filename))
                arcname = absname[len(abs_src) + 1:]
                logger.info('zipping %s as %s' % (os.path.join(dirname, filename),
                                                  arcname))
                zf.write(absname, arcname)
def check_matching_files_exist(pdf_file_articles_numbers, xml_file_articles_numbers):
    """Warn about every article number that lacks its pdf/xml counterpart."""
    for article in pdf_file_articles_numbers:
        if article not in xml_file_articles_numbers:
            logger.warning(str(article) + " has no xml match")
    for article in xml_file_articles_numbers:
        if article not in pdf_file_articles_numbers:
            logger.warning(str(article) + " has no pdf match")
def zip_matching_files(pdf_file_articles_numbers, xml_file_articles_numbers, zf, sourcedir):
    """Add the pdf and xml of every article present in both lists to archive ``zf``."""
    matched = [a for a in pdf_file_articles_numbers if a in xml_file_articles_numbers]
    for article in matched:
        for ext in (".pdf", ".xml"):
            filename = article + ext
            # Strip any path component so the arcname is just the file name.
            arcname = filename.split(os.sep)[-1]
            zf.write(sourcedir + "/" + filename, arcname)
def move_zipfile_to_hw_staging(xml_pdf_zip, ftp_to_hw):
    """Move the day's zip archive into the ftp-to-hw staging directory."""
    destination = ftp_to_hw + "/" + xml_pdf_zip
    shutil.move(xml_pdf_zip, destination)
def move_processed_files(pdf_file_articles_numbers, xml_file_articles_numbers, sourcedir, made_ftp_ready):
    """Archive each matched article's pdf and xml from ``sourcedir`` into ``made_ftp_ready``."""
    for article in pdf_file_articles_numbers:
        if article not in xml_file_articles_numbers:
            continue
        for ext in (".pdf", ".xml"):
            # Strip any path component before building the source/target paths.
            filename = (article + ext).split(os.sep)[-1]
            shutil.move(sourcedir + "/" + filename, made_ftp_ready + "/" + filename)
def set_datestamp():
    """Return the current UTC date as a YYYYMMDD string."""
    now = arrow.utcnow().datetime
    return "%04d%02d%02d" % (now.year, now.month, now.day)
def set_xml_pdf_zip_name():
    """Build the dated archive name, e.g. ``elife_poa_20220101.zip``."""
    return "elife_poa_" + set_datestamp() + ".zip"
def set_made_ftp_ready_dir():
    """Return today's made-ftp-ready directory, creating it on first use."""
    target = settings.MADE_FTP_READY + "/" + set_datestamp()
    if not os.path.exists(target):
        os.makedirs(target)
    return target
def get_filename_from_path(f, extension):
    """
    Get a filename minus the supplied file extension
    and without any folder or path.

    E.g. ``get_filename_from_path("dir/elife_poa_e000213.pdf", ".pdf")``
    returns ``"elife_poa_e000213"``.
    """
    # Everything before the first occurrence of the extension.
    filename = f.split(extension)[0]
    # str.split cannot raise here, so the previous bare ``except: pass``
    # guard was dead code that could only have masked unrelated bugs.
    return filename.split(os.sep)[-1]
def prepare_pdf_xml_for_ftp():
    """Match pdf/xml pairs in staging, zip them, stage the zip for FTP,
    then archive the processed files into the dated directory."""
    sourcedir = settings.STAGING_TO_HW_DIR
    ftp_to_hw = settings.FTP_TO_HW_DIR

    pdf_file_articles_numbers = [
        get_filename_from_path(f, ".pdf") for f in glob.glob(sourcedir + "/*.pdf")
    ]
    xml_file_articles_numbers = [
        get_filename_from_path(f, ".xml") for f in glob.glob(sourcedir + "/*.xml")
    ]

    made_ftp_ready_dir = set_made_ftp_ready_dir()
    xml_pdf_zip = set_xml_pdf_zip_name()
    zf = zipfile.ZipFile(xml_pdf_zip, "w")

    check_matching_files_exist(pdf_file_articles_numbers, xml_file_articles_numbers)
    zip_matching_files(pdf_file_articles_numbers, xml_file_articles_numbers, zf, sourcedir)
    # Close zip file before moving it into the staging directory
    zf.close()

    move_zipfile_to_hw_staging(xml_pdf_zip, ftp_to_hw)
    move_processed_files(pdf_file_articles_numbers, xml_file_articles_numbers,
                         sourcedir, made_ftp_ready_dir)
if __name__ == "__main__":
    # Script entry point: run once per delivery day.
    prepare_pdf_xml_for_ftp()
    workflow_logger.info("pdf and xml files prepared in readyness to ftp")
|
""" Hosts the actions and a function to create them """
from collections import namedtuple
from shared.orders import Order
Action = namedtuple("Action", "event orders conditions")
def loadAction(event, dat, named):
    """ Returns an action instance from a Xml structure and name mapping """
    orders = [Order().load(entry, named) for entry in dat]
    return Action(event, orders, [])
def registerActions(action_dict, named):
    """ Create a dict of event name -> list of Actions from a Xml describing them.

    Parameters:
        action_dict: iterable of mappings with 'event' and 'orders' keys.
        named: name mapping forwarded to Order.load via loadAction.
    """
    actions = {}
    for action in action_dict:
        act = loadAction(action['event'], action['orders'], named)
        # setdefault replaces the previous membership test on actions.keys()
        # (one lookup instead of a scan + insert).
        actions.setdefault(action['event'], []).append(act)
    return actions
|
#!/usr/bin/python3
# -*- coding:Utf-8 -*-
"""
Module principal, c'est le point d'entrée
"""
import os
import shutil
import datetime
import sys
import argparse
import time
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Notify', '0.7')
from gi.repository import Gtk, Gdk, GdkPixbuf, Gio, Notify
from crud import Crud
from crudportail import CrudPortail
class AppWindow(Gtk.ApplicationWindow):
    """ The main Gtk window. """

    def __init__(self, app, args, crud):
        """Build the main window, wire its signals and open the portal view.

        app  -- the Gtk.Application this window belongs to
        args -- parsed command-line options (argparse.Namespace)
        crud -- shared Crud configuration/model object
        """
        Gtk.ApplicationWindow.__init__(self, title="Welcome to CRUDENOME", application=app)
        # When the window is given the "delete_event" signal (this is given
        # by the window manager, usually by the "close" option, or on the
        # titlebar), we ask it to call the delete_event () function
        # as defined below. The data passed to the callback
        # function is NULL and is ignored in the callback function.
        self.connect('delete-event', self.delete_event)
        # Here we connect the "destroy" event to a signal handler.
        # This event occurs when we call gtk_widget_destroy() on the window,
        # or if we return FALSE in the "delete_event" callback.
        # self.connect("destroy", Gtk.main_quit)
        Notify.init('Crudenome')
        self.args = args  # command-line parameters
        self.crud = crud
        self.crud.set_window(self)
        self.set_title(self.crud.config["name"])
        self.activate_focus()
        self.set_border_width(10)
        self.set_default_size(1280, 600)
        # The icon may be given either as a file path or as a themed icon name.
        if "icon_file" in self.crud.config:
            self.set_icon_from_file(self.get_resource_path(self.crud.config["icon_file"]))
        if "icon_name" in self.crud.config:
            self.set_icon_name(self.crud.config["icon_name"])
        self.crud_portail = CrudPortail(self.crud)

    def get_resource_path(self, rel_path):
        """Return the absolute path of a resource located relative to this module."""
        dir_of_py_file = os.path.dirname(__file__)
        rel_path_to_resource = os.path.join(dir_of_py_file, rel_path)
        abs_path_to_resource = os.path.abspath(rel_path_to_resource)
        return abs_path_to_resource

    def delete_event(self, widget, event, data=None):
        """Window-close handler; returning False lets GTK destroy the window."""
        # If you return FALSE in the "delete_event" signal handler,
        # GTK will emit the "destroy" signal. Returning TRUE means
        # you don't want the window to be destroyed.
        # This is useful for popping up 'are you sure you want to quit?'
        # type dialogs.
        # print "delete event occurred"
        # if self.crud.get_basehost():
        #     ticket_user = os.path.getmtime(self.crud.get_basename())
        #     ticket_host = os.path.getmtime(self.crud.get_basehost())
        #     if ticket_user > ticket_host:
        #         # la base du host a changée depuis la dernière prise
        #         dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.QUESTION,
        #             Gtk.ButtonsType.YES_NO, "La base sur le serveur a changée")
        #         dialog.format_secondary_text("Veux-tu écraser la base du serveur ?")
        #         response = dialog.run()
        #         if response == Gtk.ResponseType.YES:
        #             shutil.copy2(self.crud.get_basename(), self.crud.get_basehost())
        #             self.crud.logger.info("Backup OK %s %s", self.crud.get_basehost(), datetime.datetime.fromtimestamp(ticket_host))
        #             notif = Notify.Notification.new('Backup OK'\
        #                 , "%s %s" % (self.crud.get_basehost(), datetime.datetime.fromtimestamp(ticket_host))\
        #                 , 'dialog-information')
        #             # notif.add_action(
        #             #     'id_callback', # identifiant
        #             #     'Fermer', # texte du bouton
        #             #     self.on_notif, # function callback de notre bouton
        #             #     None, # user_datas, ce dont vous avez besoin dans la callback
        #             #     None # fonction qui supprime les user_datas
        #             # )
        #             notif.show()
        #             time.sleep(3)
        #             # return True
        #         elif response == Gtk.ResponseType.NO:
        #             self.crud.logger.info("Backup abandonné")
        #         dialog.destroy()
        #     else:
        #         shutil.copy2(self.crud.get_basename(), self.crud.get_basehost())
        #         self.crud.logger.info("Backup OK %s %s", self.crud.get_basehost(), datetime.datetime.fromtimestamp(ticket_host))
        #         notif = Notify.Notification.new('Backup OK'\
        #             , "%s %s" % (self.crud.get_basehost(), datetime.datetime.fromtimestamp(ticket_host))\
        #             , 'dialog-information')
        #         # notif.add_action(
        #         #     'id_callback', # identifiant
        #         #     'Fermer', # texte du bouton
        #         #     self.on_notif, # function callback de notre bouton
        #         #     None, # user_datas, ce dont vous avez besoin dans la callback
        #         #     None # fonction qui supprime les user_datas
        #         # )
        #         notif.show()
        #         time.sleep(3)
        #         # return True
        # Change FALSE to TRUE and the main window will not be destroyed
        # with a "delete_event".
        return False

    def on_notif(self, notif_object, action_name, users_data):
        """ Callback for the notification button: close it and quit Gtk. """
        notif_object.close()
        Gtk.main_quit()
class Application(Gtk.Application):
    """ The main class of a Gnome application. """

    def __init__(self, *args, **kwargs):
        """
        constructor of the Gtk Application
        create and activate a MyWindow, with self (the MyApplication) as
        application the window belongs to.
        Note that the function in C activate() becomes do_activate() in Python
        """
        Gtk.Application.__init__(self, flags=Gio.ApplicationFlags.HANDLES_COMMAND_LINE, **kwargs)
        self.args = args  # store for parsed command line options
        self.window = None

    def do_activate(self):
        """
        show the window and all its content
        this line could go in the constructor of MyWindow as well
        self.win.show_all()
        """
        if not self.window:
            # Windows are associated with the application;
            # when the last one is closed the application shuts down.
            # Load the configuration/parameters.
            self.crud = Crud()
            if self.args.application:
                self.crud.set_app(self.args.application)
            self.window = AppWindow(self, self.args, self.crud)

    def do_startup(self):
        """
        Start up the application
        Note that the function in C startup() becomes do_startup() in Python
        """
        Gtk.Application.do_startup(self)
        # create a menu (a Gio.Menu)
        menu = Gio.Menu()
        menu.append("Préférences", "app.preference")
        # append a menu item with label "About" and action "app.about"
        menu.append("About", "app.about")
        # append a menu item with label "Quit" and action "app.quit"
        menu.append("Quit", "app.quit")
        # set menu as the menu for the application
        self.set_app_menu(menu)
        # a new simpleaction - for the application
        quit_action = Gio.SimpleAction.new("quit", None)
        quit_action.connect("activate", self.on_quit)
        self.add_action(quit_action)
        # a new simpleaction - for the application
        about_action = Gio.SimpleAction.new("about", None)
        about_action.connect("activate", self.on_about)
        self.add_action(about_action)
        preference_action = Gio.SimpleAction.new("preference", None)
        preference_action.connect("activate", self.on_preference)
        self.add_action(preference_action)

    def do_command_line(self, args):
        '''
        Gtk.Application command line handler
        called if Gio.ApplicationFlags.HANDLES_COMMAND_LINE is set.
        must call the self.do_activate() to get the application up and running.
        '''
        # https://docs.python.org/fr/3/howto/argparse.html
        # print "do_command_line", args.get_arguments()
        Gtk.Application.do_command_line(self, args)  # call the default commandline handler
        # make a command line parser
        parser = argparse.ArgumentParser(prog='crudenome')
        # add a -a/--application option
        parser.add_argument('-a', '--application', help="Nom de l'application au démarrage")
        # parse the command line stored in args, but skip the first element (the filename)
        # self.args = parser.parse_args(args.get_arguments()[1:])
        # NOTE(review): parse_args() reads this process's sys.argv, not the Gio
        # command line in ``args`` — a remote invocation would not see its own
        # arguments; confirm whether the commented-out line above was intended.
        self.args = parser.parse_args()
        # call the main program do_activate() to start up the app
        self.do_activate()
        return 0

    def on_about(self, action, param):
        """
        The About... dialog, populated from the crud configuration.
        """
        about = Gtk.AboutDialog()
        about.set_transient_for(self.window)
        about.set_title(self.crud.config["title"])
        about.set_program_name(self.crud.config["name"])
        about.set_version(self.crud.config["version"])
        about.set_copyright(self.crud.config["copyright"])
        about.set_comments(self.crud.config["comments"])
        about.set_website(self.crud.config["web_site"])
        about.set_logo(GdkPixbuf.Pixbuf.new_from_file(self.crud.config["icon_file"]))
        # License text is read from the LICENSE file in the working directory.
        with open('LICENSE', 'r') as file:
            about.set_license(file.read())
        about.connect("response", lambda d, r: d.destroy())
        about.show()

    def on_preference(self, action, param):
        """
        Application preferences (not implemented yet).
        """
        # print "on_preference"

    def on_quit(self, action, param):
        """
        Quit the application.
        """
        # print "quit"
        self.quit()
# get the style from the css file and apply it
# style_provider = Gtk.CssProvider()
# style_provider.load_from_path(os.path.dirname(os.path.realpath(__file__)) + '/style.css')
# Gtk.StyleContext.add_provider_for_screen(
#     Gdk.Screen.get_default(),
#     style_provider,
#     Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)

# Module entry point: instantiate the application, hand control to the Gtk
# main loop, and propagate its exit status to the shell.
myapp = Application()
exit_status = myapp.run(sys.argv)
sys.exit(exit_status)
|
# Generated by Django 3.0.6 on 2020-09-16 19:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``auth_provider`` to the user model, defaulting to 'email'."""

    dependencies = [
        ('authentication', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='auth_provider',
            field=models.CharField(default='email', max_length=255),
        ),
    ]
|
# -*- coding: utf-8 -*-
# Mapping of ISO 4217 currency code -> English currency name.
# Includes some non-ISO codes in circulation (e.g. GGP, IMP, JEP, PRB, TVD)
# and both old/new Mauritanian codes (MRO/MRU).
currencymap = {
    "AED": "United Arab Emirates dirham"
    ,"AFN": "Afghan afghani"
    ,"ALL": "Albanian lek"
    ,"AMD": "Armenian dram"
    ,"ANG": "Netherlands Antillean guilder"
    ,"AOA": "Angolan kwanza"
    ,"ARS": "Argentine peso"
    ,"AUD": "Australian dollar"
    ,"AWG": "Aruban florin"
    ,"AZN": "Azerbaijani manat"
    ,"BAM": "Bosnia and Herzegovina convertible mark"
    ,"BBD": "Barbadian dollar"
    ,"BDT": "Bangladeshi taka"
    ,"BGN": "Bulgarian lev"
    ,"BHD": "Bahraini dinar"
    ,"BIF": "Burundian franc"
    ,"BMD": "Bermudian dollar"
    ,"BND": "Brunei dollar"
    ,"BOB": "Bolivian boliviano"
    ,"BRL": "Brazilian real"
    ,"BSD": "Bahamian dollar"
    ,"BTN": "Bhutanese ngultrum"
    ,"BWP": "Botswana pula"
    ,"BYN": "Belarusian ruble"
    ,"BZD": "Belize dollar"
    ,"CAD": "Canadian dollar"
    ,"CDF": "Congolese franc"
    ,"CHF": "Swiss franc"
    ,"CLP": "Chilean peso"
    ,"CNY": "Chinese yuan"
    ,"COP": "Colombian peso"
    ,"CRC": "Costa Rican colón"
    ,"CUC": "Cuban convertible peso"
    ,"CUP": "Cuban peso"
    ,"CVE": "Cape Verdean escudo"
    ,"CZK": "Czech koruna"
    ,"DJF": "Djiboutian franc"
    ,"DKK": "Danish krone"
    ,"DOP": "Dominican peso"
    ,"DZD": "Algerian dinar"
    ,"EGP": "Egyptian pound"
    ,"ERN": "Eritrean nakfa"
    ,"ETB": "Ethiopian birr"
    ,"EUR": "Euro"
    ,"FJD": "Fijian dollar"
    ,"FKP": "Falkland Islands pound"
    ,"GBP": "British pound"
    ,"GEL": "Georgian lari"
    ,"GGP": "Guernsey pound"
    ,"GHS": "Ghanaian cedi"
    ,"GIP": "Gibraltar pound"
    ,"GMD": "Gambian dalasi"
    ,"GNF": "Guinean franc"
    ,"GTQ": "Guatemalan quetzal"
    ,"GYD": "Guyanese dollar"
    ,"HKD": "Hong Kong dollar"
    ,"HNL": "Honduran lempira"
    ,"HRK": "Croatian kuna"
    ,"HTG": "Haitian gourde"
    ,"HUF": "Hungarian forint"
    ,"IDR": "Indonesian rupiah"
    ,"ILS": "Israeli new shekel"
    ,"IMP": "Manx pound"
    ,"INR": "Indian rupee"
    ,"IQD": "Iraqi dinar"
    ,"IRR": "Iranian rial"
    ,"ISK": "Icelandic króna"
    ,"JEP": "Jersey pound"
    ,"JMD": "Jamaican dollar"
    ,"JOD": "Jordanian dinar"
    ,"JPY": "Japanese yen"
    ,"KES": "Kenyan shilling"
    ,"KGS": "Kyrgyzstani som"
    ,"KHR": "Cambodian riel"
    ,"KMF": "Comorian franc"
    ,"KPW": "North Korean won"
    ,"KRW": "South Korean won"
    ,"KWD": "Kuwaiti dinar"
    ,"KYD": "Cayman Islands dollar"
    ,"KZT": "Kazakhstani tenge"
    ,"LAK": "Lao kip"
    ,"LBP": "Lebanese pound"
    ,"LKR": "Sri Lankan rupee"
    ,"LRD": "Liberian dollar"
    ,"LSL": "Lesotho loti"
    ,"LYD": "Libyan dinar"
    ,"MAD": "Moroccan dirham"
    ,"MDL": "Moldovan leu"
    ,"MGA": "Malagasy ariary"
    ,"MKD": "Macedonian denar"
    ,"MMK": "Burmese kyat"
    ,"MNT": "Mongolian tögrög"
    ,"MOP": "Macanese pataca"
    ,"MRO": "Mauritanian ouguiya"
    ,"MRU": "Mauritanian ouguiya"
    ,"MUR": "Mauritian rupee"
    ,"MVR": "Maldivian rufiyaa"
    ,"MWK": "Malawian kwacha"
    ,"MXN": "Mexican peso"
    ,"MYR": "Malaysian ringgit"
    ,"MZN": "Mozambican metical"
    ,"NAD": "Namibian dollar"
    ,"NGN": "Nigerian naira"
    ,"NIO": "Nicaraguan córdoba"
    ,"NOK": "Norwegian krone"
    ,"NPR": "Nepalese rupee"
    ,"NZD": "New Zealand dollar"
    ,"OMR": "Omani rial"
    ,"PAB": "Panamanian balboa"
    ,"PEN": "Peruvian sol"
    ,"PGK": "Papua New Guinean kina"
    ,"PHP": "Philippine piso"
    ,"PKR": "Pakistani rupee"
    ,"PLN": "Polish złoty"
    ,"PRB": "Transnistrian ruble"
    ,"PYG": "Paraguayan guaraní"
    ,"QAR": "Qatari riyal"
    ,"RON": "Romanian leu"
    ,"RSD": "Serbian dinar"
    ,"RUB": "Russian ruble"
    ,"RWF": "Rwandan franc"
    ,"SAR": "Saudi riyal"
    ,"SBD": "Solomon Islands dollar"
    ,"SCR": "Seychellois rupee"
    ,"SDG": "Sudanese pound"
    ,"SEK": "Swedish krona"
    ,"SGD": "Singapore dollar"
    ,"SHP": "Saint Helena pound"
    ,"SLL": "Sierra Leonean leone"
    ,"SOS": "Somali shilling"
    ,"SRD": "Surinamese dollar"
    ,"SSP": "South Sudanese pound"
    ,"STD": "São Tomé and Príncipe dobra"
    ,"SYP": "Syrian pound"
    ,"SZL": "Swazi lilangeni"
    ,"THB": "Thai baht"
    ,"TJS": "Tajikistani somoni"
    ,"TMT": "Turkmenistan manat"
    ,"TND": "Tunisian dinar"
    ,"TOP": "Tongan paʻanga"
    ,"TRY": "Turkish lira"
    ,"TTD": "Trinidad and Tobago dollar"
    ,"TVD": "Tuvaluan dollar"
    ,"TWD": "New Taiwan dollar"
    ,"TZS": "Tanzanian shilling"
    ,"UAH": "Ukrainian hryvnia"
    ,"UGX": "Ugandan shilling"
    ,"USD": "United States dollar"
    ,"UYU": "Uruguayan peso"
    ,"UZS": "Uzbekistani soʻm"
    ,"VEF": "Venezuelan bolívar"
    ,"VND": "Vietnamese đồng"
    ,"VUV": "Vanuatu vatu"
    ,"WST": "Samoan tālā"
    ,"XAF": "Central African CFA franc"
    ,"XCD": "Eastern Caribbean dollar"
    ,"XOF": "West African CFA franc"
    ,"XPF": "CFP franc"
    ,"YER": "Yemeni rial"
    ,"ZAR": "South African rand"
    ,"ZMW": "Zambian kwacha"
}
from django.contrib.auth import get_user_model
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.db import models
from django.utils.text import Truncator
from meta.models import ModelMeta
class Post(ModelMeta, models.Model):
    '''
    A blog post with SEO metadata support (via django-meta's ModelMeta).

    ``_metadata`` maps meta-tag names to model attributes/methods used to
    render ``<meta>`` tags for the post page.
    '''
    # Verbose names are in Spanish (site language); field names stay English.
    title = models.CharField('título', max_length=200)
    slug = models.SlugField('slug', max_length=200)
    content = models.TextField('contenido', max_length=5000)
    pub_date = models.DateTimeField('fecha de publicación')
    # PROTECT: a user cannot be deleted while they still have posts.
    author = models.ForeignKey(
        get_user_model(), models.PROTECT, verbose_name='autor'
    )
    # Optional header image; a site favicon is used as fallback (get_image).
    image = models.ImageField(
        upload_to='blog/', verbose_name='imagen', blank=True
    )
    # django-meta configuration: values are attribute/method names on this model.
    _metadata = {
        'title': 'title',
        'description': 'get_abstract',
        'image': 'get_image'
    }

    def __str__(self):
        return self.title

    def get_abstract(self):
        # First 200 characters of the body, used as the meta description.
        return Truncator(self.content).chars(200)

    def get_image(self):
        # Fall back to the static favicon when no image was uploaded.
        return self.image.url if self.image else static('images/favicon.png')
|
# -*- coding: UTF-8 -*-
from pyxer.base import *
import model
@expose
def index():
    """
    List guest book entries and a form for creating new entries.

    Stores the most recent entries on the template context object ``c`` so
    the page template can render them; returns nothing (pyxer convention).
    """
    if GAE:
        # Google App Engine: datastore query, newest first, capped at 20.
        query = model.GuestBook.all()
        query.order("-date")
        c.messages = query.fetch(limit=20)
    else:
        # Using Elixir (SQLAlchemy) — fetches *all* rows, no limit/order.
        # NOTE(review): class is spelled `Guestbook` here but `GuestBook`
        # in the GAE branch and in commit() — confirm which name model.py
        # actually defines.
        c.messages = model.Guestbook.query.all()
@expose
def commit(message):
    """
    Write form content into database and jump to index page
    to avoid that a reload of the page creates a duplicate entry.

    :param message: the guest-book message text submitted by the form.
    """
    if GAE:
        # Google App Engine: create and persist the datastore entity.
        model.GuestBook(message=message).put()
    else:
        # Using Elixir. Fixed: the original called
        # `model.GuestBook(name, message)` where `name` is undefined,
        # raising NameError on every non-GAE submit; construct with the
        # same keyword as the GAE branch instead.
        model.GuestBook(message=message)
        model.commit()
    # Redirect (POST-redirect-GET) so a browser reload does not re-submit.
    redirect(".")
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 30 19:00:00 2017
@author: gsutanto
"""
import numpy as np
import os
import sys
import copy
import glob
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../dmp_coupling/utilities/'))
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../utilities/'))
from convertDemoToSupervisedObsAvoidFbDataset import *
from DataStacking import *
from utilities import *
# Evaluate obstacle-avoidance coupling-term (Ct) unroll quality: compare the
# unrolled Ct against the supervised target Ct via normalized MSE.
data_global_coord = loadObj('data_multi_demo_vicon_static_global_coord.pkl')
dataset_Ct_obs_avoid = loadObj('dataset_Ct_obs_avoid.pkl')
unroll_dataset_Ct_obs_avoid = loadObj('unroll_dataset_Ct_obs_avoid.pkl')
#unroll_dataset_Ct_obs_avoid = loadObj('unroll_dataset_Ct_obs_avoid_recur_Ct_dataset.pkl')
model_parent_dir_path = '../tf/models/'
selected_settings_indices_file_path = model_parent_dir_path + 'selected_settings_indices.txt'
if not os.path.isfile(selected_settings_indices_file_path):
    # No selection file: use every obstacle setting in the demo data.
    N_settings = len(data_global_coord["obs_avoid"][0])
    selected_settings_indices = range(N_settings)
else:
    # NOTE(review): np.int is deprecated/removed in modern NumPy; this line
    # assumes the old NumPy that shipped with this Python 2 codebase.
    selected_settings_indices = [(i-1) for i in list(np.loadtxt(selected_settings_indices_file_path, dtype=np.int, ndmin=1))] # file is saved following MATLAB's convention (1~222)
    N_settings = len(selected_settings_indices)
print('N_settings = ' + str(N_settings))
subset_settings_indices = selected_settings_indices
subset_demos_indices = range(1)  # only the first demonstration per setting
mode_stack_dataset = 2
feature_type = 'raw'
N_primitive = 1
for prim_no in range(N_primitive):
    # Stack the supervised target Ct for this primitive.
    [_,
     Ct_target,
     _,
     _] = stackDataset(dataset_Ct_obs_avoid,
                       subset_settings_indices,
                       mode_stack_dataset,
                       subset_demos_indices,
                       feature_type,
                       prim_no)
    # Stack the unrolled Ct for the same settings/demos.
    [_,
     Ct_unroll,
     _,
     _] = stackDataset(unroll_dataset_Ct_obs_avoid,
                       subset_settings_indices,
                       mode_stack_dataset,
                       subset_demos_indices,
                       feature_type,
                       prim_no)
    # Normalized mean squared error of the unroll vs. the target.
    nmse_unroll = computeNMSE(Ct_unroll, Ct_target)
    print ('nmse_unroll = ' + str(nmse_unroll))
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-29 21:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: update the help text of Village.private (no schema change)."""

    dependencies = [("villages", "0008_auto_20161228_2209")]

    operations = [
        migrations.AlterField(
            model_name="village",
            name="private",
            field=models.BooleanField(
                default=False,
                help_text="Check if your village is invite only. Leave unchecked to welcome strangers.",
            ),
        )
    ]
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Flicket - copyright Paul Bourne: evereux@gmail.com
import datetime
import os
from flask import redirect, url_for, flash, render_template, g, request
from flask_babel import gettext
from flask_login import login_required
from . import flicket_bp
from application import app, db
from application.flicket.forms.flicket_forms import EditTicketForm, EditReplyForm
from application.flicket.models.flicket_models import (
FlicketHistory,
FlicketPost,
FlicketStatus,
FlicketRequesterRole,
FlicketProcedureStage,
FlicketRequestStage,
FlicketTicket,
FlicketUploads,
)
from application.flicket.models.flicket_models_ext import FlicketTicketExt
from application.flicket.scripts.flicket_functions import add_action
from application.flicket.scripts.flicket_functions import is_ticket_closed
from application.flicket.scripts.flicket_upload import UploadAttachment
# edit ticket
@flicket_bp.route(
    app.config["FLICKET"] + "edit_ticket/<int:ticket_id>", methods=["GET", "POST"]
)
@login_required
def edit_ticket(ticket_id):
    """Edit an existing ticket.

    GET renders the edit form pre-filled from the ticket; POST validates and
    delegates the actual update to FlicketTicketExt.edit_ticket. Only the
    ticket author or an admin may edit, and closed tickets cannot be edited.
    """
    form = EditTicketForm(ticket_id=ticket_id)
    ticket = FlicketTicket.query.filter_by(id=ticket_id).first()
    if not ticket:
        flash(gettext("Could not find ticket."), category="warning")
        return redirect(url_for("flicket_bp.flicket_main"))
    # check to see if topic is closed. ticket can't be edited once it's closed.
    if is_ticket_closed(ticket.current_status.status):
        return redirect(url_for("flicket_bp.ticket_view", ticket_id=ticket.id))
    # check user is authorised to edit ticket. Currently, only admin or author can do this.
    not_authorised = True
    if ticket.user == g.user or g.user.is_admin:
        not_authorised = False
    if not_authorised:
        flash(
            gettext("You are not authorised to edit this ticket."), category="warning"
        )
        return redirect(url_for("flicket_bp.ticket_view", ticket_id=ticket_id))
    if form.validate_on_submit():
        # Persist the edit; the helper returns the (possibly unchanged) ticket id.
        # NOTE(review): request_stage is hard-coded to 1 here rather than taken
        # from the form — confirm this reset is intentional.
        ticket_id = FlicketTicketExt.edit_ticket(
            ticket=ticket,
            title=form.title.data,
            user=g.user,
            content=form.content.data,
            requester=form.requester.data,
            referee=form.referee.data,
            requester_role=form.requester_role.data,
            request_stage=1,
            procedure_stage=form.procedure_stage.data,
            domain=form.domain.data,
            institute=form.institute.data,
            files=request.files.getlist("file"),
            days=form.days.data,
            form_uploads=form.uploads.data,
        )
        flash("Ticket successfully edited.", category="success")
        return redirect(url_for("flicket_bp.ticket_view", ticket_id=ticket_id))
    # GET (or failed validation): pre-populate the form from the ticket.
    form.content.data = ticket.content
    form.requester.data = ticket.requester
    form.referee.data = ticket.referee
    form.requester_role.data = ticket.requester_role_id
    form.procedure_stage.data = ticket.procedure_stage_id
    form.title.data = ticket.title
    form.domain.data = ticket.domain_id
    form.institute.data = ticket.institute_id
    form.days.data = ticket.days
    title = gettext("Edit Ticket")
    return render_template("flicket_edittopic.html", title=title, form=form)
# edit post
@flicket_bp.route(
    app.config["FLICKET"] + "edit_post/<int:post_id>/", methods=["GET", "POST"]
)
@login_required
def edit_post(post_id):
    """Edit an existing reply (post) on a ticket.

    GET renders the edit form pre-filled from the post; POST archives the old
    content in the history table, deletes any uploads the user selected for
    removal, applies the edit, updates the ticket's request/procedure stages
    if changed, and stores newly attached files. Only the post author or an
    admin may edit, and posts on closed tickets cannot be edited.
    """
    form = EditReplyForm(post_id=post_id)
    post = FlicketPost.query.filter_by(id=post_id).first()
    if not post:
        flash("Could not find post.", category="warning")
        return redirect(url_for("flicket_bp.flicket_main"))
    # check to see if topic is closed. ticket can't be edited once it's closed.
    if is_ticket_closed(post.ticket.current_status.status):
        return redirect(url_for("flicket_bp.ticket_view", ticket_id=post.ticket.id))
    # check user is authorised to edit post. Only author or admin can do this.
    not_authorised = True
    if post.user == g.user or g.user.is_admin:
        not_authorised = False
    if not_authorised:
        flash("You are not authorised to edit this ticket.", category="warning")
        return redirect(url_for("flicket_bp.ticket_view", ticket_id=post.ticket_id))
    if form.validate_on_submit():
        # before we make any changes store the original post content in the history table if it has changed.
        # Attribute the history row to the last editor if there is one,
        # otherwise to the original author.
        if post.modified_id:
            history_id = post.modified_id
        else:
            history_id = post.user_id
        if post.content != form.content.data:
            history = FlicketHistory(
                original_content=post.content,
                post=post,
                date_modified=datetime.datetime.now(),
                user_id=history_id,
            )
            db.session.add(history)
        # loop through the selected uploads for deletion.
        if len(form.uploads.data) > 0:
            for i in form.uploads.data:
                # get the upload document information from the database.
                query = FlicketUploads.query.filter_by(id=i).first()
                # define the full uploaded filename
                the_file = os.path.join(
                    app.config["ticket_upload_folder"], query.filename
                )
                if os.path.isfile(the_file):
                    # delete the file from the folder
                    os.remove(the_file)
                # Remove the DB row even if the file was already gone on disk.
                db.session.delete(query)
        # Apply the edit itself.
        post.content = form.content.data
        post.modified = g.user
        post.date_modified = datetime.datetime.now()
        post.days = form.days.data
        # Record stage transitions on the parent ticket's action log.
        if post.ticket.request_stage_id != form.request_stage.data:
            request_stage = FlicketRequestStage.query.get(form.request_stage.data)
            post.ticket.request_stage = request_stage
            add_action(
                post.ticket,
                "request_stage",
                data={
                    "request_stage_id": request_stage.id,
                    "request_stage": request_stage.request_stage,
                },
            )
        if post.ticket.procedure_stage_id != form.procedure_stage.data:
            procedure_stage = FlicketProcedureStage.query.get(form.procedure_stage.data)
            post.ticket.procedure_stage = procedure_stage
            add_action(
                post.ticket,
                "procedure_stage",
                data={
                    "procedure_stage_id": procedure_stage.id,
                    "procedure_stage": procedure_stage.procedure_stage,
                },
            )
        # Store any newly attached files.
        files = request.files.getlist("file")
        upload_attachments = UploadAttachment(files)
        if upload_attachments.are_attachments():
            upload_attachments.upload_files()
            # add files to database.
            upload_attachments.populate_db(post)
        # Single commit covers history, deletions, edit, stages and uploads.
        db.session.commit()
        flash("Post successfully edited.", category="success")
        return redirect(url_for("flicket_bp.ticket_view", ticket_id=post.ticket_id))
    # GET (or failed validation): pre-populate the form from the post.
    form.content.data = post.content
    form.days.data = post.days
    form.request_stage.data = post.ticket.request_stage_id
    form.procedure_stage.data = post.ticket.procedure_stage_id
    return render_template("flicket_editpost.html", title="Edit Post", form=form)
|
import pytest
from bib_downloader import doi
def test_acm_doi(monkeypatch):
    """An ACM-prefixed DOI must be dispatched to the handler registered
    under ACM_PREFIX in PUBLISHER_LOOKUP."""
    def fake_handler(_doi_str):
        return "MOCK RETURN"

    monkeypatch.setitem(doi.PUBLISHER_LOOKUP, doi.ACM_PREFIX, fake_handler)
    assert doi.process_doi("10.1145/3428216") == "MOCK RETURN"
def test_springer_doi(monkeypatch):
    """A Springer-prefixed DOI must be dispatched to the handler registered
    under SPRINGER_PREFIX in PUBLISHER_LOOKUP."""
    def fake_handler(_doi_str):
        return "MOCK RETURN"

    monkeypatch.setitem(doi.PUBLISHER_LOOKUP, doi.SPRINGER_PREFIX, fake_handler)
    assert doi.process_doi("10.1007/978-3-319-89884-1_28") == "MOCK RETURN"
|
import sys
from resources.lib.timer.set_quick_epg_timer import SetQuickEpgTimer
# Kodi runs this module as a context-menu script; `sys.listitem` is the
# selected ListItem injected by Kodi (not a standard `sys` attribute).
if __name__ == "__main__":
    SetQuickEpgTimer(sys.listitem)
|
import tensorflow as tf
import numpy as np
# Tiny one-layer tanh network; prints the output and the gradients of the
# output w.r.t. weights, bias and input.
# NOTE(review): uses the TF1 graph/session API (tf.Session, tf.gradients,
# tf.global_variables_initializer) — requires TF 1.x or tf.compat.v1.
xin=tf.Variable(tf.constant(np.array([-1.,0.,1.]),shape=[1,3]))  # input, shape (1, 3)
we=tf.Variable(tf.constant(np.array([[0.1,0],[-0.2,0.3],[0.4,-0.6]]),shape=[3,2]))  # weights, shape (3, 2)
bi=tf.Variable(tf.constant(np.array([0.3,0.1]),shape=[2]))  # bias, shape (2,)
yout=tf.nn.tanh(tf.matmul(xin,we)+bi)  # y = tanh(x @ W + b), shape (1, 2)
# Symbolic gradients of the (summed) output w.r.t. each variable.
g_we = tf.gradients(yout,we)
g_bi = tf.gradients(yout,bi)
g_x=tf.gradients(yout,xin)
sess=tf.Session()
sess.run(tf.global_variables_initializer())
print(sess.run(yout))
print(sess.run(g_we))
print(sess.run(g_bi))
print(sess.run(g_x))
# Definition for singly-linked list.
class Solution:
    def isPalindrome(self, head: ListNode) -> bool:
        """Return True if the singly-linked list starting at *head* reads the
        same forwards and backwards.

        Collects the values in a single forward pass and compares them with
        their reverse: O(n) time, O(n) space. (The original also built the
        reversed sequence with ``list.insert(0, ...)``, which is O(n) per
        insert and made the scan O(n^2) overall.)
        """
        values = []
        node = head
        while node is not None:
            values.append(node.val)
            node = node.next
        # An empty list ([] == []) is a palindrome by this definition.
        return values == values[::-1]
|
import numpy as np
import pickle
import glob
import os
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()  # apply seaborn's default matplotlib theme to all plots below
from pathlib import Path
from common.plotting_utils import get_scores, generate_plot, get_all_run_titles, get_all_runs_and_subruns
def make_graphs(experiment_name,
                subdir,
                run_titles=None,
                smoothen=False,
                min_length=-1,
                only_longest=False,
                skip_failures=False,
                cumulative=False,
                all_seeds=False):
    """Plot score curves for every run of an experiment on one figure.

    :param experiment_name: directory containing one sub-directory per run.
    :param subdir: score sub-directory name (also used as the y-axis label).
    :param run_titles: runs to plot; None means every run in the experiment.
    :param smoothen: forwarded to generate_plot to smooth the curves.
    :param min_length: forwarded to get_scores (minimum episodes per run).
    :param only_longest: forwarded to get_scores (keep only the longest seed).
    :param skip_failures: currently unused — failed runs are always skipped
        (see the except below).
    :param cumulative: forwarded to get_scores (plot cumulative scores).
    :param all_seeds: additionally plot each seed as its own labelled curve.
    """
    if run_titles is None:
        print("Using all runs in experiment")
        run_titles = get_all_run_titles(experiment_name=experiment_name)
    log_dirs = [
        os.path.join(experiment_name, run_title) for run_title in run_titles
    ]
    score_arrays = []
    good_run_titles = []
    for log_dir, run_title in zip(log_dirs, run_titles):
        try:
            scores = get_scores(log_dir,
                                subdir=subdir,
                                only_longest=only_longest,
                                min_length=min_length,
                                cumulative=cumulative)
            score_arrays.append(scores)
            good_run_titles.append(run_title)
            if all_seeds:
                # Also plot each seed individually, labelled "<run>_<seed#>".
                for i, score in enumerate(scores):
                    score_arrays.append(np.array([score]))
                    good_run_titles.append(run_title + f"_{i+1}")
        except Exception as e:
            # Unreadable/incomplete runs are skipped rather than aborting.
            print(f"skipping {log_dir} due to error {e}")
            pass
    # Draw one curve per surviving run (list comprehension used for effect).
    [
        generate_plot(score_array, run_title, smoothen=smoothen)
        for score_array, run_title in zip(score_arrays, good_run_titles)
    ]
    plt.ylabel(subdir.replace("_", " "))
    plt.xlabel("Episode")
    plt.legend()
    plt.show()
def main():
    """
    Change these options and directories to suit your needs
    """
    ## Defaults
    subdir = "scores"
    smoothen = False
    min_length = -1
    only_longest = False
    cumulative = False
    all_seeds = False
    ## Options (Uncomment as needed)
    # smoothen = True
    # min_length = 300
    # only_longest = True
    # cumulative = True
    # all_seeds = True
    # Point this at the experiment directory before running.
    experiment_name = "./path/to/experiment"
    run_titles = get_all_run_titles(experiment_name)
    make_graphs(experiment_name,
                subdir,
                run_titles=run_titles,
                smoothen=smoothen,
                min_length=min_length,
                only_longest=only_longest,
                cumulative=cumulative,
                all_seeds=all_seeds)


if __name__ == '__main__':
    main()
# Generated by Django 2.0.3 on 2018-03-08 08:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the tippspiel app:
    Match, Player, Team and Tipp models plus Match's two team FKs."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Match',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField()),
                ('matchday', models.IntegerField(default=0)),
                # -1 marks "not yet played" (real scores are >= 0).
                ('score_home', models.IntegerField(default=-1)),
                ('score_visitor', models.IntegerField(default=-1)),
            ],
        ),
        migrations.CreateModel(
            name='Player',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('score', models.IntegerField(default=0, verbose_name="The player's score.")),
                ('rank', models.IntegerField(default=1, verbose_name="The player's rank")),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Team',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('handle', models.CharField(max_length=3)),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Tipp',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField()),
                ('score_home', models.IntegerField(default=0)),
                ('score_visitor', models.IntegerField(default=0)),
                ('match', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='tippspiel.Match')),
                ('player', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='tippspiel.Player')),
            ],
        ),
        # Added after CreateModel because Match references Team.
        migrations.AddField(
            model_name='match',
            name='team_home',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='tippspiel.Team'),
        ),
        migrations.AddField(
            model_name='match',
            name='team_visitor',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='tippspiel.Team'),
        ),
    ]
|
import os
import json
import cv2
import random
import re
from bs4 import BeautifulSoup as bs
from shutil import copyfile
from tqdm import tqdm
from pathlib import Path
from abc import ABCMeta, abstractmethod
# Create the dataset output directory (parents included, idempotent).
Path("../coco_df_detr/data").mkdir(parents=True, exist_ok=True)
# NOTE(review): this shell mkdir is redundant — Path.mkdir above already
# created ../coco_df_detr — and os.system is non-portable; the "echo 2"
# only prints "2" to stdout.
bashCommand = "mkdir ../coco_df_detr\n" \
              "echo 2"
os.system(bashCommand)

if __name__ == '__main__':
    pass
def fizzbuzz(input_number):
    """Return 'Fizz' for multiples of 3, 'Buzz' for multiples of 5,
    'FizzBuzz' for multiples of both, otherwise the number as a string."""
    matched = [word for divisor, word in ((3, 'Fizz'), (5, 'Buzz'))
               if input_number % divisor == 0]
    return ''.join(matched) or str(input_number)
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019~2999 - Cologler <skyoflw@gmail.com>
# ----------
# some object for parser
# ----------
from typing import List
import enum
import dis
from collections import defaultdict
class ID:
    ''' an opaque identifier; the name exists purely for debug output. '''

    def __init__(self, name):
        self._name = name  # diagnostic label only — not used for identity

    def __repr__(self):
        return 'ID({})'.format(self._name)

    def __str__(self):
        return repr(self)
class Scope(enum.IntEnum):
    ''' the syntactic construct a `CodeState` is currently decompiling. '''
    NONE = enum.auto()     # top level / no special scope
    LOOP = enum.auto()     # inside a for/while body
    WITH = enum.auto()     # inside a with block
    EXCEPT = enum.auto()   # inside an except handler
    FINALLY = enum.auto()  # inside a finally block
class CodeState:
    '''
    Mutable decompiler state: a load stack of expression nodes being built,
    a list of completed statement blocks, and optional per-scope bookkeeping
    (`state` dict, only present when `scope` is not `Scope.NONE`).
    '''

    def __init__(self, *, scope=Scope.NONE):
        self._ast_stack = []
        self._load_stack = []
        self._scope = scope
        # Per-scope key/value store; None at top level so misuse fails fast.
        self._state: dict = None if scope is Scope.NONE else {}
        self._blocks = [[]] # ensure has last block
        self._instrs = [] # all handled instrs in this state

    def __repr__(self):
        return f'b({self._blocks!r}), l({self._load_stack!r})'

    @property
    def scope(self):
        return self._scope

    # state
    @property
    def state(self):
        return self._state

    def add_state(self, id, value):
        ''' add a state, also ensure it does not exists. '''
        assert id not in self._state
        self._state[id] = value

    # instrs
    def add_instr(self, instr: dis.Instruction):
        ''' add a handled instruction in this state '''
        self._instrs.append(instr)

    def get_instrs(self, key=None) -> List[dis.Instruction]:
        ''' get all instructions by key (opcode or opname) from this state '''
        if key is None:
            return self._instrs.copy()
        else:
            return [i for i in self._instrs if i.opcode == key or i.opname == key]

    def copy(self):
        ''' copy a `CodeState` (stacks only).
        NOTE(review): scope, state, blocks and instrs are deliberately(?) not
        copied — the copy always starts as a fresh Scope.NONE state. '''
        state = CodeState()
        state._load_stack = self._load_stack.copy()
        state._ast_stack = self._ast_stack.copy()
        return state

    def copy_with_load(self, load_count):
        ''' copy a `CodeState` with empty ast stack, keeping only the top
        `load_count` entries of the load stack. '''
        state = CodeState()
        state._load_stack = self._load_stack[-load_count:]
        return state

    def push(self, node):
        ''' push a node into load stack. '''
        self._load_stack.append(node)

    def pop(self):
        ''' pop the top node from load stack. '''
        return self._load_stack.pop()

    def pop_seq(self, count: int) -> list:
        ''' pop a list of top nodes from load stack (stack order preserved). '''
        assert count >= 0
        if count > 0:
            items = self._load_stack[-count:]
            self._load_stack = self._load_stack[0:-count]
            return items
        else:
            return []

    def dup_top(self):
        ''' repeat top once. '''
        self._load_stack.append(self._load_stack[-1])

    def store(self, node):
        ''' store a node '''
        self.add_node(node)

    def add_node(self, node):
        ''' add a final node into ast stmt tree (appends to current block) '''
        self._blocks[-1].append(node)

    def get_value(self) -> list:
        ''' get stmts from single block; asserts everything was consumed. '''
        # ensure all status was handled
        assert not self._state, self._state
        assert not self._load_stack, self._load_stack
        # get value
        assert len(self._blocks) == 1, self._blocks
        return self._blocks[-1]

    def new_block(self):
        ''' make a new stmts block '''
        self._blocks.append([])

    def get_blocks(self) -> list:
        ''' get all stmts blocks; asserts everything was consumed. '''
        # ensure all status was handled
        assert not self._state, self._state
        assert not self._load_stack, self._load_stack
        # get value
        return self._blocks

    def get_block_count(self) -> int:
        ''' get count of stmts blocks. '''
        return len(self._blocks)
class CodeReaderIter:
    '''
    Conditional iterator over a `CodeReader`: keeps popping instructions
    while `condition()` is truthy and can feed them through their registered
    handlers into a `CodeState`.
    '''
    __slots__ = ('_reader', '_condition')

    def __init__(self, reader, condition):
        self._reader: CodeReader = reader
        self._condition = condition  # zero-arg callable; truthy -> keep reading

    def __iter__(self):
        while self._condition():
            yield self._reader.pop()

    def fill_state(self, state: CodeState):
        ''' iter self into the `CodeState` and return it. '''
        for instr in self:
            # Dispatch each instruction to its registered opcode handler.
            handler = get_instr_handler(instr)
            handler(self._reader, state, instr)
            state.add_instr(instr)
        return state

    def get_state(self, *, scope=Scope.NONE):
        ''' iter self into a new `CodeState`, return the `CodeState` '''
        state = CodeState(scope=scope)
        return self.fill_state(state)

    def get_value(self, *, scope=Scope.NONE):
        ''' iter self into a new `CodeState`, return value from `CodeState`. '''
        return self.get_state(scope=scope).get_value()

    def get_blocks(self, *, scope=Scope.NONE):
        ''' iter self into a new `CodeState`, return blocks from `CodeState`. '''
        return self.get_state(scope=scope).get_blocks()
class CodeReader:
    '''
    A pop-from-the-front stream of `dis.Instruction`s that tracks the current
    source line number and offers several bounded read strategies.
    '''

    def __init__(self, instructions):
        # reversed will fast
        # (popping from the list tail is O(1), so store the instrs reversed)
        self._instructions = list(reversed(instructions))
        self._lineno = None

    def __bool__(self):
        # Truthy while instructions remain.
        return bool(self._instructions)

    def __repr__(self):
        return repr(list(reversed(self._instructions)))

    @property
    def co_consts(self):
        # NOTE(review): `_co_consts` is never assigned anywhere in this class,
        # so accessing this property raises AttributeError — confirm whether a
        # constructor argument was lost.
        return self._co_consts

    def get_instrs_count(self) -> int:
        return len(self._instructions)

    def get_lineno(self) -> int:
        ''' line number of the most recently popped instruction. '''
        return self._lineno

    def peek(self) -> dis.Instruction:
        ''' peek one instr (None when exhausted) '''
        if not self._instructions:
            return None
        return self._instructions[-1]

    def pop(self) -> dis.Instruction:
        ''' pop one instr, updating the tracked line number '''
        instr = self._instructions.pop()
        if instr.starts_line is not None:
            self._lineno = instr.starts_line
        return instr

    def pop_assert(self, opcode: int) -> dis.Instruction:
        ''' pop one instr and assert it has the expected opcode '''
        instr = self.pop()
        assert instr.opcode == opcode
        return instr

    def pop_if(self, opcode: int) -> dis.Instruction:
        ''' pop the next instr only if it matches opcode (else return None) '''
        if self._instructions and self._instructions[-1].opcode == opcode:
            return self.pop()

    # read methods
    def read_until_end(self):
        ''' read until reader end. '''
        return CodeReaderIter(self, lambda: self)

    def read_until_offset(self, offset: int):
        ''' read until come to the offset '''
        return CodeReaderIter(self, lambda: self.peek().offset != offset)

    def read_until_opcodes(self, *opcodes):
        ''' read until visit some opcodes '''
        return CodeReaderIter(self, lambda: self.peek().opcode not in opcodes)

    def read_until_count(self, count: int):
        ''' read until handled count of instrs '''
        end_count = self.get_instrs_count() - count
        return CodeReaderIter(self, lambda: self.get_instrs_count() > end_count)

    def read_until_scoped_count(self, count: int):
        ''' read until handled count of instrs in current scope. '''
        if count <= 0:
            raise ValueError(count)
        # Counts *yields* (via the closure) rather than reader length, so
        # instructions consumed by nested handlers are not double-counted.
        def cond():
            nonlocal count
            count -= 1
            return count >= 0
        return CodeReaderIter(self, cond)
_OPCODE_MAP = {}
def op(opname, opcode, **kwargs):
def wrapper(func):
def func_wrapper(reader, state, instr: dis.Instruction):
func(reader, state, instr, **kwargs)
assert opcode not in _OPCODE_MAP
_OPCODE_MAP[(opname, opcode)] = func_wrapper
return func
return wrapper
def get_instr_handler(instr):
    '''
    Look up the handler registered for *instr* in `_OPCODE_MAP`.

    the return function `(reader, state, instr) -> None`;
    raises `NotImplementedError` when no handler is registered.
    '''
    lookup_key = (instr.opname, instr.opcode)
    try:
        return _OPCODE_MAP[lookup_key]
    except KeyError:
        raise NotImplementedError(lookup_key, instr)
|
from .cacbarcode import CACBarcode, PDF417Barcode, Code39Barcode |
# Extract information from a web page
import requests
def generate_hashtags(baselist):
    """Replace each seed hashtag in *baselist* (mapping name -> '#tag') with
    up to four related hashtags fetched from the tag-suggestion service.
    Mutates and returns *baselist*."""
    base_url = "http://d212rkvo8t62el.cloudfront.net/tag/"
    for key in baselist:
        seed = baselist[key][1:]  # drop the leading '#'
        payload = requests.get(url=base_url + seed).json()
        related = ['#' + entry['tag'] for entry in payload['results']]
        baselist[key] = related[:4]
    return baselist
"""
if __name__ == "__main__":
generate_hashtags({"Realmadrid":"#realmadrid"})
"""
|
"""Contains utilities for plotting PPI NetworkX graphs."""
# %%
# Graphein
# Author: Arian Jamasb <arian@jamasb.io>
# License: MIT
# Project Website: https://github.com/a-r-j/graphein
# Code Repository: https://github.com/a-r-j/graphein
from typing import List, Optional
import networkx as nx
import plotly.express as px
import plotly.graph_objects as go
from matplotlib.colors import to_rgb
def plot_ppi_graph(
    g: nx.Graph,
    colour_edges_by: str = "kind",
    with_labels: bool = True,
    **kwargs,
):
    """Draws a Protein-Protein Interaction graph with networkx/matplotlib,
    colouring each edge by its source database.

    :param g: NetworkX graph of PPI network.
    :type g: nx.Graph
    :param colour_edges_by: Edge-colouring attribute; only "kind" (the source
        database) is supported, by default "kind".
    :param with_labels: Whether to show labels on nodes. Defaults to True.
    :type with_labels: bool, optional
    """
    if colour_edges_by != "kind":
        raise ValueError(
            f"Edge colouring scheme: {colour_edges_by} not supported. Please use 'kind'"
        )

    def _edge_colour(u, v):
        # red for STRING edges, blue for BioGRID, yellow for anything else
        kind = g[u][v]["kind"]
        if kind == {"string"}:
            return "r"
        if kind == {"biogrid"}:
            return "b"
        return "y"

    edge_colors = [_edge_colour(u, v) for u, v in g.edges()]
    nx.draw(g, with_labels=with_labels, edge_color=edge_colors, **kwargs)
def plotly_ppi_graph(
    g: nx.Graph,
    layout: nx.layout = nx.layout.circular_layout,
    title: Optional[str] = None,
    show_labels: bool = False,
    node_size_multiplier: float = 5.0,
    node_colourscale: str = "Viridis",
    edge_colours: Optional[List[str]] = None,
    edge_opacity: float = 0.5,
    height: int = 500,
    width: int = 500,
):
    """Plots a PPI graph.

    :param g: PPI graph
    :type g: nx.Graph
    :param layout: Layout algorithm to use. Default is circular_layout.
    :type layout: nx.layout
    :param title: Title of the graph. Default is None.
    :type title: str, optional
    :param show_labels: If True, shows labels on nodes. Default is False.
    :type show_labels: bool
    :param node_size_multiplier: Multiplier for node size. Default is 5.0.
    :type node_size_multiplier: float
    :param node_colourscale: Colour scale to use for node colours. Default is "Viridis". Options:
        'Greys' | 'YlGnBu' | 'Greens' | 'YlOrRd' | 'Bluered' | 'RdBu' |
        'Reds' | 'Blues' | 'Picnic' | 'Rainbow' | 'Portland' | 'Jet' |
        'Hot' | 'Blackbody' | 'Earth' | 'Electric' | 'Viridis' |
    :type node_colourscale: str
    :param edge_colours: List of colours (hexcode) to use for edges. Default is None (px.colours.qualitative.T10).
    :type edge_colours: List[str], optional
    :param edge_opacity: Opacity of edges. Default is 0.5.
    :type edge_opacity: float
    :param height: Height of the plot. Default is 500.
    :type height: int
    :param width: Width of the plot. Default is 500.
    :type width: int
    :return: Plotly figure of PPI Network
    :rtype: go.Figure
    """
    if edge_colours is None:
        edge_colours = px.colors.qualitative.T10
    # Convert each colour to an "rgba(...)" string carrying the requested opacity.
    edge_colours = [
        f"rgba{tuple(list(to_rgb(c)) + [edge_opacity])}" for c in edge_colours
    ]
    # Set positions
    nx.set_node_attributes(g, layout(g), "pos")
    # Get node and edge traces
    node_trace = get_node_trace(g, node_size_multiplier, node_colourscale)
    edge_trace = get_edge_trace(g, edge_colours)
    traces = [node_trace] + edge_trace
    # Get node labels if using them.
    if show_labels:
        # Separate text-only trace reusing the node coordinates.
        text_trace = go.Scatter(
            x=node_trace["x"],
            y=node_trace["y"],
            mode="text",
            text=list(g.nodes()),
            textposition="bottom center",
            hoverinfo="text",
        )
        traces.append(text_trace)
    # Assemble plot from traces
    return go.Figure(
        data=traces,
        layout=go.Layout(
            title=title,
            titlefont_size=16,
            showlegend=False,
            width=width,
            height=height,
            hovermode="closest",
            margin=dict(b=20, l=5, r=5, t=40),
            xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
            yaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
        ),
    )
def get_node_trace(
    g: nx.Graph, node_size_multiplier: float, node_colourscale: str = "Viridis"
) -> go.Scatter:
    """Builds the node scatter trace for the plotly PPI plot, sizing and
    colouring each node by its degree.

    :param g: PPI graph with ['pos'] added to the nodes (eg via nx.layout function)
    :type g: nx.Graph
    :param node_size_multiplier: Multiplier for node size. Default is 5.0.
    :type node_size_multiplier: float
    :param node_colourscale: Colourscale to use for the nodes, defaults to "Viridis"
    :type node_colourscale: str, optional
    :return: Node trace for plotly plot
    :rtype: go.Scatter
    """
    xs = []
    ys = []
    sizes = []
    for node in g.nodes():
        pos_x, pos_y = g.nodes[node]["pos"]
        xs.append(pos_x)
        ys.append(pos_y)
        sizes.append(g.degree(node) * node_size_multiplier)
    trace = go.Scatter(
        x=xs,
        y=ys,
        mode="markers",
        hoverinfo="text",
        marker=dict(
            showscale=False,
            colorscale=node_colourscale,
            reversescale=True,
            color=[],
            size=sizes,
            colorbar=dict(
                thickness=15,
                title="Node Connections",
                xanchor="left",
                titleside="right",
            ),
            line_width=2,
        ),
    )
    # Colour by the same degree-derived values used for size; hover text is
    # the node identifier.
    trace.marker.color = sizes
    trace.text = list(g.nodes())
    return trace
def get_edge_trace(
    g: nx.Graph,
    edge_colours: Optional[List[str]] = None,
) -> List[go.Scatter]:
    """Gets edge traces from PPI graph. Returns a list of traces enabling edge colours to be set individually.

    :param g: PPI graph with node positions stored under the ``"pos"`` attribute.
    :type g: nx.Graph
    :param edge_colours: Colours for string / biogrid / other edges, in that
        order. Defaults to ``["red", "blue", "yellow"]``.
    :type edge_colours: Optional[List[str]]
    :return: One line trace per edge.
    :rtype: List[go.Scatter]
    """
    if edge_colours is None:
        edge_colours = ["red", "blue", "yellow"]
    traces = []
    for u, v, d in g.edges(data=True):
        # Get positions
        x0, y0 = g.nodes[u]["pos"]
        x1, y1 = g.nodes[v]["pos"]
        # Assign colour by source database
        if d["kind"] == {"string"}:
            colour = edge_colours[0]
        elif d["kind"] == {"biogrid"}:
            colour = edge_colours[1]
        else:
            colour = edge_colours[2]
        edge_trace = go.Scatter(
            line=dict(width=2, color=colour),
            hoverinfo="text",
            x=(x0, x1),
            y=(y0, y1),
            mode="lines",
            # Hover text: the edge's kind labels joined, e.g. "string / biogrid".
            # Fixed: the original iterated the kind *set* and joined the
            # characters of each label, producing e.g. "s / t / r / i / n / g".
            text=[" / ".join(d["kind"])],
        )
        traces.append(edge_trace)
    return traces
# Smoke-test demo: build a small PPI graph from STRING + BioGRID edges for a
# handful of cell-division proteins and render it with both backends.
if __name__ == "__main__":
    from functools import partial

    from graphein.ppi.config import PPIGraphConfig
    from graphein.ppi.edges import add_biogrid_edges, add_string_edges
    from graphein.ppi.graphs import compute_ppi_graph

    config = PPIGraphConfig()
    protein_list = [
        "CDC42",
        "CDK1",
        "KIF23",
        "PLK1",
        "RAC2",
        "RACGAP1",
        "RHOA",
        "RHOB",
    ]
    g = compute_ppi_graph(
        protein_list=protein_list,
        edge_construction_funcs=[
            partial(add_string_edges),
            partial(add_biogrid_edges),
        ],
    )
    plot_ppi_graph(g)
    plotly_ppi_graph(g)
# %%
|
#!/usr/bin/env python
#
# Utility methods for security descriptor manipulation
#
# Copyright Nadezhda Ivanova 2010 <nivanova@samba.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import samba
from ldb import Message, MessageElement, Dn
from ldb import FLAG_MOD_REPLACE, SCOPE_BASE
from samba.ndr import ndr_pack, ndr_unpack
from samba.dcerpc import security
class SDUtils:
    '''Some utilities for manipulation of security descriptors
    on objects'''

    def __init__(self, samdb):
        # samdb: a connected SamDB/ldb instance for the target domain.
        self.ldb = samdb
        self.domain_sid = security.dom_sid(self.ldb.get_domain_sid())

    def modify_sd_on_dn(self, object_dn, sd, controls=None):
        """ Modify security descriptor using either SDDL string
        or security.descriptor object
        """
        m = Message()
        m.dn = Dn(self.ldb, object_dn)
        assert(isinstance(sd, str) or isinstance(sd, security.descriptor))
        if isinstance(sd, str):
            # SDDL strings are parsed relative to this domain's SID.
            tmp_desc = security.descriptor.from_sddl(sd, self.domain_sid)
        elif isinstance(sd, security.descriptor):
            tmp_desc = sd
        m["nTSecurityDescriptor"] = MessageElement(ndr_pack(tmp_desc),
                                                   FLAG_MOD_REPLACE,
                                                   "nTSecurityDescriptor")
        self.ldb.modify(m, controls)

    def read_sd_on_dn(self, object_dn, controls=None):
        """ Read and NDR-unpack the security descriptor of object_dn. """
        res = self.ldb.search(object_dn, SCOPE_BASE, None,
                              ["nTSecurityDescriptor"], controls=controls)
        desc = res[0]["nTSecurityDescriptor"][0]
        return ndr_unpack(security.descriptor, desc)

    def get_object_sid(self, object_dn):
        """ Return the objectSid of object_dn as a security.dom_sid. """
        res = self.ldb.search(object_dn)
        return ndr_unpack(security.dom_sid, res[0]["objectSid"][0])

    def dacl_add_ace(self, object_dn, ace):
        """ Adds an ACE (SDDL fragment) to an objects security descriptor.
        No-op when the ACE is already present.
        """
        desc = self.read_sd_on_dn(object_dn)
        desc_sddl = desc.as_sddl(self.domain_sid)
        if ace in desc_sddl:
            return
        if desc_sddl.find("(") >= 0:
            # Insert the new ACE before the existing ACE list.
            desc_sddl = desc_sddl[:desc_sddl.index("(")] + ace + desc_sddl[desc_sddl.index("("):]
        else:
            desc_sddl = desc_sddl + ace
        self.modify_sd_on_dn(object_dn, desc_sddl)

    def get_sd_as_sddl(self, object_dn, controls=None):
        """ Return object nTSecurityDescriptor in SDDL format
        """
        desc = self.read_sd_on_dn(object_dn, controls=controls)
        return desc.as_sddl(self.domain_sid)
|
class AnnotationEvent:
    """A timestamped annotation that serialises to a dict tagged with its
    event type."""

    # Subclasses override this tag to identify their event kind.
    event_type = 'event'

    def __init__(self, time):
        self.time = time

    def to_json(self):
        """Return a JSON-ready dict of every instance field plus a 'type' tag."""
        payload = dict(self.__dict__)
        payload['type'] = self.event_type
        return payload
|
from armulator.armv6.opcodes.abstract_opcodes.umaal import Umaal
from armulator.armv6.opcodes.opcode import Opcode
class UmaalT1(Umaal, Opcode):
    """Encoding T1 of the ARM UMAAL (unsigned multiply accumulate
    accumulate long) instruction."""

    def __init__(self, instruction, m, d_hi, d_lo, n):
        Opcode.__init__(self, instruction)
        Umaal.__init__(self, m, d_hi, d_lo, n)

    def is_pc_changing_opcode(self):
        # UMAAL only writes general-purpose registers, never the PC.
        return False

    @staticmethod
    def from_bitarray(instr, processor):
        """Decode the register fields from the instruction bits and build the
        opcode object; returns None for UNPREDICTABLE encodings."""
        rm = instr[28:32]
        rd_hi = instr[20:24]
        rd_lo = instr[16:20]
        rn = instr[12:16]
        # SP (13) / PC (15) operands, or equal destination halves, are
        # UNPREDICTABLE per the ARM ARM.
        if (rm.uint in (13, 15) or
                rn.uint in (13, 15) or
                rd_hi.uint in (13, 15) or
                rd_lo.uint in (13, 15) or
                rd_hi.uint == rd_lo.uint):
            # Fixed: the original used the Python 2 print *statement*
            # (`print "unpredictable"`), a SyntaxError under Python 3; the
            # call form below behaves identically on both 2 and 3.
            print("unpredictable")
        else:
            return UmaalT1(instr, **{"m": rm.uint, "d_hi": rd_hi.uint, "d_lo": rd_lo.uint, "n": rn.uint})
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['WorkflowTemplateArgs', 'WorkflowTemplate']
# Generated input-args class: values are stored via pulumi.set/pulumi.get so
# the @pulumi.input_type machinery can track them; keep the structure as-is.
@pulumi.input_type
class WorkflowTemplateArgs:
    def __init__(__self__, *,
                 id: pulumi.Input[str],
                 jobs: pulumi.Input[Sequence[pulumi.Input['OrderedJobArgs']]],
                 placement: pulumi.Input['WorkflowTemplatePlacementArgs'],
                 dag_timeout: Optional[pulumi.Input[str]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 parameters: Optional[pulumi.Input[Sequence[pulumi.Input['TemplateParameterArgs']]]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 version: Optional[pulumi.Input[int]] = None):
        """
        The set of arguments for constructing a WorkflowTemplate resource.
        :param pulumi.Input[str] id: The template id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters..
        :param pulumi.Input[Sequence[pulumi.Input['OrderedJobArgs']]] jobs: The Directed Acyclic Graph of Jobs to submit.
        :param pulumi.Input['WorkflowTemplatePlacementArgs'] placement: WorkflowTemplate scheduling information.
        :param pulumi.Input[str] dag_timeout: Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance.Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).No more than 32 labels can be associated with a template.
        :param pulumi.Input[Sequence[pulumi.Input['TemplateParameterArgs']]] parameters: Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
        :param pulumi.Input[int] version: Optional. Used to perform a consistent read-modify-write.This field should be left blank for a CreateWorkflowTemplate request. It is required for an UpdateWorkflowTemplate request, and must match the current server version. A typical update template flow would fetch the current template with a GetWorkflowTemplate request, which will return the current template with the version field filled in with the current server version. The user updates other fields in the template, then returns it as part of the UpdateWorkflowTemplate request.
        """
        # Required arguments are always stored; optional ones only when given.
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "jobs", jobs)
        pulumi.set(__self__, "placement", placement)
        if dag_timeout is not None:
            pulumi.set(__self__, "dag_timeout", dag_timeout)
        if labels is not None:
            pulumi.set(__self__, "labels", labels)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if parameters is not None:
            pulumi.set(__self__, "parameters", parameters)
        if project is not None:
            pulumi.set(__self__, "project", project)
        if version is not None:
            pulumi.set(__self__, "version", version)
    @property
    @pulumi.getter
    def id(self) -> pulumi.Input[str]:
        """
        The template id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters..
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: pulumi.Input[str]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter
    def jobs(self) -> pulumi.Input[Sequence[pulumi.Input['OrderedJobArgs']]]:
        """
        The Directed Acyclic Graph of Jobs to submit.
        """
        return pulumi.get(self, "jobs")
    @jobs.setter
    def jobs(self, value: pulumi.Input[Sequence[pulumi.Input['OrderedJobArgs']]]):
        pulumi.set(self, "jobs", value)
    @property
    @pulumi.getter
    def placement(self) -> pulumi.Input['WorkflowTemplatePlacementArgs']:
        """
        WorkflowTemplate scheduling information.
        """
        return pulumi.get(self, "placement")
    @placement.setter
    def placement(self, value: pulumi.Input['WorkflowTemplatePlacementArgs']):
        pulumi.set(self, "placement", value)
    @property
    @pulumi.getter(name="dagTimeout")
    def dag_timeout(self) -> Optional[pulumi.Input[str]]:
        """
        Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
        """
        return pulumi.get(self, "dag_timeout")
    @dag_timeout.setter
    def dag_timeout(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "dag_timeout", value)
    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance.Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).No more than 32 labels can be associated with a template.
        """
        return pulumi.get(self, "labels")
    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "labels", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        # No description is provided for this property in the generated schema.
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter
    def parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TemplateParameterArgs']]]]:
        """
        Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
        """
        return pulumi.get(self, "parameters")
    @parameters.setter
    def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TemplateParameterArgs']]]]):
        pulumi.set(self, "parameters", value)
    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        # No description is provided for this property in the generated schema.
        return pulumi.get(self, "project")
    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)
    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[int]]:
        """
        Optional. Used to perform a consistent read-modify-write.This field should be left blank for a CreateWorkflowTemplate request. It is required for an UpdateWorkflowTemplate request, and must match the current server version. A typical update template flow would fetch the current template with a GetWorkflowTemplate request, which will return the current template with the version field filled in with the current server version. The user updates other fields in the template, then returns it as part of the UpdateWorkflowTemplate request.
        """
        return pulumi.get(self, "version")
    @version.setter
    def version(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "version", value)
class WorkflowTemplate(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 dag_timeout: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 jobs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OrderedJobArgs']]]]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 parameters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TemplateParameterArgs']]]]] = None,
                 placement: Optional[pulumi.Input[pulumi.InputType['WorkflowTemplatePlacementArgs']]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 version: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        """
        Creates new workflow template.
        Auto-naming is currently not supported for this resource.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] dag_timeout: Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
        :param pulumi.Input[str] id: The template id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters..
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OrderedJobArgs']]]] jobs: The Directed Acyclic Graph of Jobs to submit.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance.Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).No more than 32 labels can be associated with a template.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TemplateParameterArgs']]]] parameters: Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
        :param pulumi.Input[pulumi.InputType['WorkflowTemplatePlacementArgs']] placement: WorkflowTemplate scheduling information.
        :param pulumi.Input[int] version: Optional. Used to perform a consistent read-modify-write.This field should be left blank for a CreateWorkflowTemplate request. It is required for an UpdateWorkflowTemplate request, and must match the current server version. A typical update template flow would fetch the current template with a GetWorkflowTemplate request, which will return the current template with the version field filled in with the current server version. The user updates other fields in the template, then returns it as part of the UpdateWorkflowTemplate request.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: WorkflowTemplateArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Creates new workflow template.
        Auto-naming is currently not supported for this resource.
        :param str resource_name: The name of the resource.
        :param WorkflowTemplateArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    # Runtime entry point: dispatches to _internal_init from whichever of the
    # two overload shapes above the caller used.
    def __init__(__self__, resource_name: str, *args, **kwargs):
        resource_args, opts = _utilities.get_resource_args_opts(WorkflowTemplateArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 dag_timeout: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 jobs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OrderedJobArgs']]]]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 parameters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TemplateParameterArgs']]]]] = None,
                 placement: Optional[pulumi.Input[pulumi.InputType['WorkflowTemplatePlacementArgs']]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 version: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: required properties are validated here
            # (skipped when opts.urn is set, i.e. the resource is being read).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = WorkflowTemplateArgs.__new__(WorkflowTemplateArgs)
            __props__.__dict__["dag_timeout"] = dag_timeout
            if id is None and not opts.urn:
                raise TypeError("Missing required property 'id'")
            __props__.__dict__["id"] = id
            if jobs is None and not opts.urn:
                raise TypeError("Missing required property 'jobs'")
            __props__.__dict__["jobs"] = jobs
            __props__.__dict__["labels"] = labels
            __props__.__dict__["location"] = location
            __props__.__dict__["parameters"] = parameters
            if placement is None and not opts.urn:
                raise TypeError("Missing required property 'placement'")
            __props__.__dict__["placement"] = placement
            __props__.__dict__["project"] = project
            __props__.__dict__["version"] = version
            # Output-only properties start as None and are filled in by the engine.
            __props__.__dict__["create_time"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["update_time"] = None
        super(WorkflowTemplate, __self__).__init__(
            'google-native:dataproc/v1beta2:WorkflowTemplate',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'WorkflowTemplate':
        """
        Get an existing WorkflowTemplate resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        # opts.id identifies the existing resource to adopt; all properties
        # start unset and are populated when the resource state is read back.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = WorkflowTemplateArgs.__new__(WorkflowTemplateArgs)
        __props__.__dict__["create_time"] = None
        __props__.__dict__["dag_timeout"] = None
        __props__.__dict__["jobs"] = None
        __props__.__dict__["labels"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["parameters"] = None
        __props__.__dict__["placement"] = None
        __props__.__dict__["update_time"] = None
        __props__.__dict__["version"] = None
        return WorkflowTemplate(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="createTime")
    def create_time(self) -> pulumi.Output[str]:
        """
        The time template was created.
        """
        return pulumi.get(self, "create_time")
    @property
    @pulumi.getter(name="dagTimeout")
    def dag_timeout(self) -> pulumi.Output[str]:
        """
        Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
        """
        return pulumi.get(self, "dag_timeout")
    @property
    @pulumi.getter
    def jobs(self) -> pulumi.Output[Sequence['outputs.OrderedJobResponse']]:
        """
        The Directed Acyclic Graph of Jobs to submit.
        """
        return pulumi.get(self, "jobs")
    @property
    @pulumi.getter
    def labels(self) -> pulumi.Output[Mapping[str, str]]:
        """
        Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance.Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).No more than 32 labels can be associated with a template.
        """
        return pulumi.get(self, "labels")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def parameters(self) -> pulumi.Output[Sequence['outputs.TemplateParameterResponse']]:
        """
        Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
        """
        return pulumi.get(self, "parameters")
    @property
    @pulumi.getter
    def placement(self) -> pulumi.Output['outputs.WorkflowTemplatePlacementResponse']:
        """
        WorkflowTemplate scheduling information.
        """
        return pulumi.get(self, "placement")
    @property
    @pulumi.getter(name="updateTime")
    def update_time(self) -> pulumi.Output[str]:
        """
        The time template was last updated.
        """
        return pulumi.get(self, "update_time")
    @property
    @pulumi.getter
    def version(self) -> pulumi.Output[int]:
        """
        Optional. Used to perform a consistent read-modify-write.This field should be left blank for a CreateWorkflowTemplate request. It is required for an UpdateWorkflowTemplate request, and must match the current server version. A typical update template flow would fetch the current template with a GetWorkflowTemplate request, which will return the current template with the version field filled in with the current server version. The user updates other fields in the template, then returns it as part of the UpdateWorkflowTemplate request.
        """
        return pulumi.get(self, "version")
|
#
# Brambox version: Automatically generated version file
# Copyright EAVISE
#
__version__ = "1.1.0+b"
|
#!/usr/bin/python3
# coding=utf-8
#
import os
import threading
import socketserver
import select
import struct
import logging
import logging.config
import datetime
import time
import json
import yaml
import ssl
import paho.mqtt.publish as publish
from crc import CRC_GT02
THREAD_TIMEOUT = 120  # seconds of socket idle time before a handler thread exits
config = {}  # Global config dict; replaced with the parsed YAML by main()
# Load logging config from logging.yaml
def setup_logging(default_path='logging.yaml', default_level=logging.INFO, env_key='LOG_CFG'):
    """Configure logging from a YAML dictConfig file.

    The config path defaults to *default_path* but can be overridden via
    the *env_key* environment variable. Falls back to basicConfig at
    *default_level* when no config file is found.
    """
    path = os.getenv(env_key) or default_path
    if os.path.exists(path):
        with open(path, 'rt') as f:
            # BUG FIX: yaml.load() without a Loader is deprecated and can
            # construct arbitrary Python objects; safe_load only builds
            # plain data, which is all a logging config needs.
            cfg = yaml.safe_load(f)
        logging.config.dictConfig(cfg)
        logging.info("Configured logging from yaml")
    else:
        logging.basicConfig(level=default_level)
        logging.info("Configured logging basic")
# Load config from yaml file
def load_config(path='config.yaml'):
    """Load the YAML config file at *path*.

    Returns the parsed config, or None when the file does not exist.
    """
    log = logging.getLogger(__name__)
    if not os.path.exists(path):
        log.error("Config file not found: " + path)
        return None
    log.debug("Loading config from: " + str(path))
    with open(path, 'r') as y:
        # BUG FIX: yaml.load() without a Loader is deprecated/unsafe;
        # safe_load is sufficient for a plain data config file.
        cfg = yaml.safe_load(y)
    log.debug("Config: " + str(cfg))
    return cfg
def validate_gt02(data):
    """Validate a raw GT02 frame and build the acknowledgement packet.

    Checks the start bytes, declared length, CRC and end bytes of *data*.
    Returns a dict with 'protocol', 'payload', 'serial' and a ready-made
    'response' frame on success, or None for a malformed packet.
    """
    log = logging.getLogger(__name__)
    crc16 = CRC_GT02()
    bad_data = False
    log.debug("Data: " + ' '.join(format(x, '02x') for x in data))
    data_length = len(data)
    if data_length > 10:
        # Long enough to be a valid packet - run the individual checks.
        start = data[0:2]
        if start != b'\x78\x78':
            log.error("Bad start to received data packet")
            bad_data = True
        # The length byte must match the frame length minus framing overhead.
        length = data[2]
        calc_length = data_length - 5
        if length != calc_length:
            log.error("Length mismatch -" +
                      " Calculated: " + str(calc_length) +
                      ", Supplied: " + str(length))
            bad_data = True
        # CRC covers everything between the start bytes and the CRC itself.
        crc = data[-4:-2]
        to_crc = data[2:-4]
        calc_crc = crc16.crcb(to_crc)
        if calc_crc != crc:
            # BUG FIX: the old "%02x" format raised TypeError on bytes
            # objects whenever a checksum actually mismatched; render the
            # CRCs as hex pairs instead (same style as the data dump above).
            log.error("Checksum mismatch -" +
                      " Calculated: " +
                      ' '.join(format(x, '02x') for x in calc_crc) +
                      ", Supplied: " +
                      ' '.join(format(x, '02x') for x in crc))
            bad_data = True
        end = data[-2:]
        if end != b'\r\n':
            log.error("Ending bytes are incorrect")
            bad_data = True
    else:
        # Data packet is too short to be valid.
        bad_data = True
    if bad_data:
        return None
    protocol = data[3]
    payload = data[4:-6]
    serial = data[-6:-4]
    # Acknowledgement: length 0x05, echo of protocol and serial number.
    response_payload = b'\x05' + bytes([protocol, serial[0], serial[1]])
    response_crc = crc16.crcb(response_payload)
    response = b'\x78\x78' + response_payload + response_crc + b'\x0d\x0a'
    return {'protocol': protocol,
            'payload': payload,
            'serial': serial,
            'response': response}
def parse_login(data):
    """Parse a login payload (raw IMEI bytes) against the global config.

    Returns a dict whose 'ok' key says whether the IMEI is known; when it
    is, 'topic', 'device' and 'tid' describe the MQTT identity to use.
    """
    log = logging.getLogger(__name__)
    imei = ''.join(format(byte, '02x') for byte in data)
    auth = {'ok': False, 'imei': imei}
    if imei not in config['imei']:
        log.error("Unknown device: " + imei)
        return auth
    entry = config['imei'][imei]
    device = entry['device']
    tid = entry['tid']
    topic = "owntracks/" + device + "/" + tid
    log.info("Login from " + imei + " reporting as " + topic)
    auth.update(ok=True, topic=topic, device=device, tid=tid)
    return auth
def parse_location(data):
    """Parse a GT02 location payload (protocol 0x12) into a dict.

    *data* is the packet payload starting at the date/time field. Returns
    a dict with the parsed datetime, unix timestamp, signed decimal-degree
    lat/lon, speed, heading, satellite count, fix status and GSM cell info.
    """
    log = logging.getLogger(__name__)
    log.debug("Parsing location packet")
    # Date/time: plain binary byte values, year counted from 2000.
    year = data[0] + 2000
    month = data[1]
    day = data[2]
    hour = data[3]
    minute = data[4]
    second = data[5]
    dt = datetime.datetime(year, month, day, hour, minute, second)
    # NOTE(review): mktime interprets dt in the local timezone - confirm
    # whether the tracker reports UTC before trusting this timestamp.
    tst = int(time.mktime(dt.timetuple()))
    log.debug("Datetime: " + str(dt))
    # GPS quality byte: high nibble = bit length, low nibble = satellites.
    bit_length = (data[6] & 0xf0) >> 4
    num_sats = data[6] & 0x0f
    log.debug("GPS Quality: Bitlength = " + str(bit_length) +
              " Number of satellites = " + str(num_sats))
    # Speed in kph.
    speed = data[15]
    # BUG FIX: the branches were swapped - below 1 kph means "not moving"
    # (consistent with the Static/Moving log decision further down).
    if speed < 1:
        log.debug("Not moving")
    else:
        log.debug("Moving at " + str(speed) + " kph")
    # Status and direction.
    # BUG FIX: combine the two status bytes with OR; the original used '&',
    # which always produced 0 because the shifted byte has no low bits set.
    status = (data[16] << 8) | data[17]
    # BUG FIX: the course occupies the low 10 bits (the flag bits start at
    # 0x0400), so mask with 0x3ff rather than 0x3f.
    direction_deg = status & 0x3ff
    log.debug("Heading: " + str(direction_deg) + "°")
    lat_hemi = 'S' if status & 0x0400 else 'N'
    lon_hemi = 'E' if status & 0x0800 else 'W'
    gps_pos_ok = not (status & 0x1000)
    gps_pos = "Differential" if status & 0x2000 else "Live"
    log.debug("GPS Status: Fix " +
              str(gps_pos_ok) +
              " data is " + gps_pos
              )
    # Lat / lon: big-endian 32-bit values in units of 1/30000 of a minute.
    lat_raw = struct.unpack('>I', data[7:11])[0]
    lon_raw = struct.unpack('>I', data[11:15])[0]
    lat_dd = lat_raw / (30000 * 60)
    lon_dd = lon_raw / (30000 * 60)
    lat_deg = int(lat_dd)
    lat_min = (lat_dd - lat_deg) * 60
    lon_deg = int(lon_dd)
    lon_min = (lon_dd - lon_deg) * 60
    loc_txt = str(lat_deg) + "° "
    loc_txt += format(lat_min, '02.4f') + "'" + lat_hemi + " "
    loc_txt += str(lon_deg) + "° "
    loc_txt += format(lon_min, '03.4f') + "'" + lon_hemi
    # Apply the hemisphere sign to the decimal-degree values.
    if lat_hemi == 'S':
        lat_dd = -lat_dd
    if lon_hemi == 'W':
        lon_dd = -lon_dd
    log.debug("DD: " + str(lat_dd) + " " + str(lon_dd))
    # Log position and movement.
    if speed < 1:
        log.info("Static Location: " +
                 str(dt) +
                 " (" + format(lat_dd, '02.4f') + ", " +
                 format(lon_dd, '03.4f') + "), " +
                 loc_txt +
                 " [" + str(num_sats) + "]"
                 )
    else:
        log.info("Moving, Location: " +
                 str(dt) +
                 " (" + format(lat_dd, '02.4f') + ", " +
                 format(lon_dd, '03.4f') + "), " +
                 loc_txt +
                 " speed " + str(speed) +
                 " kph, heading " + str(direction_deg) +
                 " [" + str(num_sats) + "]"
                 )
    # GSM info - same always-zero '&' bug fixed with OR.
    # NOTE(review): these offsets reuse data[17], which is also the second
    # status byte - verify the field offsets against the protocol spec.
    mcc = (data[17] << 8) | data[18]
    mnc = data[19]
    lac = (data[20] << 8) | data[21]
    cell_id = (data[22] << 8) | data[23]
    log.debug("GSM Data:" +
              " MCC: 0x" + format(mcc, '04x') +
              " MNC: 0x" + format(mnc, '02x') +
              " LAC: 0x" + format(lac, '04x') +
              " Cell ID: 0x" + format(cell_id, '04x')
              )
    info = {
        'datetime': dt,
        'timestamp': tst,
        'lat': lat_dd,
        'lon': lon_dd,
        'position': loc_txt,
        'speed': speed,
        'heading': direction_deg,
        'satellites': num_sats,
        'locked': gps_pos_ok,
        'pos_status': gps_pos,
        'bitlength': bit_length,
        'cell_id': cell_id,
        'mcc': mcc,
        'mnc': mnc,
        'lac': lac
    }
    return info
def parse_status(data):
    """Parse a GT02 status payload (protocol 0x13) into a dict.

    Byte layout: terminal-information flags, battery voltage level,
    GSM signal strength, alarm/language byte.
    """
    log = logging.getLogger(__name__)
    log.debug("Parsing status packet")
    info = data[0]
    voltage = data[1]
    signal = data[2]
    alarm_lang = data[3]
    # Decode the terminal-information flag bits.
    immobilised = bool(info & 0x80)
    tracking = bool(info & 0x40)
    alarm = (info & 0x38) >> 3
    charging = bool(info & 0x04)
    ignition = bool(info & 0x02)
    # Bit 0 set means the terminal is deactivated.
    active = not (info & 0x01)
    # Typo fix in the log text: "Ingition" -> "Ignition".
    log.debug("Vehicle:" +
              " Immobilised - " + str(immobilised) +
              " Ignition on - " + str(ignition) +
              " Alarm 1 - " + str(alarm) +
              " Alarm 2 - " + str(alarm_lang)
              )
    log.debug("System:"
              " Active - " + str(active) +
              " Tracking - " + str(tracking) +
              " Charging - " + str(charging) +
              " Voltage - " + str(voltage) +
              " Signal - " + str(signal)
              )
    return {
        'active': active,
        'tracking': tracking,
        'charging': charging,
        'voltage': voltage,
        'signal': signal,
        'immobilised': immobilised,
        'ignition': ignition,
        'alarm1': alarm,
        'alarm2': alarm_lang
    }
class ThreadedRequestHandler(socketserver.BaseRequestHandler):
    def handle(self):
        """
        Create response to incoming packet

        Build a response from an incoming GT06 protocol GPS Tracker packet,
        forwarding location reports to MQTT as OwnTracks messages. Loops
        until the socket closes, a timeout elapses or bad data arrives.
        """
        log = logging.getLogger(__name__)
        log.debug("New processing thread started: "
                  + threading.current_thread().name
                  )
        done = False
        # mqtt broker information
        broker = config['mqtt']['broker']
        port = config['mqtt']['port']
        mqtt_auth = {
            'username': config['mqtt']['username'],
            'password': config['mqtt']['password']
        }
        if 'tls' in config:
            tls = {
                'ca_certs': config['tls']['ca_certs']
            }
        else:
            tls = None
        # Device authentication state; stays None until a valid login packet.
        auth = None
        while not done:
            # Loop until we signal exit by no or bad data
            log.debug("Blocking on incoming data")
            ready = select.select([self.request], [], [], THREAD_TIMEOUT)
            if not ready[0]:
                # Timeout has occured and we are done
                log.debug("No message received for " + str(THREAD_TIMEOUT) +
                          " seconds, " + threading.current_thread().name +
                          " exiting")
                done = True
            else:
                # We didn't timeout
                b = self.request.recv(260)
                if not b:
                    # If we get zero length data from this call then the
                    # socket is closed, so we are done!
                    # BUG FIX: Thread.name is an attribute, not a method -
                    # calling it raised TypeError on every socket close.
                    log.debug("Thread "
                              + threading.current_thread().name
                              + " ending, socket closed")
                    done = True
                else:
                    # We got some data
                    content = validate_gt02(b)
                    if content is not None:
                        # It looked like a valid packet
                        if auth is None:
                            # Not yet authorised so only allow login
                            if content['protocol'] == 0x01:
                                # Login
                                auth = parse_login(content['payload'])
                                if auth['ok'] is not True:
                                    # Either bad data or we don't recognise imei
                                    done = True
                        # We must be authorised, so look at other protocols
                        elif content['protocol'] == 0x12:
                            # Location Data
                            log.debug("Location packet received")
                            # Extract data from payload
                            info = parse_location(content['payload'])
                            log.debug("Extracted: " + str(info))
                            owntracks = {
                                '_type': 'location',
                                'tid': auth['tid'],
                                'imei': auth['imei'],
                                'lat': info['lat'],
                                'lon': info['lon'],
                                'cog': info['heading'],
                                'vel': info['speed'],
                                'tst': info['timestamp']
                            }
                            if not info['locked'] or info['satellites'] < 4:
                                # No usable fix: report very low accuracy.
                                owntracks['acc'] = 10000
                            else:
                                # This is a guestimate
                                owntracks['acc'] = int(200
                                                       / info['satellites'])
                            # Publish to mqtt
                            log.debug("Sending via mqtt: " +
                                      auth['topic'] + " " +
                                      str(owntracks)
                                      )
                            publish.single(
                                auth['topic'],
                                payload=json.dumps(owntracks),
                                tls=tls,
                                auth=mqtt_auth,
                                hostname=broker,
                                port=int(port)
                            )
                            log.debug("Sent successfully")
                        elif content['protocol'] == 0x13:
                            # Status Information
                            log.debug("Status packet received")
                            info = parse_status(content['payload'])
                            log.info("Status: " +
                                     "Active: " +
                                     str(info["active"]) +
                                     ", Tracking: " +
                                     str(info["tracking"]) +
                                     ", GSM Signal: " +
                                     str(info["signal"])
                                     )
                        elif content['protocol'] == 0x15:
                            # String Information
                            log.warning("String packet received - " +
                                        "NOT IMPLEMENTED")
                        elif content['protocol'] == 0x16:
                            # Alarm Information
                            log.warning("Alarm packet received - " +
                                        "NOT IMPLEMENTED")
                        elif content['protocol'] == 0x1A:
                            # GPS query by phone
                            log.warning("GPS query by phone packet received - "
                                        + "NOT IMPLEMENTED")
                        else:
                            log.error("Unknown protocol: " +
                                      str(content['protocol']))
                            done = True
                        if not done:
                            # Acknowledge the packet with the prepared response.
                            log.debug("Response packet: " +
                                      ' '.join(format(x, '02x') for x in
                                               content['response']))
                            self.request.send(content['response'])
                    else:
                        done = True
                        log.error("Bad data received")
# TCP server that handles each tracker connection in its own thread.
class ThreadedServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    # Handler threads are daemonic, so they do not block interpreter shutdown.
    daemon_threads = True
def main():
    """Configure logging and config, then serve GT02 connections forever."""
    setup_logging()
    log = logging.getLogger(__name__)
    log.info("Starting GT02 Server...")
    log.info("Reading config file: config.yaml")
    global config
    config = load_config()
    # BUG FIX: construct the server outside the try block - if construction
    # raised, the finally clause referenced an unbound 'server' (NameError).
    server = ThreadedServer(("", 9000), ThreadedRequestHandler)
    try:
        server.serve_forever()
    finally:
        server.server_close()
# Run the server when executed as a script.
if __name__ == "__main__":
    main()
|
#!/usr/bin/env/python
# File name : LED_test.py
# Author : majki
# Date : 2020/11/01
#import web_pdb; web_pdb.set_trace()
import move
import robotLight
import ultra
import os
#import info
import RPIservo
from time import sleep
def arm_park():
    # Move the arm servos (channels 12-14) to the parked pose.
    # NOTE(review): 'sc' is created as a local inside init(), so this
    # module-level reference raises NameError - confirm sc should be global.
    sc.certSpeed([12,13,14], [50,50,0], [70,70,40])
def arm_reach_front():
    # Extend the arm forward (servo channels 12-13).
    # NOTE(review): relies on a global 'sc' that init() only creates locally.
    sc.certSpeed([12,13], [-80,-80], [70,70])
def arm_reach_back():
    # Extend the arm backward (servo channels 12-13).
    # NOTE(review): relies on a global 'sc' that init() only creates locally.
    sc.certSpeed([12,13], [-80,70], [70,70])
def move_forward(speed: int, steps: int):
    """Drive forward for *steps* pulses of 0.1 s at *speed*, then stop."""
    for _ in range(steps):
        move.move(speed, "forward", "no")
        sleep(0.1)
    move.motorStop()
def move_backward(speed: int, steps: int):
    """Drive backward for *steps* pulses of 0.1 s at *speed*, then stop."""
    for _ in range(steps):
        move.move(speed, "backward", "no")
        sleep(0.1)
    move.motorStop()
def parking_sensor(RL):
    """Drive the light's parking-sensor mode from the ultrasonic distance
    reading. Blocks forever, polling every 0.1 s."""
    RL.parking_sensor()
    while True:
        dist = ultra.checkdist()
        # Faster blink (shorter off time) the closer the obstacle gets.
        RL.parking_sensor_off_time = dist if dist < 1.0 else 0.95
        # Red inside half a metre, otherwise green.
        RL.parking_sensor_color = [255, 0, 0] if dist < 0.5 else [0, 255, 0]
        sleep(0.1)
def init():
    # Initialise the robot subsystems: LEDs, arm servos, drive motors.
    # NOTE(review): parking_sensor() loops forever, so when the light setup
    # succeeds the servo and motor setup below is never reached - it likely
    # belongs in a thread or at the end of init(). Confirm intent.
    try:
        RL=robotLight.RobotLight()
        RL.start()
        #RL.breath(70,70,255)
        #RL.hazard()
        parking_sensor(RL)
    except:
        # NOTE(review): this bare except hides any error from the light
        # stack, not just a missing rpi_ws281x package.
        print('Use "sudo pip3 install rpi_ws281x" to install WS_281x package')
        pass
    try:
        # NOTE(review): sc is local to init(), but the arm_* helpers above
        # reference it as a global - they will raise NameError.
        sc = RPIservo.ServoCtrl()
        sc.start()
    except:
        pass
    try:
        move.setup()
    except:
        pass
# Run hardware initialisation when executed as a script.
if __name__ == '__main__':
    init()
|
"""
Luke
"""
from mmcv.utils import Registry, build_from_cfg
from torch import nn
TOKENIZERS = Registry('tokenizer')
def build_tokenizer(cfg, default_args=None):
    """Build a tokenizer from *cfg* via the TOKENIZERS registry.

    For 'Wrapper' types the wrapper object is built and its inner
    .tokenizer is returned; otherwise the built object itself.
    """
    is_wrapper = 'Wrapper' in cfg['type']
    built = build_from_cfg(cfg, TOKENIZERS, default_args)
    return built.tokenizer if is_wrapper else built
|
from flask import jsonify, request
from flask_restful import Resource
from zeep.exceptions import Fault
from marshmallow import ValidationError
from ..libs import VivaMyPages
from ..libs import VivaAttachments
from ..libs import hash_to_personal_number
from ..libs import authenticate
class Attachment(Resource):
    """REST resource returning a single Viva attachment for a user."""

    method_decorators = [authenticate]

    # BUG FIX: the parameters used '=str' (defaulting to the *type object*
    # str) where type annotations were clearly intended.
    def get(self, hash_id: str, attachment_id: str):
        """Return attachment *attachment_id* for the user behind *hash_id*."""
        try:
            personal_number = hash_to_personal_number(hash_id=hash_id)
            viva_attachments = VivaAttachments(
                my_pages=VivaMyPages(user=personal_number))
            attachment = viva_attachments.get(attachment_id=attachment_id)
            return attachment
        except Exception as error:
            # Boundary handler: surface any failure as a 400 JSON payload.
            return {
                'message': f'{error}',
                'code': 400
            }, 400
|
from PIL import Image # Python Imaging Library
import math # Maths functions
import sys # Allows us to access function args
import os # Allows us to split the text for saving the file
# Directory prefix holding the six cube-face JPEGs (passed on the command line).
filePath = sys.argv[1]
# adjusted the layout of the cubes to match the format used by humus.
# X -> Z
# Y -> X
# Z -> Y
#posx = Image.open(filePath+"posz.jpg")
#negx = Image.open(filePath+"negz.jpg")
#posy = Image.open(filePath+"posx.jpg")
#negy = Image.open(filePath+"negx.jpg")
#posz = Image.open(filePath+"posy.jpg")
#negz = Image.open(filePath+"negy.jpg")
# adapted to the "cube 360" standard file naming convention
posx = Image.open(filePath+"front.jpg")
negx = Image.open(filePath+"back.jpg")
posy = Image.open(filePath+"right.jpg")
negy = Image.open(filePath+"left.jpg")
posz = Image.open(filePath+"top.jpg")
negz = Image.open(filePath+"down.jpg")
# Faces are assumed square and all the same size as the front face.
squareLength = posx.size[0]
halfSquareLength = squareLength/2  # NOTE(review): unused in the visible code
outputWidth = squareLength*2   # equirectangular panorama is 2:1
outputHeight = squareLength*1
output = []  # flat, row-major list of output pixels
def unit3DToUnit2D(x, y, z, faceIndex):
    """Map a 3D point on a unit-cube face to 2D (u, v) coordinates in [0, 1]^2.

    `faceIndex` selects which two axes span the face; the vertical axis is
    flipped at the end because Image.getpixel addresses from the top-left.
    """
    if faceIndex == "X+":
        u, v = y + 0.5, z + 0.5
    elif faceIndex == "Y+":
        u, v = (x * -1) + 0.5, z + 0.5
    elif faceIndex == "X-":
        u, v = (y * -1) + 0.5, z + 0.5
    elif faceIndex == "Y-":
        u, v = x + 0.5, z + 0.5
    elif faceIndex == "Z+":
        u, v = y + 0.5, (x * -1) + 0.5
    else:  # "Z-"
        u, v = y + 0.5, x + 0.5
    # Flip vertically: image pixel origin is the top-left corner.
    return (u, 1 - v)
def projectX(theta, phi, sign):
    """Project the spherical direction (theta, phi) onto the x = +/-0.5 cube face.

    Returns (x, y, z, faceIndex) where the point lies on the selected face.
    """
    faceIndex = "X+" if sign == 1 else "X-"
    x = sign * 0.5
    # Scale the unit direction vector so its x component hits the face plane.
    rho = float(x) / (math.cos(theta) * math.sin(phi))
    y = rho * math.sin(theta) * math.sin(phi)
    z = rho * math.cos(phi)
    return (x, y, z, faceIndex)
def projectY(theta, phi, sign):
    """Project the spherical direction (theta, phi) onto the y = +/-0.5 cube face.

    Returns (x, y, z, faceIndex) where the point lies on the selected face.
    """
    faceIndex = "Y+" if sign == 1 else "Y-"
    y = sign * 0.5
    # Scale the unit direction vector so its y component hits the face plane.
    rho = float(y) / (math.sin(theta) * math.sin(phi))
    x = rho * math.cos(theta) * math.sin(phi)
    z = rho * math.cos(phi)
    return (x, y, z, faceIndex)
def projectZ(theta, phi, sign):
    """Project the spherical direction (theta, phi) onto the z = +/-0.5 cube face.

    Returns (x, y, z, faceIndex) where the point lies on the selected face.
    """
    faceIndex = "Z+" if sign == 1 else "Z-"
    z = sign * 0.5
    # Scale the unit direction vector so its z component hits the face plane.
    rho = float(z) / math.cos(phi)
    x = rho * math.cos(theta) * math.sin(phi)
    y = rho * math.sin(theta) * math.sin(phi)
    return (x, y, z, faceIndex)
def getColour(x, y, index):
    """Sample pixel (x, y) from the cube-face image selected by `index`.

    Returns None for an unrecognised face label, mirroring the original
    fall-through behaviour.
    """
    face = {
        "X+": posx, "X-": negx,
        "Y+": posy, "Y-": negy,
        "Z+": posz, "Z-": negz,
    }.get(index)
    if face is None:
        return None
    return face.getpixel((x, y))
def convertEquirectUVtoUnit2D(theta, phi):
    """Map equirectangular polar coordinates to a cube-face pixel.

    :param theta: longitude in [0, 2*pi]
    :param phi: colatitude in [0, pi]
    :returns: dict {"index": face label, "x": pixel column, "y": pixel row}
    """
    # calculate the unit vector
    x = math.cos(theta) * math.sin(phi)
    y = math.sin(theta) * math.sin(phi)
    z = math.cos(phi)
    # The dominant component decides which cube face the ray hits; dividing
    # by the max makes that component exactly +/-1.0.
    maximum = max(abs(x), abs(y), abs(z))
    xx = x / maximum
    yy = y / maximum
    zz = z / maximum
    # project ray to cube surface
    if xx == 1 or xx == -1:
        (x, y, z, faceIndex) = projectX(theta, phi, xx)
    elif yy == 1 or yy == -1:
        (x, y, z, faceIndex) = projectY(theta, phi, yy)
    else:
        (x, y, z, faceIndex) = projectZ(theta, phi, zz)
    (x, y) = unit3DToUnit2D(x, y, z, faceIndex)
    # BUG FIX: clamp to the last pixel. A face coordinate of exactly 1.0
    # scaled to squareLength would index one past the image edge and make
    # getpixel() raise an IndexError.
    x = min(int(x * squareLength), squareLength - 1)
    y = min(int(y * squareLength), squareLength - 1)
    return {"index": faceIndex, "x": x, "y": y}
# 1. loop through all of the pixels in the output image
for loopY in range(0,int(outputHeight)): # 0..height-1 inclusive
    for loopX in range(0,int(outputWidth)):
        # 2. get the normalised u,v coordinates for the current pixel
        U = float(loopX)/(outputWidth-1) # 0..1
        V = float(loopY)/(outputHeight-1) # no need for 1-... as the image output needs to start from the top anyway.
        # 3. taking the normalised cartesian coordinates calculate the polar coordinate for the current pixel
        theta = U*2*math.pi
        phi = V*math.pi
        # 4. calculate the 3D cartesian coordinate which has been projected to a cubes face
        cart = convertEquirectUVtoUnit2D(theta,phi)
        # 5. use this pixel to extract the colour
        # NOTE(review): pixels are appended row-major, matching putdata() below.
        output.append(getColour(cart["x"],cart["y"],cart["index"]))
# 6. write the output array to a new image file
outputImage = Image.new("RGB",((int(outputWidth)),(int(outputHeight))), None)
outputImage.putdata(output)
outputImage.save(filePath+"pano360.png")
|
# IPython log file
import itertools

import dask.array as da
import numpy as np
import zarr
from skimage.transform import downscale_local_mean
from tqdm import tqdm
# Open the source light-sheet volume lazily as a dask array.
lls = da.from_zarr('data/gokul-lls/aollsm-m4-560nm.zarr')
# NOTE(review): `np` is used below but numpy is never imported in this log --
# without `import numpy as np` at the top these lines raise NameError.
lls3 = zarr.open('data/gokul-lls/aollsm-m4-560nm-downscale.zarr', dtype=np.float32, shape=(199, 201, 192, 256), chunks=(1, 201, 192, 256))
# Downscale every (timepoint, z-slice) slab by 4x in both lateral dimensions.
indices = list(itertools.product(range(199), range(201)))
for i, j in tqdm(indices):
    lls3[i, j] = downscale_local_mean(np.array(lls[i, j]), (4, 4))
|
"""
ExportProgressDialog
:Authors:
Berend Klein Haneveld 2013
"""
from PySide.QtGui import QDialog
from PySide.QtGui import QGridLayout
from PySide.QtGui import QProgressBar
from PySide.QtGui import QLabel
class ExportProgressDialog(QDialog):
    """Modal dialog that shows a message above an indeterminate busy bar."""

    def __init__(self, parent, message):
        super(ExportProgressDialog, self).__init__(parent)
        self.setModal(True)
        self.setWindowTitle(message)

        # A QProgressBar with minimum == maximum == 0 renders as a busy
        # indicator rather than a determinate progress bar.
        busyBar = QProgressBar()
        busyBar.setMinimum(0)
        busyBar.setMaximum(0)

        grid = QGridLayout()
        grid.addWidget(QLabel(message))
        grid.addWidget(busyBar)
        self.setLayout(grid)
|
import matplotlib as mpl
import numpy as np
from matplotlib import pyplot as plt
def show_sample(sample):
    """Shows the sample with tasks and answers.

    Each train pair and each test item is drawn as one figure with the
    input matrix on the left and the output matrix (when present) on the
    right, using a fixed 0-9 colour scale.
    """

    def _show_pair(item):
        # One figure per item: input on subplot 121, optional output on 122.
        fig = plt.figure()
        ax_in = fig.add_subplot(121)
        ax_in.matshow(np.array(item["input"]), cmap="Set3",
                      norm=mpl.colors.Normalize(vmin=0, vmax=9))
        if "output" in item:
            ax_out = fig.add_subplot(122)
            ax_out.matshow(np.array(item["output"]), cmap="Set3",
                           norm=mpl.colors.Normalize(vmin=0, vmax=9))
        plt.show()

    print("Train:")
    for item in sample["train"]:
        _show_pair(item)
    print("Test:")
    for item in sample["test"]:
        _show_pair(item)
def matrix2answer(array):
    """Serialize a 2-D numeric array as '|' + each row's digits + '|'.

    Values are truncated to int before formatting, e.g. [[1,2],[3,4]]
    becomes "|12|34|".
    """
    pieces = ["|"]
    for row_idx in range(array.shape[0]):
        for col_idx in range(array.shape[1]):
            pieces.append(str(int(array[row_idx, col_idx])))
        pieces.append("|")
    return "".join(pieces)
|
#!/usr/local/bin/python
__metaclass__=type
class Rectangle:
    """Rectangle whose width/height can be read and written via one `size` tuple."""

    def __init__(self):
        self.width = 0
        self.height = 0

    def setSize(self, size):
        # Unpack a (width, height) pair into the two attributes.
        self.width, self.height = size

    def getSize(self):
        return self.width, self.height

    # Computed attribute backed by width/height.
    size = property(getSize, setSize)
# Demo: the `size` property reads and writes width/height together.
r = Rectangle()
r.width = 15
r.height = 10
# BUG FIX: use function-call print so this demo also runs on Python 3;
# on Python 2, `print (tuple)` prints the identical tuple representation.
print(r.size)
r.size = (150, 100)
print(r.size)
|
#!/usr/bin/env python
#
# description: Toeplitz Matrix
# difficulty: Easy
# leetcode_num: 766
# leetcode_url: https://leetcode.com/problems/toeplitz-matrix/
#
# Given an m x n matrix, return true if the matrix is Toeplitz. Otherwise, return false.
# A matrix is Toeplitz if every diagonal from top-left to bottom-right has the same elements.
# Example 1:
#
# Input: matrix = [[1,2,3,4],[5,1,2,3],[9,5,1,2]]
# Output: true
# Explanation:
# In the above grid, the diagonals are:
# "[9]", "[5, 5]", "[1, 1, 1]", "[2, 2, 2]", "[3, 3]", "[4]".
# In each diagonal all elements are the same, so the answer is True.
# Example 2:
#
# Input: matrix = [[1,2],[2,2]]
# Output: false
# Explanation:
# The diagonal "[1, 2]" has different elements.
#
# Constraints:
# m == matrix.length
# n == matrix[i].length
# 1 <= m, n <= 20
# 0 <= matrix[i][j] <= 99
def IsToeplitzMatrix(matrix):
    """Return True if every top-left -> bottom-right diagonal holds one value.

    A matrix is Toeplitz exactly when every cell equals its upper-left
    neighbour, so checking that single relation for all cells suffices.
    """
    if not matrix:
        return True
    return all(
        matrix[r][c] == matrix[r - 1][c - 1]
        for r in range(1, len(matrix))
        for c in range(1, len(matrix[0]))
    )
if __name__ == '__main__':
    # Smoke tests: (matrix, expected result) pairs from the problem statement.
    test_cases = [
        ([[1, 2, 3, 4], [5, 1, 2, 3], [9, 5, 1, 2]], True),
        ([[1, 2], [2, 2]], False)
    ]
    for matrix, res in test_cases:
        assert IsToeplitzMatrix(matrix) == res, 'Test Failed'
|
from gpvolve.cluster.utils import assignment, membership, crispness, cluster_assignments
from gpvolve.cluster.pcca import cluster
from gpvolve.cluster.opt import optimize
import gpvolve.cluster.criteria as criteria
|
from rest_framework import serializers
from .models import Profile
class RequestsSerializer(serializers.ModelSerializer):
    """Compact Profile representation: code, full name and contact fields."""
    class Meta:
        model = Profile
        fields = (
            'code',
            'full_name',
            'email',
            'phone'
        )
class ProfileSerializer(serializers.ModelSerializer):
    """Full Profile representation including position and access level.

    NOTE(review): exposes the same field set as ProfileDetailSerializer
    below -- one of the two may be redundant.
    """
    class Meta:
        model = Profile
        fields = (
            'code',
            'full_name',
            'position',
            'phone',
            'email',
            'level',
        )
class ProfileListSerializer(serializers.ModelSerializer):
    """List representation of Profile (no access level).

    `email` is redeclared explicitly as an EmailField, which makes it
    required and validated even if the model field is more permissive.
    """
    email = serializers.EmailField()

    class Meta:
        model = Profile
        fields = (
            'code',
            'full_name',
            'position',
            'phone',
            'email',
        )
class ProfileDetailSerializer(serializers.ModelSerializer):
    """Detail representation of Profile including position and access level."""
    class Meta:
        model = Profile
        fields = (
            'code',
            'full_name',
            'position',
            'phone',
            'email',
            'level',
        )
class ProfileCreateSerializer(serializers.ModelSerializer):
    """Create/update serializer for Profile that also manages the password.

    `code` is read-only (server-generated).
    """
    code = serializers.CharField(read_only=True)

    def create(self, validated_data):
        """Create an active Profile with a hashed password.

        Profiles whose level equals 'Супер Администратор' additionally get
        staff and superuser rights.
        """
        profile = Profile(**validated_data)
        profile.set_password(validated_data.get('password'))
        profile.is_active = True
        if profile.level == 'Супер Администратор':
            profile.is_staff = True
            profile.is_superuser = True
        profile.save()
        return profile

    def update(self, instance, validated_data):
        """Update profile fields; re-hash the password only when a new one is sent.

        BUG FIX: the original wrapped the password handling in a bare
        ``except: ...`` that silently swallowed every error (including a
        TypeError from check_password(None)). Now the password is only
        touched when one was actually submitted and differs from the
        current one.
        """
        instance.full_name = validated_data.get(
            'full_name', instance.full_name)
        instance.position = validated_data.get('position', instance.position)
        instance.phone = validated_data.get('phone', instance.phone)
        instance.email = validated_data.get('email', instance.email)
        instance.level = validated_data.get('level', instance.level)
        password = validated_data.get('password')
        if password and not instance.check_password(password):
            instance.set_password(password)
        instance.save()
        return instance

    class Meta:
        model = Profile
        fields = (
            'code',
            'full_name',
            'position',
            'phone',
            'email',
            'level',
            'password'
        )
|
import sys
input = sys.stdin.readline  # NOTE(review): shadows builtin input() for faster reads
# Jersey size ranking: a larger value means a bigger jersey.
sizes = {"S": 1, "M": 2, "L": 3}
jers = int(input())   # number of jerseys
athl = int(input())   # number of athletes
stuff = [input().rstrip() for n in range(jers)]  # available jersey sizes by index
works = 0
for _ in range(athl):
    s = input().split()  # [requested size, 1-based jersey index]
    s[1] = int(s[1]) - 1
    # The jersey works if it has not been taken (marked 0) and is at least
    # as big as the athlete's requested size.
    if (stuff[s[1]] != 0 and sizes[stuff[s[1]]] >= sizes[s[0]]):
        works += 1
        stuff[s[1]] = 0  # mark this jersey as taken
print(works)
#UserName List Generator - D.Patrick Dugan
#There probably is a more pythonic way to do this...
import argparse
import sys
class UserNameGen(object):
    """Generate candidate usernames from 'First Last' names.

    Produces every common corporate naming convention (FirstLast, f.last,
    Last_First, ...) in both capitalised and all-lowercase variants,
    optionally suffixed with a number range and/or an email domain.
    """

    def __init__(self, username, usersfile, outfile, numappend, email):
        self.username = username    # single 'First Last' name, or None
        self.usersfile = usersfile  # path to a file of names, or None
        self.outfile = outfile      # output file path, or None for stdout
        self.numappend = numappend  # numeric suffix range as 'x,y', or None
        self.email = email          # '@domain' suffix, or None

    # --- capitalised naming conventions -----------------------------------
    def FirstLast(self, firstName, lastName):
        return firstName[0].upper()+firstName[1:]+lastName[0]+lastName[1:]
    def FirstDotLast(self, firstName, lastName):
        return firstName[0].upper()+firstName[1:]+'.'+lastName[0]+lastName[1:]
    def First_Last(self, firstName, lastName):
        return firstName[0].upper()+firstName[1:]+'_'+lastName[0]+lastName[1:]
    def FLast(self, firstName, lastName):
        return firstName[0]+lastName[0]+lastName[1:]
    def FDotLast(self, firstName, lastName):
        return firstName[0]+'.'+lastName[0]+lastName[1:]
    def F_Last(self, firstName, lastName):
        return firstName[0]+'_'+lastName[0]+lastName[1:]
    def FirstL(self, firstName, lastName):
        return firstName[0].upper()+firstName[1:]+lastName[0]
    def FirstDotL(self, firstName, lastName):
        return firstName[0].upper()+firstName[1:]+'.'+lastName[0]
    def First_L(self, firstName, lastName):
        return firstName[0].upper()+firstName[1:]+'_'+lastName[0]
    def FirLas(self, firstName, lastName):
        return firstName[0].upper()+firstName[1:3]+lastName[0].upper()+lastName[1:3]
    def FirDotLas(self, firstName, lastName):
        return firstName[0].upper()+firstName[1:3]+'.'+lastName[0].upper()+lastName[1:3]
    def Fir_Las(self, firstName, lastName):
        return firstName[0].upper()+firstName[1:3]+'_'+lastName[0].upper()+lastName[1:3]
    def FiLast(self, firstName, lastName):
        return firstName[0].upper()+firstName[1:2]+lastName[0].upper()+lastName[1:]
    def FiDotLast(self, firstName, lastName):
        return firstName[0].upper()+firstName[1:2]+'.'+lastName[0].upper()+lastName[1:]
    def Fi_Last(self, firstName, lastName):
        return firstName[0].upper()+firstName[1:2]+'_'+lastName[0].upper()+lastName[1:]
    def LastFirst(self, firstName, lastName):
        return lastName[0].upper()+lastName[1:]+firstName[0].upper()+firstName[1:]
    def LastDotFirst(self, firstName, lastName):
        return lastName[0].upper()+lastName[1:]+'.'+firstName[0].upper()+firstName[1:]
    def Last_First(self, firstName, lastName):
        return lastName[0].upper()+lastName[1:]+'_'+firstName[0].upper()+firstName[1:]
    def LasFir(self, firstName, lastName):
        return lastName[0].upper()+lastName[1:3]+firstName[0].upper()+firstName[1:3]
    def LasDotFir(self, firstName, lastName):
        return lastName[0].upper()+lastName[1:3]+'.'+firstName[0].upper()+firstName[1:3]
    def Las_Fir(self, firstName, lastName):
        return lastName[0].upper()+lastName[1:3]+'_'+firstName[0].upper()+firstName[1:3]

    # --- same conventions, all lowercase -----------------------------------
    def firstlast(self, firstName, lastName):
        return firstName[0].lower()+firstName[1:]+lastName[0].lower()+lastName[1:]
    def firstDotlast(self, firstName, lastName):
        return firstName[0].lower()+firstName[1:]+'.'+lastName[0].lower()+lastName[1:]
    def first_last(self, firstName, lastName):
        return firstName[0].lower()+firstName[1:]+'_'+lastName[0].lower()+lastName[1:]
    def flast(self, firstName, lastName):
        # BUG FIX: the original chained .lower().lower() (harmless but redundant).
        return firstName[0].lower()+lastName[0].lower()+lastName[1:]
    def fDotlast(self, firstName, lastName):
        return firstName[0].lower()+'.'+lastName[0].lower()+lastName[1:]
    def f_last(self, firstName, lastName):
        return firstName[0].lower()+'_'+lastName[0].lower()+lastName[1:]
    def firstl(self, firstName, lastName):
        return firstName[0].lower()+firstName[1:]+lastName[0].lower()
    def firstDotl(self, firstName, lastName):
        return firstName[0].lower()+firstName[1:]+'.'+lastName[0].lower()
    def first_l(self, firstName, lastName):
        return firstName[0].lower()+firstName[1:]+'_'+lastName[0].lower()
    def firlas(self, firstName, lastName):
        return firstName[0].lower()+firstName[1:3]+lastName[0].lower()+lastName[1:3]
    def firDotlas(self, firstName, lastName):
        return firstName[0].lower()+firstName[1:3]+'.'+lastName[0].lower()+lastName[1:3]
    def fir_las(self, firstName, lastName):
        return firstName[0].lower()+firstName[1:3]+'_'+lastName[0].lower()+lastName[1:3]
    def filast(self, firstName, lastName):
        return firstName[0].lower()+firstName[1:2]+lastName[0].lower()+lastName[1:]
    def fiDotlast(self, firstName, lastName):
        return firstName[0].lower()+firstName[1:2]+'.'+lastName[0].lower()+lastName[1:]
    def fi_last(self, firstName, lastName):
        return firstName[0].lower()+firstName[1:2]+'_'+lastName[0].lower()+lastName[1:]
    def lastfirst(self, firstName, lastName):
        # BUG FIX: the original used firstName[1] (a single character)
        # instead of firstName[1:], truncating the first name.
        return lastName[0].lower()+lastName[1:]+firstName[0].lower()+firstName[1:]
    def lastDotfirst(self, firstName, lastName):
        return lastName[0].lower()+lastName[1:]+'.'+firstName[0].lower()+firstName[1:]
    def last_first(self, firstName, lastName):
        return lastName[0].lower()+lastName[1:]+'_'+firstName[0].lower()+firstName[1:]
    def lasfir(self, firstName, lastName):
        return lastName[0].lower()+lastName[1:3]+firstName[0].lower()+firstName[1:3]
    def lasDotfir(self, firstName, lastName):
        return lastName[0].lower()+lastName[1:3]+'.'+firstName[0].lower()+firstName[1:3]
    def las_fir(self, firstName, lastName):
        return lastName[0].lower()+lastName[1:3]+'_'+firstName[0].lower()+firstName[1:3]

    @staticmethod
    def outputUserName(lineEntry, f=None):
        """Write one username to the open file `f`, or print it when f is None."""
        if f is None:
            print(lineEntry)
        else:
            f.write(lineEntry + '\n')

    def run(self):
        """Generate usernames for the single name given on the command line."""
        usernames = [self.username]
        self.generate_user_names(usernames)

    def load_users_file(self):
        """Generate usernames for every 'First Last' line in the users file."""
        with open(self.usersfile) as names:
            usernames = [line.strip() for line in names]
        self.generate_user_names(usernames)

    def generate_user_names(self, usernames):
        """Apply every naming convention to each name and emit the results.

        When `numappend` is set ('x,y'), each base name is also emitted with
        every numeric suffix in that inclusive range; `email` appends the
        domain to every emitted entry. Malformed names (not exactly two
        words) are reported and skipped.
        """
        functions = [
            self.FirstLast, self.FirstDotLast, self.First_Last,
            self.FLast, self.FDotLast, self.F_Last,
            self.FirstL, self.FirstDotL, self.First_L,
            self.FirLas, self.FirDotLas, self.Fir_Las,
            self.FiLast, self.FiDotLast, self.Fi_Last,
            self.LastFirst, self.LastDotFirst, self.Last_First,
            self.LasFir, self.LasDotFir, self.Las_Fir,
            # lowercase functions
            self.firstlast, self.firstDotlast, self.first_last,
            self.flast, self.fDotlast, self.f_last,
            self.firstl, self.firstDotl, self.first_l,
            self.firlas, self.firDotlas, self.fir_las,
            self.filast, self.fiDotlast, self.fi_last,
            self.lastfirst, self.lastDotfirst, self.last_first,
            self.lasfir, self.lasDotfir, self.las_fir,
        ]
        if self.outfile is not None:
            f = open(self.outfile, 'w+')
        else:
            f = None
        for name in usernames:
            try:
                firstName, lastName = name.split()
                for fn in functions:
                    lineEntry = fn(firstName, lastName)
                    if self.numappend:
                        if self.email:
                            self.outputUserName(lineEntry+self.email, f)
                        numrange = self.numappend.split(',')
                        for num in range(int(numrange[0]), int(numrange[1])+1):
                            if self.email:
                                self.outputUserName(lineEntry+str(num)+self.email, f)
                            else:
                                self.outputUserName(lineEntry+str(num), f)
                    elif self.email:
                        self.outputUserName(lineEntry+self.email, f)
                    else:
                        self.outputUserName(lineEntry, f)
            except Exception as e:
                print(e)
                continue
        if f is not None:
            f.close()
if __name__ == '__main__':
    # CLI: a single name (-u), or a file of names (-U); optional output file,
    # numeric suffix range (-n, stored as args.n) and email domain suffix.
    parser = argparse.ArgumentParser(add_help= True, description= "Generates a list of usernames based off of standard naming conventions.")
    parser.add_argument('-u','--username', help="Name of the user to enumerate. 'First Last' format")
    parser.add_argument('-U','--usersfile', help="File with names to generate list in 'First Last' format")
    parser.add_argument('-o','--outfile', action='store', help= "File to save generated usernames in.")
    parser.add_argument('-n', help='Adds number range to every naming convention. Must be in "x,y" format.')
    parser.add_argument('-e', '--email', help="Appends '@domain.com' to all generated usernames")
    # No arguments at all: show usage and exit with an error status.
    if len(sys.argv)==1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    try:
        executer = UserNameGen(args.username, args.usersfile, args.outfile, args.n, args.email)
        # A users file takes precedence over a single username.
        if executer.usersfile is not None:
            executer.load_users_file()
        elif executer.username is not None:
            executer.run()
        else:
            parser.print_help()
    except Exception as e:
        print(e)
# -*- coding: utf-8 -*-
import os
import logging
import time
import torch
import cv2
import numpy as np
class Solver(object):
    """Training driver for a torch network: runs the train loop with periodic
    metric logging, (currently disabled) validation, and checkpoint saving."""
    def __init__(self,
                 task_name,
                 torch_module,
                 trainset_dataiter,
                 net,
                 net_initializer,
                 optimizer,
                 lr_scheduler,
                 gpu_id_list,
                 num_train_loops,
                 loss_criterion,
                 train_metric,
                 display_interval=10,
                 val_evaluation_interval=100,
                 valset_dataiter=None,
                 val_metric=None,
                 num_val_loops=0,
                 pretrained_model_param_path=None,
                 save_prefix=None,
                 start_index=0,
                 model_save_interval=None,
                 train_metric_update_frequency=1):
        # NOTE(review): model_save_interval defaults to None, but fit() does
        # `i % self.model_save_interval` unconditionally -- confirm callers
        # always pass a value, otherwise that raises TypeError.
        self.task_name = task_name
        self.torch_module = torch_module
        self.trainset_dataiter = trainset_dataiter
        self.valset_dataiter = valset_dataiter
        self.net = net
        self.net_initializer = net_initializer
        self.gpu_id_list = gpu_id_list
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        self.num_train_loops = num_train_loops
        self.num_val_loops = num_val_loops
        self.loss_criterion = loss_criterion
        self.train_metric = train_metric
        self.val_metric = val_metric
        self.display_interval = display_interval
        self.val_evaluation_interval = val_evaluation_interval
        self.save_prefix = save_prefix
        self.start_index = start_index
        self.pretrained_model_param_path = pretrained_model_param_path
        self.model_save_interval = model_save_interval
        # Clamp the metric update frequency to at most the display interval.
        self.train_metric_update_frequency = \
            train_metric_update_frequency if train_metric_update_frequency <= \
            display_interval else display_interval
    def fit(self):
        """Run training iterations start_index+1 .. num_train_loops.

        Per iteration: fetch a batch, normalise pixels to [-1, 1], forward,
        compute loss, backprop, and step the optimizer and LR scheduler.
        Metrics are logged every `display_interval` iterations and a
        checkpoint is saved every `model_save_interval` iterations.
        """
        logging.info('Start training in gpu %s.-----------', str(self.gpu_id_list))
        sum_time = 0
        step_ = 0
        for i in range(self.start_index + 1, self.num_train_loops + 1):
            start = time.time()
            batch = self.trainset_dataiter.next()
            images = batch.data[0].cuda()
            targets = batch.label
            # NOTE(review): the time.sleep calls below throttle the loop --
            # confirm they are intentional (they slow training noticeably).
            time.sleep(0.01)
            #-------------------------------------------------------------------
            # print('images size : ',images.size())
            # imgs = images.cpu().numpy().transpose(0,2,3,1).astype(np.uint8)
            #
            # for k in range(imgs.shape[0]):
            # img_show = imgs[k,:,:,:]
            # cv2.namedWindow('imagex',0)
            # cv2.imshow('imagex',img_show)
            # cv2.waitKey(0)
            # Map uint8 pixel values [0, 255] to [-1, 1].
            images = (images - 127.5) / 127.5
            outputs = self.net(images)
            loss, loss_branch = self.loss_criterion(outputs, targets)
            # print('----->>> outputs len : ',len(outputs))
            # print('----->>> targets len : ',len(targets))
            # for k in range(len(targets)):
            # print(k,') ',len(targets[k]))
            # for m in range(len(targets[k])):
            # print(targets[k][m].size())
            # update parameters------------------------------------------------
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.lr_scheduler.step()
            time.sleep(0.2)
            """the train_metric need to debug"""
            # display training process----------------------------------------
            if i % self.train_metric_update_frequency == 0:
                self.train_metric.update(loss_branch)
            time.sleep(0.01)
            sum_time += (time.time() - start)
            if i % self.display_interval == 0:
                names, values = self.train_metric.get()
                logging.info('Iter[%d] -- Time elapsed: %.1f s. Speed: %.1f images/s.',
                             i, sum_time, self.display_interval * \
                             self.trainset_dataiter.get_batch_size() / sum_time)
                for name, value in zip(names, values):
                    logging.info('%s: --> %.4f', name, value)
                # NOTE(review): the logged value is loss * 10000, i.e. scaled --
                # confirm the scale factor is intended for readability.
                logging.info('total loss = %.4f', loss * 10000)
                self.train_metric.reset()
                sum_time = 0
            step_ += 1
            # evaluate the validation set
            # NOTE(review): validation is hard-disabled via `if False:` below.
            if False:#i % self.val_evaluation_interval == 0 and self.num_val_loops:
                with torch.no_grad():
                    logging.info('Start validating---------------------------')
                    for val_loop in range(self.num_val_loops):
                        val_batch = self.valset_dataiter.next()
                        val_images = val_batch[0].cuda()
                        val_targets = val_batch[1:].cuda()
                        val_outputs = self.net(val_images)
                        self.val_metric.update(val_outputs, val_targets)
                    names, values = self.val_metric.get()
                    logging.info('Iter[%d] validation metric -------------', i)
                    for name, value in zip(names, values):
                        logging.info('%s: --> %.4f', name, value)
                    logging.info('End validating ----------------------------')
                    self.val_metric.reset()
            # save model-----------------------------------------------------
            if i % self.model_save_interval == 0:
                torch.save(self.net.state_dict(),
                           self.save_prefix + '/' + self.task_name + \
                           '_{}.pth'.format(step_))
|
import time
import subprocess
import smbus
import Adafruit_SSD1306 as ssd1306
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
BUS = 1        # NOTE(review): unused in the visible code (smbus also unused)
ADDR = 0x3C    # I2C address of the SSD1306 OLED
RST = None     # no reset pin wired
oled = ssd1306.SSD1306_128_32(rst=RST, i2c_address=ADDR)
oled.begin()
oled.clear()
oled.display()
width = oled.width
height = oled.height
# 1-bit off-screen image buffer that gets pushed to the display each cycle.
image = Image.new('1', (width, height))
draw = ImageDraw.Draw(image)
draw.rectangle((0,0,width,height), outline=0, fill=0)
font = ImageFont.load_default()
padding = -2
top = padding
bottom = height - padding  # NOTE(review): computed but unused below
x = 0
# Refresh loop: shell out for system stats and redraw every 3 seconds.
while True:
    draw.rectangle((0,0,width,height), outline=0, fill=0)
    cmd = "hostname -I | cut -d\' \' -f1"
    ip = subprocess.check_output(cmd, shell=True)
    cmd = "top -bn1 | grep load | awk '{printf \"CPU Load: %.2f\", $(NF-2)}'"
    cpu = subprocess.check_output(cmd, shell=True)
    cmd = "free -m | awk 'NR==2{printf \"Mem: %s/%sMB %.2f%%\", $3, $2, $3*100/$2 }'"
    mem = subprocess.check_output(cmd, shell=True)
    cmd = "df -h | awk '$NF==\"/\"{printf \"Disk: %d/%dGB %s\", $3, $2, $5}'"
    disk = subprocess.check_output(cmd, shell=True)
    cmd = "vcgencmd measure_temp"
    temp = subprocess.check_output(cmd, shell=True)
    # NOTE(review): `disk` is gathered but never drawn -- the fourth line
    # shows temperature instead; confirm that is intended.
    draw.text((x, top), "IP: {}".format(ip.decode('utf-8')), font=font, fill=255)
    draw.text((x, top+8), "{}".format(cpu.decode('utf-8')), font=font, fill=255)
    draw.text((x, top+16), "{}".format(mem.decode('utf-8')), font=font, fill=255)
    draw.text((x, top+25), "{}".format(temp.decode('utf-8')), font=font, fill=255)
    oled.image(image)
    oled.display()
    time.sleep(3)
|
import random
def data():
    """Build a random test dataset of {'signal_count': n} dicts.

    A random number (50-999) of signal values is generated, and each value
    n is repeated a fresh random (1-100) number of times.
    """
    entries = []
    for n in range(random.randint(50, 1000)):
        for _ in range(0, random.randint(1, 100)):
            entries.append({'signal_count': n})
    return entries
|
from typing import Optional, Tuple
from . import BaseHandler, apply_request_schema, apply_response_schema
from ..aliases import AliasStoreType, RevealFailed
from ..aliases.manager import redact, reveal
from ..schemas.aliases import (
AliasResponseSchema,
AliasesResponseSchema,
RedactRequestSchema,
)
STORAGE_TYPE = AliasStoreType.PERSISTENT
class AliasesHandler(BaseHandler):
    """Create aliases (POST) and bulk-reveal them (GET ?q=a1,a2,...)."""

    @apply_request_schema(RedactRequestSchema)
    @apply_response_schema(AliasResponseSchema)
    def post(self, validated_data: dict):
        """Redact each submitted value into a persistent alias."""
        entries = []
        for item in validated_data['data']:
            # Renamed from `format` to avoid shadowing the builtin.
            alias_format = item['format']
            alias = redact(item['value'], alias_format, STORAGE_TYPE)
            entries.append({
                'aliases': [{'alias': alias.public_alias, 'format': alias_format}],
                'created_at': alias.created_at,
                'value': item['value'],
            })
        return {'data': entries}

    @apply_response_schema(AliasesResponseSchema)
    def get(self):
        """Reveal the comma-separated aliases given in the `q` query parameter."""
        aliases = self.get_query_argument('q', default=None)
        if not aliases:
            self.set_status(400, 'Invalid request data')
            self.finish('Missing required parameter: "q"')
            return

        reveal_data = {}
        errors = []
        for public_alias in set(aliases.split(',')):
            revealed, error = _reveal(public_alias)
            if revealed:
                reveal_data[public_alias] = revealed
            if error:
                errors.append(error)

        result = {}
        if reveal_data:
            result['data'] = reveal_data
        if errors:
            result['errors'] = errors
        return result
class AliasHandler(BaseHandler):
    """Reveal a single alias addressed by the URL path."""

    @apply_response_schema(AliasResponseSchema)
    def get(self, public_alias: str):
        """Return the revealed value for `public_alias`, or HTTP 400 with errors.

        NOTE(review): on failure this puts a single error dict under
        'errors', while AliasesHandler.get returns a *list* of errors --
        confirm the response schema accepts both shapes.
        """
        reveal_result, error = _reveal(public_alias)
        if error:
            self.set_status(400, 'Invalid request data')
            return {'errors': error}
        return {'data': [reveal_result]}
def _reveal(public_alias: str) -> Tuple[Optional[dict], Optional[dict]]:
    """Reveal one alias, returning a (result, error) pair.

    Exactly one element of the pair is non-None: the result dict on
    success, or an error dict when the reveal fails.

    BUG FIX: the first element of the return annotation said
    ``Optional[str]`` although the success branch returns a dict.
    """
    try:
        alias = reveal(public_alias, STORAGE_TYPE)
    except RevealFailed as exc:
        return None, {'detail': f'Unable to reveal {public_alias}: {exc}'}
    return {
        'aliases': [{
            'alias': alias.public_alias,
            'format': alias.alias_generator,
        }],
        'created_at': alias.created_at,
        'value': alias.value,
    }, None
|
from django.conf.urls import url
from django.contrib import admin
from django.views.generic.base import RedirectView
from django.conf import settings
from django.conf.urls.static import static
from .views import (
RetweetView,
TweetListView,
TweetDetailView,
TweetCreateView,
TweetUpdateView,
TweetDeleteView,
)
urlpatterns = [
    # Numeric-pk routes: delete/update/retweet actions, then plain detail.
    url(r'^(?P<pk>\d+)/delete/$', TweetDeleteView.as_view(), name='delete'),
    url(r'^(?P<pk>\d+)/update/$', TweetUpdateView.as_view(), name='update'),
    url(r'^(?P<pk>\d+)/retweet/$', RetweetView.as_view(), name='retweet'),
    url(r'^(?P<pk>\d+)/$', TweetDetailView.as_view(), name='detail'),
    # Literal routes (cannot collide with the digit-only pk patterns above).
    url(r'^search/$', TweetListView.as_view(), name='list'),
    url(r'^create/$', TweetCreateView.as_view(), name='create'),
    # Bare prefix redirects to the site root.
    url(r'^$', RedirectView.as_view(url="/")),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Evangelos A. Dimopoulos, Evan K. Irving-Pease"
__copyright__ = "Copyright 2020, University of Oxford"
__email__ = "antonisdim41@gmail.com"
__license__ = "MIT"
import os
import sys
import numpy as np
import pandas as pd
from haystac.workflow.scripts.utilities import REGEX_BLACKLIST
def entrez_refseq_eukaryotes_create_files(
    config,
    input_file,
    euk_genomes_out,
):
    """Parse the RefSeq genomes report for eukaryotes.

    Reads the tab-separated report at ``input_file``, keeps one assembly
    accession per organism, drops entries without an accession, sanitises
    species names, removes species the user already supplied via
    ``config["sequences"]`` / ``config["accessions"]``, and writes a
    two-column (species, AccessionVersion) TSV to ``euk_genomes_out``.
    """
    # read the file
    refseq_eukaryotes = pd.read_csv(input_file, sep="\t")
    # drop duplicate species/strains and get pairs of taxa and a assembly accession codes
    refseq_eukaryotes_rmdup = refseq_eukaryotes[~refseq_eukaryotes["#Organism/Name"].duplicated(keep="first")]
    eukaryotes_unique = refseq_eukaryotes_rmdup[["#Organism/Name", "Assembly Accession"]].copy()
    # drop rows that have no accessions
    means_empty_record = ["", "-", np.nan]
    eukaryotes = eukaryotes_unique.loc[~eukaryotes_unique["Assembly Accession"].isin(means_empty_record)]
    # rename columns
    # NOTE(review): `eukaryotes` is a .loc slice of `eukaryotes_unique`;
    # in-place rename here may emit a pandas SettingWithCopyWarning.
    eukaryotes.rename(columns={"#Organism/Name": "species", "Assembly Accession": "AccessionVersion"}, inplace=True)
    # regex for species name
    eukaryotes["species"] = eukaryotes["species"].replace(REGEX_BLACKLIST, "_", regex=True)
    # check for duplicates from user input
    if config["sequences"] or config["accessions"]:
        user_inputs = []
        if os.path.isfile(config["sequences"]):
            custom_fasta_paths = pd.read_csv(
                config["sequences"],
                sep="\t",
                header=None,
                names=["species", "accession", "path"],
            )
            user_inputs.append(custom_fasta_paths)
        if os.path.isfile(config["accessions"]):
            custom_accessions = pd.read_csv(
                config["accessions"],
                sep="\t",
                header=None,
                names=["species", "accession"],
            )
            user_inputs.append(custom_accessions)
        # exclude any species the user already provided themselves
        for user_df in user_inputs:
            eukaryotes = eukaryotes[(~eukaryotes["species"].isin(user_df["species"]))]
    # print the output to csv
    header = ["species", "AccessionVersion"]
    eukaryotes.to_csv(euk_genomes_out, sep="\t", header=header, index=False)
if __name__ == "__main__":
# noinspection PyUnresolvedReferences
sys.stderr = open(snakemake.log[0], "w")
# noinspection PyUnresolvedReferences
entrez_refseq_eukaryotes_create_files(
config=snakemake.config,
input_file=snakemake.input[0],
euk_genomes_out=snakemake.output[0],
)
|
from isodate import parse_duration
from isoduration import parse_duration as isodate_parse_duration
def test_isoduration(benchmark):
    """Benchmark the `isoduration` package's duration parser.

    BUG FIX: this benchmarked `parse_duration`, which the imports bind to
    the *isodate* package. Despite its misleading alias,
    `isodate_parse_duration` is the function imported from `isoduration`.
    """
    benchmark(isodate_parse_duration, "P18Y9M4DT11H9M8S")
def test_isodate(benchmark):
    """Benchmark the `isodate` package's duration parser.

    BUG FIX: this benchmarked `isodate_parse_duration`, which -- despite
    its alias -- is the *isoduration* parser. `parse_duration` is the
    function imported from `isodate`.
    """
    benchmark(parse_duration, "P18Y9M4DT11H9M8S")
|
from headers import Headers
from url import Url
class Response(object):
"""
The ``Response`` object encapsulates HTTP style responses.
"""
def __init__(self, status, headers=Headers(), content=None,
message=None, request=None):
"""
Construct a new ``Response`` object.
:param status: HTTP status code for the response
:type status: integer
:param headers: HTTP headers
:type status: a list of tuples or a class:`Headers` object
:param content: content
:param message: HTTP message for the response
:param request: origin Request object used
:type request: class:`Request`
.. attribute:: redirects
List of redirections
"""
self._status = status
self.message = message
self.redirects = list()
if (not isinstance(headers, Headers)):
headers = Headers(headers)
self._headers = headers
self._content = content
self._request = request
@property
def status(self):
"""
Returns the HTTP status
:rtype: int
"""
return int(self._status)
@property
def is_info(self):
"""
Returns if the response was informational
:rtype: boolean
"""
if self.status >= 100 and self.status < 200:
return True
return False
@property
def is_success(self):
"""
Returns if the response was success
:rtypen: boolean
"""
if self.status >= 200 and self.status < 300:
return True
return False
@property
def is_redirect(self):
"""
Returns if the response was redirect
:rtype: boolean
"""
if self.status >= 300 and self.status < 400:
return True
return False
@property
def is_client_error(self):
"""
Returns if the response was a client error
:rtype: boolean
"""
if self.status >= 400 and self.status < 500:
return True
return False
@property
def is_server_error(self):
"""
Returns if the response was a client server
:rtype: boolean
"""
if self.status >= 500 and self.status < 600:
return True
@property
def is_error(self):
"""
Returns if the response was an error
:rtype: boolean
"""
if self.is_client_error or self.is_server_error:
return True
return False
@property
def base(self):
"""
Returns the base URI for this response
:rtype: class:`Url` or None
"""
url = None
if self.header('Content-Base'):
url = self.header('Content-Base')
if self.header('Content-Location'):
url = self.header('Content-Location')
if url is None and self.request:
url = self.request.url
if not url:
return None
if not isinstance(url, Url):
url = Url(url)
return url
@property
def request(self):
"""
Returns the request object that caused that response
:rtype: class:`Request`
"""
return self._request
@property
def content(self):
"""
Returns the actual content of the response
:rtype: string
"""
return self._content
@content.setter
def content(self, content):
"""
Set the actual content of the response
"""
self._content = content
def header(self, name):
"""
Returns the value for a given header
:rtype: string
"""
return self._headers.get(name)
@property
def headers(self):
"""
Returns the class:`Headers` object
:rtype: class:`Headers`
"""
return self._headers
@property
def status_line(self):
"""
Returns the string '<code> <message>'
:rtype: string
"""
return "{0} {1}".format(self.status, self.message)
    # The properties below simply delegate to the Headers object.
    @property
    def last_modified(self):
        """
        Returns a datetime object representing the *Last-Modified* header
        :rtype: class:`datetime`
        """
        return self._headers.last_modified
    @property
    def date(self):
        """
        Returns a datetime object representing the *Date* header
        :rtype: class:`datetime`
        """
        return self._headers.date
    @property
    def expires(self):
        """
        Returns a datetime object representing the *Expires* header
        :rtype: class:`datetime`
        """
        return self._headers.expires
    @property
    def content_length(self):
        """
        Returns the Content-Length of the actual response
        :rtype: int
        """
        return self._headers.content_length
    @property
    def content_is_text(self):
        """
        Returns ``True`` if the "Content-Type" header is set to text
        :rtype: boolean
        """
        return self._headers.content_is_text
    @property
    def content_is_xml(self):
        """
        Returns ``True`` if the "Content-Type" header is set to XML
        :rtype: boolean
        """
        return self._headers.content_is_xml
    @property
    def content_is_xhtml(self):
        """
        Returns ``True`` if the "Content-Type" header is set to XHTML
        :rtype: boolean
        """
        return self._headers.content_is_xhtml
|
import argparse
import serial
import time
# Serial link to the STM32-based programmer; twice the standard 115200 rate.
BAUD = 115200 * 2
# Seconds to wait after releasing reset so the SoC can power up.
SLEEP_AFTER_RESET_IN_S = 0.02
# This is modified when reading RAM and flash.
SLEEP_BETWEEN_READ_AND_WRITE_IN_S = 0.02
# Sizes of the target's RAM and flash address spaces, in bytes.
RAM_SIZE = 0x010000
FLASH_SIZE = 0x7d000
# Module-global serial handle; opened in main().
serial_port = None
def hexdump(bytearr):
    """Format a byte sequence as space-separated two-digit hex values."""
    return ' '.join('{:02x}'.format(value) for value in bytearr)
def make_read_request(addr, n_bytes):
    """Build a raw SWS read request for n_bytes bytes starting at addr."""
    header = [0x5a, addr >> 8, addr & 0xff, 0x80]
    filler = [0xff] * (n_bytes + 1)
    return header + filler
def make_write_request(addr, data):
    """Build a raw SWS write request writing data starting at addr."""
    header = [0x5a, addr >> 8, addr & 0xff, 0x00]
    return header + data + [0xff]
def request_payload_size(request):
    """Payload byte count: total length minus the 4-byte header and 1-byte trailer."""
    HEADER_BYTES = 4
    TRAILER_BYTES = 1
    return len(request) - HEADER_BYTES - TRAILER_BYTES
def validate_raw_response(request, raw_response):
    """Return True when raw_response correctly echoes the request.

    The device interleaves a framing byte (0x02 first, then 0x00) before
    each echoed request byte and terminates the response with 0x02 0xff.
    """
    expectations = [
        (raw_response[0], 0x02),
        (raw_response[1], request[0]),
        (raw_response[2], 0x00),
        (raw_response[3], request[1]),
        (raw_response[4], 0x00),
        (raw_response[5], request[2]),
        (raw_response[6], 0x00),
        (raw_response[7], request[3]),
        (raw_response[-2], 0x02),
        (raw_response[-1], 0xff),
    ]
    return all(actual == expected for actual, expected in expectations)
def parse_response(raw_response, n_bytes):
    """Extract the n_bytes payload bytes from a raw SWS response."""
    # Payload bytes sit at the odd offsets after the 8-byte echoed header.
    offsets = [(index + 4) * 2 + 1 for index in range(n_bytes)]
    return [raw_response[offset] for offset in offsets]
def write_and_read(data):
    """Write raw bytes to the programmer and return whatever it sends back.

    Sleeps SLEEP_BETWEEN_READ_AND_WRITE_IN_S between the write and the read
    to give the device time to respond.
    """
    serial_port.write(data)
    time.sleep(SLEEP_BETWEEN_READ_AND_WRITE_IN_S)
    return serial_port.read_all()
def write_and_read_cmd(cmd, data=None):
    """Send a 0x55-prefixed programmer command, optionally with a payload."""
    cmd_bytes = bytearray([0x55, cmd] if data is None else [0x55, cmd] + data)
    return write_and_read(cmd_bytes)
def write_and_read_data(request):
    """Send a SWS read/write request and return the parsed payload bytes.

    Raises:
        Exception: if the raw response fails validation.
    """
    payload_size = request_payload_size(request)
    data_bytes = bytearray(request)
    raw_response = write_and_read(data_bytes)
    if not validate_raw_response(request, raw_response):
        raise Exception(
            f"Invalid request/raw response pair:\n\t{hexdump(request)}\n\t{hexdump(raw_response)}")
    return parse_response(raw_response, payload_size)
def get_soc_id():
    """Read the 16-bit SoC id from registers 0x007e/0x007f (little-endian)."""
    res = write_and_read_data(make_read_request(0x007e, 2))
    return res[1] << 8 | res[0]
    # res = write_and_read_data(make_read_request(0x007e, 1))
    # return res[0]
def set_speed(speed):
    """The SWS speed is set in the 0x00b2 register by means of
    specifying the number of clocks per bit."""
    # return write_and_read_data(make_write_request(0x00b0, [0x00, 0x80, speed, 0x00]))
    return write_and_read_data(make_write_request(0x00b2, [speed]))
def set_pgm_speed(speed):
    """Set the programmer-side (STM32) speed via command 0x05."""
    return write_and_read_cmd(0x05, [speed])
def find_suitable_sws_speed():
    """Probe SWS speeds 2..0x7e and return the first that yields a valid read.

    Raises:
        RuntimeError: if no speed in the range produces a valid response.
    """
    for speed in range(2, 0x7f):
        print(f'Trying speed {speed}')
        set_speed(speed)
        # Try to make a read request. If we get a valid response, assume we've
        # found a suitable SWS speed.
        try:
            get_soc_id()
        except Exception:
            continue
        else:
            print(f'Found and set suitable SWS speed: {speed}')
            return speed
    # NOTE(review): the message says "SPI" but this probes the SWS speed.
    raise RuntimeError("Unable to find a suitable SPI speed")
def send_flash_write_enable():
    """Issue the SPI flash Write Enable (0x06) command."""
    # CNS low.
    write_and_read_data(make_write_request(0x0d, [0x00]))
    # Write enable.
    write_and_read_data(make_write_request(0x0c, [0x06]))
    # CNS high.
    write_and_read_data(make_write_request(0x0d, [0x01]))
def send_flash_erase():
    """Issue the SPI flash chip-erase (0x60) command."""
    # CNS low.
    write_and_read_data(make_write_request(0x0d, [0x00]))
    # Chip erase.
    write_and_read_data(make_write_request(0x0c, [0x60]))
    # CNS high.
    write_and_read_data(make_write_request(0x0d, [0x01]))
def send_flash_get_status():
    """Read and return the SPI flash status register (command 0x05)."""
    # CNS low.
    write_and_read_data(make_write_request(0x0d, [0x00]))
    # Get flash status command.
    write_and_read_data(make_write_request(0x0c, [0x05]))
    # Start SPI.
    write_and_read_data(make_write_request(0x0c, [0xff]))
    # Read the status byte.
    res = write_and_read_data(make_read_request(0x0c, 1))
    # CNS high.
    write_and_read_data(make_write_request(0x0d, [0x01]))
    return res
def send_cpu_stop():
    """Halt the Telink CPU by writing 0x05 to register 0x0602."""
    return write_and_read_data(make_write_request(0x0602, [0x05]))
def send_csn_high():
    """Drive the flash chip-select (CSN) high (deasserted)."""
    return write_and_read_data(make_write_request(0x000d, [0x01]))
def send_csn_low():
    """Drive the flash chip-select (CSN) low (asserted)."""
    return write_and_read_data(make_write_request(0x000d, [0x00]))
def dump_ram():
    """Read the whole RAM_SIZE address space in 32-byte chunks.

    Returns:
        list[int]: the RAM contents, one int per byte.
    """
    contents = []
    CHUNK_SIZE = 32
    for addr in range(0x00, RAM_SIZE, CHUNK_SIZE):
        # Report progress.
        if addr & 0xff == 0:
            print(f'0x{addr:04x} {100 * addr / RAM_SIZE:05.2f}%')
        contents.extend(write_and_read_data(
            make_read_request(addr, CHUNK_SIZE)))
    return contents
def read_flash(addr, chunk_size):
    """Read chunk_size bytes from SPI flash starting at addr.

    Drives the flash controller registers directly: 0x0d is chip select,
    0x0c the SPI data register, 0xb3 the FIFO/RAM mode switch.
    """
    contents = []
    # send_flash_write_enable()
    # CNS low.
    write_and_read_data(make_write_request(0x0d, [0x00]))
    # Read command.
    write_and_read_data(make_write_request(0x0c, [0x03]))
    write_and_read_data(make_write_request(0x0c, [(addr >> 16) & 0xff]))
    write_and_read_data(make_write_request(0x0c, [(addr >> 8) & 0xff]))
    write_and_read_data(make_write_request(0x0c, [addr & 0xff]))
    # FIFO mode.
    write_and_read_data(make_write_request(0xb3, [0x80]))
    for i in range(chunk_size):
        # Clock out one dummy byte, then read back the byte it produced.
        write_and_read_data(make_write_request(0x0c, [0xff]))
        res = write_and_read_data(make_read_request(0x0c, 1))
        assert len(res) == 1
        contents.extend(res)
    # RAM mode.
    write_and_read_data(make_write_request(0xb3, [0x00]))
    # CNS high.
    write_and_read_data(make_write_request(0x0d, [0x01]))
    return contents
def write_flash(addr, data):
    """Program len(data) bytes into SPI flash starting at addr.

    The destination must have been erased beforehand.
    """
    send_flash_write_enable()
    # CNS low.
    write_and_read_data(make_write_request(0x0d, [0x00]))
    # Write command.
    write_and_read_data(make_write_request(0x0c, [0x02]))
    write_and_read_data(make_write_request(0x0c, [(addr >> 16) & 0xff]))
    write_and_read_data(make_write_request(0x0c, [(addr >> 8) & 0xff]))
    write_and_read_data(make_write_request(0x0c, [addr & 0xff]))
    # FIFO mode.
    write_and_read_data(make_write_request(0xb3, [0x80]))
    # Write data
    # CPU stop?
    write_and_read_data(make_write_request(0x0c, data))
    # # RAM mode.
    write_and_read_data(make_write_request(0xb3, [0x00]))
    # CNS high.
    write_and_read_data(make_write_request(0x0d, [0x01]))
def dump_flash(debug):
    """Read the whole FLASH_SIZE address space in 16-byte chunks.

    NOTE(review): retries a failing chunk forever and swallows every
    exception; a persistent link failure will loop indefinitely.
    """
    contents = []
    CHUNK_SIZE = 16
    for addr in range(0x00, FLASH_SIZE, CHUNK_SIZE):
        # Report progress.
        if addr & 0xff == 0:
            print(f'0x{addr:06x} {100 * addr / FLASH_SIZE:05.2f}%')
        # Retry the same address in case something goes wrong.
        while True:
            try:
                res = read_flash(addr, CHUNK_SIZE)
                if debug:
                    print(f'Read: {hexdump(res)}')
                contents.extend(res)
                break
            except Exception as e:
                print(f"Retrying 0x{addr:08x}... {e}")
    return contents
def write_to_file(filename, contents):
    """Write a sequence of byte values to *filename* as a binary file."""
    # Fix: the progress message previously printed the literal text
    # "(unknown)" instead of interpolating the destination filename.
    print(f"Writing {len(contents)} bytes to {filename}")
    with open(filename, 'wb') as f:
        f.write(bytes(contents))
def init_soc(sws_speed=None):
    """Reset the SoC and bring up the SWS link.

    If sws_speed is given it is used directly; otherwise a working speed
    is probed automatically.
    """
    # Set RST to low - turns off the SoC.
    write_and_read_cmd(0x00)
    # Set RST high - starts to turn on the SoC.
    # write_and_read_cmd(0x01)
    # Give some time for the reset capacitor to charge and turn the chip on.
    time.sleep(SLEEP_AFTER_RESET_IN_S)
    # Send an "activate" command. The STM32 will receive this command and put the Telink
    # in a suitable state. The STM32 will stop the Telink CPU by writing the value
    # 0x05 to Telink's 0x0602 register. It will also set a default SWS speed, but we
    # will override it later when we find a suitable SWS speed.
    write_and_read_cmd(0x02, [0x00, 0xf0])
    set_pgm_speed(0x03)
    if sws_speed is not None:
        set_speed(sws_speed)
    else:
        find_suitable_sws_speed()
def dump_flash_main(args):
    """CLI handler: dump the entire flash to args.filename."""
    init_soc(args.sws_speed)
    print(f'Dumping flash to {args.filename}...')
    # Speed things up a little bit.
    global SLEEP_BETWEEN_READ_AND_WRITE_IN_S
    SLEEP_BETWEEN_READ_AND_WRITE_IN_S = 0.001
    write_to_file(args.filename, dump_flash(args.debug))
def erase_flash_main(args):
    """CLI handler: chip-erase the flash and wait for completion."""
    init_soc(args.sws_speed)
    print(f'Erasing flash...')
    send_flash_write_enable()
    send_flash_erase()
    # Poll the flash status register until the erase finishes.
    while True:
        res = send_flash_get_status()
        print(f'Flash status: {hexdump(res)}')
        if res[0] == 0:
            break
        time.sleep(1)
    # CNS high.
    # write_and_read_data(make_write_request(0x0d, [0x01]))
def write_flash_main(args):
    """CLI handler: erase the flash, then program it from args.filename."""
    init_soc(args.sws_speed)
    time.sleep(0.02)
    print(f'Erasing flash...')
    send_flash_write_enable()
    send_flash_erase()
    # Poll until the erase is done.
    while True:
        res = send_flash_get_status()
        print(f'Flash status: {hexdump(res)}')
        if res[0] == 0:
            break
        time.sleep(1)
    print(f'Writing flash from {args.filename}...')
    global SLEEP_BETWEEN_READ_AND_WRITE_IN_S
    # SLEEP_BETWEEN_READ_AND_WRITE_IN_S = 0.008
    SLEEP_BETWEEN_READ_AND_WRITE_IN_S = 0.005
    CHUNK_SIZE = 16
    with open(args.filename, 'rb') as f:
        contents = f.read()
    size = len(contents)
    for addr in range(0x00, size, CHUNK_SIZE):
        # Report progress.
        if addr & 0xff == 0:
            print(f'0x{addr:04x} {100 * addr / size:05.2f}%')
        data = contents[addr:min(addr + CHUNK_SIZE, size)]
        if args.debug:
            print(f'writing: {hexdump(data)}')
        write_flash(addr, list(data))
    # Wait for the final page program to complete.
    while True:
        res = send_flash_get_status()
        print(f'Flash status: {hexdump(res)}')
        if res[0] == 0:
            break
        time.sleep(1)
    # Set RST to low - turns off the SoC.
    write_and_read_cmd(0x00)
    # Set RST to high - turns on the SoC.
    write_and_read_cmd(0x01)
def dump_ram_main(args):
    """CLI handler: dump all of RAM to args.filename."""
    init_soc(args.sws_speed)
    print(f'Dumping ram to {args.filename}...')
    # Speed things up a little bit.
    global SLEEP_BETWEEN_READ_AND_WRITE_IN_S
    SLEEP_BETWEEN_READ_AND_WRITE_IN_S = 0.00
    write_to_file(args.filename, dump_ram())
def get_soc_id_main(args):
    """CLI handler: print the SoC id."""
    init_soc(args.sws_speed)
    print(f'SOC ID: 0x{get_soc_id():04x}')
def cpu_run_main(args):
    """CLI handler: release the Telink CPU from its halted state."""
    init_soc(args.sws_speed)
    # Tell CPU to run.
    write_and_read_data(make_write_request(0x0602, [0x88]))
def main():
    """Entry point: parse arguments, open the serial port, dispatch to a subcommand."""
    args_parser = argparse.ArgumentParser(description='TLSR')
    args_parser.add_argument('--serial-port', type=str, required=True,
                             help="Serial port to use - this should be the USB CDC port that is connected to the STM32 (e.g.: /dev/cu.usbmodem6D8E448E55511).")
    args_parser.add_argument(
        '--sws-speed', type=int, help="SWS speed in the range [0x02, 0x7f]. If not provided, the script will try to find a suitable SWS speed automatically.")
    args_parser.add_argument(
        '--debug', action="store_true", help="Enable debugging information.")
    subparsers = args_parser.add_subparsers(dest="cmd", required=True)
    dump_flash_parser = subparsers.add_parser('dump_flash')
    dump_flash_parser.set_defaults(func=dump_flash_main)
    dump_flash_parser.add_argument('filename', type=str)
    dump_ram_parser = subparsers.add_parser('dump_ram')
    dump_ram_parser.set_defaults(func=dump_ram_main)
    dump_ram_parser.add_argument('filename', type=str)
    get_soc_id_parser = subparsers.add_parser('get_soc_id')
    get_soc_id_parser.set_defaults(func=get_soc_id_main)
    write_flash_parser = subparsers.add_parser('write_flash')
    write_flash_parser.set_defaults(func=write_flash_main)
    write_flash_parser.add_argument('filename', type=str)
    erase_flash_parser = subparsers.add_parser('erase_flash')
    erase_flash_parser.set_defaults(func=erase_flash_main)
    # Fix: this parser previously clobbered the erase_flash_parser variable,
    # which was confusing (though functionally harmless).
    cpu_run_parser = subparsers.add_parser('cpu_run')
    cpu_run_parser.set_defaults(func=cpu_run_main)
    args = args_parser.parse_args()
    # Initialize the module-global serial port used by all helpers.
    global serial_port
    serial_port = serial.Serial(
        args.serial_port, BAUD, serial.EIGHTBITS, serial.PARITY_NONE, serial.STOPBITS_ONE)
    args.func(args)
if __name__ == "__main__":
    main()
|
"""
Test cases
David Johnston 2015
"""
import unittest
import numpy as np
import Agents
import Problems
def setupESA():
    """Build the ESA test problem from Hutter (2014).

    Returns:
        (p_raw, p_agg): the raw 4-state MDP and its 2-state aggregation,
        each carrying the corresponding analytic Q-values.
    """
    gamma = 1/2.
    r0 = gamma/2/(1+gamma)
    r1 = (1+gamma/2)/(1+gamma)
    transitions = np.array([[[0 ,0.5 ,0.5 ,0],
                             [0.5 ,0 ,0 ,0.5],
                             [0 ,1. ,0 ,0],
                             [1. ,0 ,0 ,0]]])
    rewards = np.array([[[r0,r0,r0,r0],
                         [r1,r1,r1,r1],
                         [0 ,0 ,0 ,0],
                         [1 ,1 ,1 ,1]]])
    # States {0, 2} and {1, 3} are aggregated into two abstract states.
    aggregation = {'n':2,0:0,1:1,2:0,3:1}
    qValues_raw = np.array([[gamma/(1-gamma**2),1/(1-gamma**2),
                             gamma/(1-gamma**2),1/(1-gamma**2)]]).reshape((4,1))
    qValues_agg = np.array([[gamma/(1-gamma**2),1/(1-gamma**2)]]).reshape((2,1))
    p_raw = Problems.MDP(0,transitions,rewards,gamma,qValues=qValues_raw)
    # Fix: the aggregated problem previously received qValues_raw;
    # qValues_agg was computed but never used, and the tests compare
    # against two aggregated states.
    p_agg = Problems.MDP(0,transitions,rewards,gamma,qValues=qValues_agg,
                         aggregation=aggregation)
    return p_raw, p_agg
class TestMDPs(unittest.TestCase):
    """Checks that each agent's learned Q-values approach the analytic
    Q-values of the ESA test problem.

    NOTE(review): the deltas below are signed means, so a large *negative*
    error would still pass; consider absolute error.
    """
    def test_Q(self):
        """Q-learning converges on both the raw and aggregated problems."""
        p_raw, p_agg = setupESA()
        ql_raw = Agents.QAgent(p_raw,1)
        ql_agg = Agents.QAgent(p_agg,1)
        ql_raw.episode(timeout = 1000)
        ql_agg.episode(timeout = 1000)
        # Mean signed error over 4 raw / 2 aggregated states.
        delta_r = sum(ql_raw.qValues[0] - p_raw.qValues[0])/4
        delta_a = sum(ql_agg.qValues[0] - p_agg.qValues[0])/2
        print("\nQ learning raw delta = {}, agg delta = {}".format(delta_r,delta_a))
        self.assertTrue(delta_r < 1e-1)
        self.assertTrue(delta_a < 1e-1)
    def test_SL(self):
        """Sarsa(lambda) converges on both the raw and aggregated problems."""
        p_raw, p_agg = setupESA()
        sl_raw = Agents.SarsaLambda(p_raw,1)
        sl_agg = Agents.SarsaLambda(p_agg,1)
        sl_raw.episode(timeout = 1000)
        sl_agg.episode(timeout = 1000)
        delta_r = sum(sl_raw.qValues[0] - p_raw.qValues[0])/4
        delta_a = sum(sl_agg.qValues[0] - p_agg.qValues[0])/2
        print("\nSarsa(l) raw delta = {}, agg delta = {}".format(delta_r,delta_a))
        self.assertTrue(delta_r < 1e-1)
        self.assertTrue(delta_a < 1e-1)
    def test_VI(self):
        """Value iteration reaches the analytic Q-values on the raw problem."""
        p_raw, _ = setupESA()
        vi_raw = Agents.VIAgent(p_raw,1)
        vi_raw.VISweep()
        delta_r = sum(vi_raw.qValues[0] - p_raw.qValues[0])/4
        print("\nValue Iteration raw delta = {}".format(delta_r))
        self.assertTrue(delta_r < 1e-4)
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
from sqlalchemy import Column, Integer, String
class BaseModel(object):
    # Placeholder base class for the app's models; no shared behavior yet.
    pass
class User(BaseModel):
    # Placeholder user model; columns (see sqlalchemy import) not defined yet.
    pass
|
# cSpell:ignore smsl
# Setup ######################################################################
def _setup(sys_module, _imp_module):
    """Setup importlib by importing needed built-in modules and injecting them
    into the global namespace.
    As sys is needed for sys.modules access and _imp is needed to load built-in
    modules, those two modules must be explicitly passed in.
    """
    global _imp, sys
    _imp = _imp_module
    sys = sys_module
    # Set up the spec for existing builtin/frozen modules.
    module_type = type(sys)
    for name, module in sys.modules.items():
        if isinstance(module, module_type):
            if name in sys.builtin_module_names:
                loader = BuiltinImporter
            else:
                # Not a built-in we know how to describe; leave it untouched.
                continue
            spec = _spec_from_module(module, loader)
            _init_module_attrs(spec, module)
    # Directly load built-in modules needed during bootstrap.
    self_module = sys.modules[__name__]
    for builtin_name in ['_warnings']:
        if builtin_name not in sys.modules:
            builtin_module = _builtin_from_name(builtin_name)
        else:
            builtin_module = sys.modules[builtin_name]
        # Expose the module as an attribute of this bootstrap module.
        setattr(self_module, builtin_name, builtin_module)
def _builtin_from_name(name):
    """Load and return the built-in module *name*.

    Raises:
        ImportError: if *name* is not a built-in module.
    """
    spec = BuiltinImporter.find_spec(name)
    if spec is not None:
        return _load_unlocked(spec)
    raise ImportError('no built-in module named ' + name)
# Bootstrap-related code ######################################################
def _wrap(new, old):
    """Simple substitute for functools.update_wrapper."""
    for attr_name in ('__module__', '__name__', '__qualname__', '__doc__'):
        if hasattr(old, attr_name):
            setattr(new, attr_name, getattr(old, attr_name))
    new.__dict__.update(old.__dict__)
def _new_module(name):
    """Return a fresh, empty module object called *name*."""
    module_type = type(sys)
    return module_type(name)
# Frame stripping magic ###############################################
def _call_with_frames_removed(f, *args, **kwds):
    """Call f(*args, **kwds) through a frame the import machinery strips.

    remove_importlib_frames in import.c will always remove sequences of
    importlib frames that end with a call to this function.  Use it instead
    of a normal call in places where including the importlib frames
    introduces unwanted noise into the traceback (e.g. when executing
    module code).
    """
    return f(*args, **kwds)
def _requires_builtin(fxn):
    """Decorator to verify the named module is built-in."""
    def _requires_builtin_wrapper(self, fullname):
        # Reject names that are not compiled into the interpreter.
        if fullname not in sys.builtin_module_names:
            raise ImportError(f'{fullname!r} is not a built-in module', name=fullname)
        return fxn(self, fullname)
    _wrap(_requires_builtin_wrapper, fxn)
    return _requires_builtin_wrapper
# Module specifications #######################################################
def _verbose_message(message, verbosity=1):
    """Print the message to stderr if -v/PYTHONVERBOSE is turned on."""
    if sys.flags.verbose >= verbosity:
        if not message.startswith(('#', 'import ')):
            message = '# ' + message
        print(message, file=sys.stderr)
class _installed_safely:
    """Context manager that publishes a module in sys.modules while it is
    being executed and removes it again if execution raises."""
    def __init__(self, module):
        self._module = module
        self._spec = module.__spec__
    def __enter__(self):
        # This must be done before putting the module in sys.modules
        # (otherwise an optimization shortcut in import.c becomes wrong)
        self._spec._initializing = True
        sys.modules[self._spec.name] = self._module
    def __exit__(self, *args):
        try:
            spec = self._spec
            # This was changed because Violet does not support comprehension
            if any(map(lambda arg: arg is not None, args)):
                # Module body raised: undo the sys.modules registration.
                try:
                    del sys.modules[spec.name]
                except KeyError:
                    pass
            else:
                _verbose_message(f'import {spec.name!r} # {spec.loader!r}')
        finally:
            self._spec._initializing = False
class ModuleSpec:
    """The specification for a module, used for loading.
    A module's spec is the source for information about the module. For
    data associated with the module, including source, use the spec's
    loader.
    `name` is the absolute name of the module.
    `loader` is the loader to use when loading the module.
    `parent` is the name of the package the module is in.
    The parent is derived from the name.
    `is_package` determines if the module is considered a package or
    not. On modules this is reflected by the `__path__` attribute.
    `origin` is the specific location used by the loader from which to
    load the module, if that information is available. When filename is
    set, origin will match.
    `has_location` indicates that a spec's "origin" reflects a location.
    When this is True, `__file__` attribute of the module is set.
    `submodule_search_locations` is the sequence of path entries to
    search when importing submodules. If set, is_package should be
    True--and False otherwise.
    Packages are simply modules that (may) have submodules. If a spec
    has a non-None value in `submodule_search_locations`, the import
    system will consider modules loaded from the spec as packages.
    Only finders (see importlib.abc.MetaPathFinder and
    importlib.abc.PathEntryFinder) should modify ModuleSpec instances.
    """
    def __init__(self, name, loader, *, origin=None, loader_state=None,
                 is_package=None):
        self.name = name
        self.loader = loader
        self.origin = origin
        self.loader_state = loader_state
        # Packages get a (possibly empty) search-path list; plain modules None.
        self.submodule_search_locations = [] if is_package else None
        # file-location attributes
        self._set_fileattr = False
    def __repr__(self):
        # Only include the optional attributes that are actually set.
        args = [
            f'name={self.name!r}',
            f'loader={self.loader!r}'
        ]
        if self.origin is not None:
            args.append(f'origin={self.origin!r}')
        if self.submodule_search_locations is not None:
            args.append(f'submodule_search_locations={self.submodule_search_locations!r}')
        name = self.__class__.__name__
        args_string = ', '.join(args)
        return f'{name}({args_string})'
    def __eq__(self, other):
        # Specs compare equal when every identifying attribute matches;
        # any missing attribute on `other` means "not equal".
        smsl = self.submodule_search_locations
        try:
            return (self.name == other.name and
                    self.loader == other.loader and
                    self.origin == other.origin and
                    smsl == other.submodule_search_locations and
                    self.has_location == other.has_location)
        except AttributeError:
            return False
    @property
    def parent(self):
        """The name of the module's parent."""
        # A package is its own parent; a plain module's parent is the
        # dotted prefix of its name.
        if self.submodule_search_locations is None:
            return self.name.rpartition('.')[0]
        else:
            return self.name
    @property
    def has_location(self):
        # True when `origin` denotes a loadable location (sets __file__).
        return self._set_fileattr
    @has_location.setter
    def has_location(self, value):
        self._set_fileattr = bool(value)
def _spec_from_module(module, loader=None, origin=None):
    """Derive a ModuleSpec from an existing module's attributes."""
    # This function is meant for use in _setup().
    try:
        spec = module.__spec__
    except AttributeError:
        pass
    else:
        # Reuse an already-attached spec rather than synthesizing one.
        if spec is not None:
            return spec
    name = module.__name__
    if loader is None:
        try:
            loader = module.__loader__
        except AttributeError:
            # loader will stay None.
            pass
    try:
        location = module.__file__
    except AttributeError:
        location = None
    if origin is None:
        if location is None:
            try:
                origin = loader._ORIGIN
            except AttributeError:
                origin = None
        else:
            origin = location
    try:
        submodule_search_locations = list(module.__path__)
    except AttributeError:
        submodule_search_locations = None
    spec = ModuleSpec(name, loader, origin=origin)
    spec._set_fileattr = False if location is None else True
    spec.submodule_search_locations = submodule_search_locations
    return spec
def _init_module_attrs(spec, module, *, override=False):
    """Copy import-related attributes from *spec* onto *module*.

    Existing attribute values are preserved unless *override* is true.
    """
    # The passed-in module may be not support attribute assignment,
    # in which case we simply don't set the attributes.
    # __name__
    if (override or getattr(module, '__name__', None) is None):
        try:
            module.__name__ = spec.name
        except AttributeError:
            pass
    # __loader__
    if override or getattr(module, '__loader__', None) is None:
        try:
            module.__loader__ = spec.loader
        except AttributeError:
            pass
    # __package__
    if override or getattr(module, '__package__', None) is None:
        try:
            module.__package__ = spec.parent
        except AttributeError:
            pass
    # __spec__
    try:
        module.__spec__ = spec
    except AttributeError:
        pass
    # __path__
    if override or getattr(module, '__path__', None) is None:
        if spec.submodule_search_locations is not None:
            try:
                module.__path__ = spec.submodule_search_locations
            except AttributeError:
                pass
    # __file__
    if spec.has_location:
        if override or getattr(module, '__file__', None) is None:
            try:
                module.__file__ = spec.origin
            except AttributeError:
                pass
    return module
def module_from_spec(spec):
    """Create a module based on the provided spec."""
    # Typically loaders will not implement create_module().
    module = None
    if hasattr(spec.loader, 'create_module'):
        # If create_module() returns `None` then it means default
        # module creation should be used.
        module = spec.loader.create_module(spec)
    elif hasattr(spec.loader, 'exec_module'):
        raise ImportError('loaders that define exec_module() must also define create_module()')
    if module is None:
        module = _new_module(spec.name)
    _init_module_attrs(spec, module)
    return module
def _load_backward_compatible(spec):
    """Load a module through the legacy loader.load_module() protocol."""
    # (issue19713) Once BuiltinImporter and ExtensionFileLoader
    # have exec_module() implemented, we can add a deprecation
    # warning here.
    spec.loader.load_module(spec.name)
    # The module must be in sys.modules at this point!
    module = sys.modules[spec.name]
    # Legacy loaders may not have filled in the import attributes;
    # backfill the ones that are missing.
    if getattr(module, '__loader__', None) is None:
        try:
            module.__loader__ = spec.loader
        except AttributeError:
            pass
    if getattr(module, '__package__', None) is None:
        try:
            # Since module.__path__ may not line up with
            # spec.submodule_search_paths, we can't necessarily rely
            # on spec.parent here.
            module.__package__ = module.__name__
            if not hasattr(module, '__path__'):
                module.__package__ = spec.name.rpartition('.')[0]
        except AttributeError:
            pass
    if getattr(module, '__spec__', None) is None:
        try:
            module.__spec__ = spec
        except AttributeError:
            pass
    return module
def _load_unlocked(spec):
    """Create, publish and execute the module described by *spec*."""
    # A helper for direct use by the import system.
    if spec.loader is not None:
        # not a namespace package
        if not hasattr(spec.loader, 'exec_module'):
            return _load_backward_compatible(spec)
    module = module_from_spec(spec)
    with _installed_safely(module):
        if spec.loader is None:
            if spec.submodule_search_locations is None:
                raise ImportError('missing loader', name=spec.name)
            # A namespace package so do nothing.
        else:
            spec.loader.exec_module(module)
    # We don't ensure that the import-related module attributes get
    # set in the sys.modules replacement case. Such modules are on
    # their own.
    return sys.modules[spec.name]
# Loaders #####################################################################
class BuiltinImporter:
    """Meta path import for built-in modules.
    All methods are either class or static methods to avoid the need to
    instantiate the class.
    """
    @classmethod
    def find_spec(cls, fullname, path=None, target=None):
        """Return a spec for *fullname* if it is a built-in, else None."""
        if path is not None:
            # Built-in modules never live on a package __path__.
            return None
        if _imp.is_builtin(fullname):
            return spec_from_loader(fullname, cls, origin='built-in')
        else:
            return None
    # Fix: the two classmethods below previously named their first
    # parameter `self`, which is misleading — they receive the class.
    @classmethod
    def create_module(cls, spec):
        """Create a built-in module"""
        if spec.name not in sys.builtin_module_names:
            raise ImportError(f'{spec.name!r} is not a built-in module', name=spec.name)
        return _call_with_frames_removed(_imp.create_builtin, spec)
    @classmethod
    def exec_module(cls, module):
        """Exec a built-in module"""
        _call_with_frames_removed(_imp.exec_builtin, module)
def spec_from_loader(name, loader, *, origin=None, is_package=None):
    """Return a module spec based on various loader methods."""
    # NOTE(review): _bootstrap_external is not defined in this chunk; it is
    # presumably installed by external bootstrap code before this path runs.
    if hasattr(loader, 'get_filename'):
        if _bootstrap_external is None:
            raise NotImplementedError
        spec_from_file_location = _bootstrap_external.spec_from_file_location
        if is_package is None:
            return spec_from_file_location(name, loader=loader)
        search = [] if is_package else None
        return spec_from_file_location(name, loader=loader,
                                       submodule_search_locations=search)
    if is_package is None:
        if hasattr(loader, 'is_package'):
            try:
                is_package = loader.is_package(name)
            except ImportError:
                is_package = None  # aka, undefined
        else:
            # the default
            is_package = False
    return ModuleSpec(name, loader, origin=origin, is_package=is_package)
# Import itself ###############################################################
def import_module(name, package=None):
    """Import a module.
    The 'package' argument is required when performing a relative import. It
    specifies the package to use as the anchor point from which to resolve the
    relative import to an absolute import.
    """
    level = 0
    if name.startswith('.'):
        if not package:
            msg = f"the 'package' argument is required to perform a relative import for {name!r}"
            raise TypeError(msg)
        # Count the leading dots; they give the relative-import level.
        for character in name:
            if character != '.':
                break
            level += 1
    return _gcd_import(name[level:], package, level)
def _resolve_name(name, package, level):
    """Resolve a relative module name to an absolute one."""
    # Strip (level - 1) trailing components from the anchor package.
    anchor_parts = package.rsplit('.', level - 1)
    if len(anchor_parts) < level:
        raise ValueError('attempted relative import beyond top-level package')
    base = anchor_parts[0]
    if not name:
        return base
    return base + '.' + name
def _find_spec(name, path, target=None):
    """Find a module's spec."""
    # Ask each finder on sys.meta_path in order; first non-None spec wins.
    meta_path = sys.meta_path
    if meta_path is None:
        # PyImport_Cleanup() is running or has been called.
        raise ImportError("sys.meta_path is None, Python is likely shutting down")
    if not meta_path:
        _warnings.warn('sys.meta_path is empty', ImportWarning)
    # We check sys.modules here for the reload case. While a passed-in
    # target will usually indicate a reload there is no guarantee, whereas
    # sys.modules provides one.
    is_reload = name in sys.modules
    for finder in meta_path:
        try:
            find_spec = finder.find_spec
        except AttributeError:
            # Finder doesn't implement the find_spec protocol; skip it.
            continue
        else:
            spec = find_spec(name, path, target)
            if spec is not None:
                # The parent import may have already imported this module.
                if not is_reload and name in sys.modules:
                    module = sys.modules[name]
                    try:
                        __spec__ = module.__spec__
                    except AttributeError:
                        # We use the found spec since that is the one that
                        # we would have used if the parent module hadn't
                        # beaten us to the punch.
                        return spec
                    else:
                        if __spec__ is None:
                            return spec
                        else:
                            # Prefer the spec already attached to the module.
                            return __spec__
                else:
                    return spec
    else:
        return None
def _sanity_check(name, package, level):
    """Validate an import request's (name, package, level) triple.

    Raises TypeError, ValueError or ImportError on the first violation.
    """
    if not isinstance(name, str):
        raise TypeError(f'module name must be str, not {type(name)}')
    if level < 0:
        raise ValueError('level must be >= 0')
    if level > 0:
        # Relative imports need a usable anchor package.
        if not isinstance(package, str):
            raise TypeError('__package__ not set to a string')
        if not package:
            raise ImportError('attempted relative import with no known parent package')
    if not name and level == 0:
        raise ValueError('Empty module name')
def _find_and_load_unlocked(name, import_):
    """Find, load and return module *name*, importing its parent first."""
    path = None
    parent = name.rpartition('.')[0]
    if parent:
        if parent not in sys.modules:
            _call_with_frames_removed(import_, parent)
        # Crazy side-effects!
        if name in sys.modules:
            return sys.modules[name]
        parent_module = sys.modules[parent]
        try:
            path = parent_module.__path__
        except AttributeError:
            # Fix: the message previously combined manual quotes with !r
            # (f"No module named '{name!r}'"), producing doubled quotes
            # like ''pkg.mod'' in the error text.
            msg = f"No module named {name!r}; {parent!r} is not a package"
            raise ModuleNotFoundError(msg, name=name) from None
    spec = _find_spec(name, path)
    if spec is None:
        raise ModuleNotFoundError(f"No module named '{name}'", name=name)
    else:
        module = _load_unlocked(spec)
    if parent:
        # Set the module as an attribute on its parent.
        parent_module = sys.modules[parent]
        setattr(parent_module, name.rpartition('.')[2], module)
    return module
# Sentinel distinguishing "not in sys.modules" from a stored None.
_NEEDS_LOADING = object()
def _find_and_load(name, import_):
    """Find and load the module."""
    module = sys.modules.get(name, _NEEDS_LOADING)
    if module is _NEEDS_LOADING:
        return _find_and_load_unlocked(name, import_)
    if module is None:
        # A None entry marks a module whose import previously failed.
        message = (f'import of {name} halted; None in sys.modules')
        raise ModuleNotFoundError(message, name=name)
    return module
def _gcd_import(name, package=None, level=0):
    """Import and return the module based on its name, the package the call is
    being made from, and the level adjustment.
    This function represents the greatest common denominator of functionality
    between import_module and __import__. This includes setting __package__ if
    the loader did not.
    """
    _sanity_check(name, package, level)
    if level > 0:
        name = _resolve_name(name, package, level)
    return _find_and_load(name, _gcd_import)
def _handle_fromlist(module, fromlist, import_, *, recursive=False):
    """Figure out what __import__ should return.
    The import_ parameter is a callable which takes the name of module to
    import. It is required to decouple the function from assuming importlib's
    import implementation is desired.
    """
    # The hell that is fromlist ...
    # If a package was imported, try to import stuff from fromlist.
    if hasattr(module, '__path__'):
        for x in fromlist:
            if not isinstance(x, str):
                if recursive:
                    where = module.__name__ + '.__all__'
                else:
                    where = "``from list''"
                raise TypeError(f"Item in {where} must be str, not {type(x).__name__}")
            elif x == '*':
                # `from pkg import *` pulls in everything named in __all__.
                if not recursive and hasattr(module, '__all__'):
                    _handle_fromlist(module, module.__all__, import_, recursive=True)
            elif not hasattr(module, x):
                from_name = f'{module.__name__}.{x}'
                try:
                    _call_with_frames_removed(import_, from_name)
                except ModuleNotFoundError as exc:
                    # Backwards-compatibility dictates we ignore failed
                    # imports triggered by fromlist for modules that don't exist.
                    if (exc.name == from_name and sys.modules.get(from_name, _NEEDS_LOADING) is not None):
                        continue
                    raise
    return module
# Main ########################################################################
def _install(sys_module, _imp_module):
    """Install importers for builtin modules"""
    _setup(sys_module, _imp_module)
    # Make BuiltinImporter available to the regular import machinery.
    sys.meta_path.append(BuiltinImporter)
|
#coding: utf-8
import django
from django.views.generic import TemplateView, CreateView, UpdateView, DeleteView, DetailView, RedirectView
from django.views.generic.list import ListView
from django.views.generic.edit import BaseFormView, FormView, ProcessFormView
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse, reverse_lazy
from django.template import RequestContext
from app.models import *
from app.forms import *
from app.forms import UserWithEmailCreationForm
from app.view_mixins import *
from django.core import mail
from django.contrib import auth
RECORDS_PER_PAGE = 5
class AfterLoginRedirectView(RedirectView):
    """Send a freshly logged-in user to the home page matching their role.

    Staff go to the admin; users with exactly one Employer/Applicant record
    go to the matching home page; everyone else picks a role first.
    """
    url_for_admin = reverse_lazy('admin:index')
    url_for_employer = reverse_lazy('EmployerHome')
    url_for_applicant = reverse_lazy('ApplicantHome')
    url_for_new = reverse_lazy('ChooseRole')
    def get_redirect_url(self, *args, **kwargs):
        current_user = self.request.user
        # Lazy querysets: no DB hit until len() below.
        employer_records = Employer.objects.filter(profile__user__id=current_user.id)
        applicant_records = Applicant.objects.filter(profile__user__id=current_user.id)
        if current_user.is_staff:
            return self.url_for_admin
        if len(employer_records) == 1:
            return self.url_for_employer
        if len(applicant_records) == 1:
            return self.url_for_applicant
        return self.url_for_new
class ToMyResponseRedirectView(RedirectView):
    """Redirect from a vacancy to the current user's own response to it."""
    def get_redirect_url(self, *args, **kwargs):
        my_response = Response.objects.get(
            vacancy__id=kwargs['pk'],
            applicant__profile=self.request.user.profile)
        return reverse('ShowResponse', args=(my_response.id,))
class AboutView(AlertMixin, TemplateView):
    # Static "about" page (AlertMixin presumably adds alert context — defined
    # in app.view_mixins, confirm there).
    template_name = "app/other/about.html"
class RegisterSuccessView(TemplateView):
    # Static "registration succeeded" confirmation page.
    template_name = "registration/registered.html"
class AccessDeniedView(TemplateView):
    # Static page shown when a Deny*/RedirectIfDeny mixin blocks access.
    template_name = "app/other/access_denied.html"
class HomeView(TemplateView):
    # Static landing page for anonymous users.
    template_name = "app/other/home.html"
class ChooseRoleView(FormView):
    """Let a newly registered user pick a role (employer or applicant).

    A successful POST creates the user's Profile plus the matching role
    record and redirects to that role's home page.  An unrecognised role now
    redirects to the default page *without* creating anything (the original
    created a dangling Profile first and could 500 on a missing "role" key).
    """
    template_name = "app/other/choose_role.html"
    form_class = ChooseRoleForm
    employer_success_url = reverse_lazy('EmployerHome')
    applicant_success_url = reverse_lazy('ApplicantHome')
    default_success_url = reverse_lazy('About')
    def post(self, request, *args, **kwargs):
        # .get() instead of ["role"]: a malformed POST must not raise KeyError.
        role = request.POST.get("role")
        if role == "EMP":
            profile = Profile.objects.create(user=request.user)
            Employer.objects.create(profile=profile)
            return HttpResponseRedirect(self.employer_success_url)
        if role == "APP":
            profile = Profile.objects.create(user=request.user)
            Applicant.objects.create(profile=profile)
            return HttpResponseRedirect(self.applicant_success_url)
        return HttpResponseRedirect(self.default_success_url)
class UserCreateView(CreateView):
    """Registration view: creates a user and sends a notification e-mail."""
    model = auth.get_user_model()
    form_class = UserWithEmailCreationForm
    template_name = "registration/register.html"
    success_url = reverse_lazy('Registered')
    # Fixed content of the registration notification e-mail.
    email_subject = 'Test message'
    email_body = 'Hello world!'
    email_from = 'registration@empagency.com'
    def form_valid(self, form):
        # NOTE(review): the mail is sent before super() saves the user, so a
        # failed save still results in a sent e-mail — confirm this is intended.
        mail.send_mail(self.email_subject, self.email_body, self.email_from, [form.cleaned_data["email"]])
        return super(UserCreateView, self).form_valid(form)
class ApplicantHomeView(HomePageAlertMixin, DenyIfNotApplicantMixin, TemplateView):
    """Applicant dashboard: newest vacancies plus the applicant's own most
    recent responses and applications."""
    template_name = "app/other/applicant_home.html"
    # How many of each record type to show on the dashboard.
    cnt_last_vacancies = 3
    cnt_my_last_responses = 3
    cnt_my_last_applications = 3
    def get_context_data(self, **kwargs):
        # Pass **kwargs through: the original called super() without them,
        # silently dropping any context supplied by the mixins/URLconf.
        context = super(ApplicantHomeView, self).get_context_data(**kwargs)
        applicant = Applicant.objects.get(profile=self.request.user.profile)
        context['last_vacancies'] = Vacancy.objects.order_by(
            '-publish_date')[:self.cnt_last_vacancies]
        context['my_last_responses'] = Response.objects.filter(
            applicant=applicant).order_by('-response_date')[:self.cnt_my_last_responses]
        context['my_last_applications'] = Application.objects.filter(
            applicant=applicant).order_by('-publish_date')[:self.cnt_my_last_applications]
        context['applicant'] = applicant
        return context
class EmployerHomeView(HomePageAlertMixin, DenyIfNotEmployerMixin, TemplateView):
    """Employer dashboard: newest applications plus the employer's own
    vacancies and the latest responses to them."""
    template_name = "app/other/employer_home.html"
    # How many of each record type to show on the dashboard.
    cnt_last_applications = 3
    cnt_last_responses_for_my_vacancies = 3
    cnt_my_last_vacancies = 3
    def get_context_data(self, **kwargs):
        # Pass **kwargs through: the original called super() without them,
        # silently dropping any context supplied by the mixins/URLconf.
        context = super(EmployerHomeView, self).get_context_data(**kwargs)
        employer = Employer.objects.get(profile=self.request.user.profile)
        context['last_applications'] = Application.objects.order_by(
            '-publish_date')[:self.cnt_last_applications]
        context['last_responses_for_my_vacancies'] = Response.objects.filter(
            vacancy__employer=employer).order_by('-response_date')[:self.cnt_last_responses_for_my_vacancies]
        context['my_last_vacancies'] = Vacancy.objects.filter(
            employer=employer).order_by('-publish_date')[:self.cnt_my_last_vacancies]
        context['employer'] = employer
        return context
class VacanciesListView(AlertMixin, SelfnamedMixin, TagSearchMixin, PageHeaderMixin, ListView):
    # Public paginated list of all vacancies with tag search
    # (TagSearchMixin + form_class; behaviour defined in app.view_mixins).
    model = Vacancy
    context_object_name = 'vacancies'
    template_name = 'app/vacancy/list.html'
    paginate_by = RECORDS_PER_PAGE
    form_class = TagForm
    my_name = 'Vacancies'
    page_header = 'Все вакансии'  # page title: "All vacancies"
class MyVacanciesListView(DenyIfNotEmployerMixin, SelfnamedMixin, TagSearchMixin, PageHeaderMixin, ListView):
    """Paginated, tag-searchable list restricted to the logged-in employer's
    own vacancies."""
    model = Vacancy
    context_object_name = 'vacancies'
    template_name = 'app/vacancy/list.html'
    paginate_by = RECORDS_PER_PAGE
    form_class = TagForm
    my_name = 'MyVacancies'
    page_header = 'Мои вакансии'  # page title: "My vacancies"
    def get_queryset(self):
        base_queryset = super(MyVacanciesListView, self).get_queryset()
        return base_queryset.filter(employer__profile=self.request.user.profile)
class VacancyCreateView(FormVacancyPageAlertMixin, DenyIfNotEmployerMixin, CreateView):
    """Create a vacancy pre-bound to the logged-in employer."""
    model = Vacancy
    form_class = VacancyForm
    context_object_name = 'vacancy'
    template_name = 'app/vacancy/form.html'
    success_url = reverse_lazy('Vacancies')
    def get_form_kwargs(self):
        form_kwargs = super(VacancyCreateView, self).get_form_kwargs()
        current_employer = Employer.objects.get(profile=self.request.user.profile)
        form_kwargs['initial']['employer'] = current_employer
        return form_kwargs
class VacancyUpdateView(FormVacancyPageAlertMixin, DenyIfEmployerNotOwnerMixin, UpdateView):
    # Edit a vacancy; DenyIfEmployerNotOwnerMixin restricts to its owner.
    model = Vacancy
    form_class = VacancyForm
    context_object_name = 'vacancy'
    template_name = 'app/vacancy/form.html'
    success_url = reverse_lazy('Vacancies')
class VacancyDeleteView(DenyIfEmployerNotOwnerMixin, DeleteView):
    # Delete a vacancy; restricted to its owning employer.
    model = Vacancy
    context_object_name = 'vacancy'
    template_name = 'app/vacancy/delete.html'
    success_url = reverse_lazy('Vacancies')
class ResponseCreateView(DenyIfNotApplicantMixin, CreateView):
    """Create the current applicant's response to vacancy ``pk``.

    If the applicant has already responded, redirect to the existing
    response instead of showing the form.
    """
    model = Response
    form_class = ResponseForm
    context_object_name = 'response'
    template_name = 'app/response/form.html'
    success_url = reverse_lazy('Vacancies')
    def redirect_if_denied(self):
        redirect = super(ResponseCreateView, self).redirect_if_denied()
        if redirect:
            return redirect
        vacancy = Vacancy.objects.get(pk=self.kwargs['pk'])
        # One query instead of the original count()-then-get() pair.
        existing = Response.objects.filter(
            vacancy=vacancy, applicant__profile=self.request.user.profile).first()
        if existing is not None:
            return HttpResponseRedirect(reverse('ShowResponse', args=(existing.id,)))
    def get_form_kwargs(self):
        """Pre-fill applicant, vacancy and the applicant's default text."""
        new_kwargs = super(ResponseCreateView, self).get_form_kwargs()
        applicant = Applicant.objects.get(profile=self.request.user.profile)
        new_kwargs['initial']['applicant'] = applicant
        new_kwargs['initial']['vacancy'] = Vacancy.objects.get(pk=self.kwargs['pk'])
        new_kwargs['initial']['text'] = applicant.default_response()
        return new_kwargs
class ResponseDetailView(RedirectIfDenyMixin, DetailView):
    """Show a response; only the owning applicant or the vacancy's employer
    may view it."""
    model = Response
    context_object_name = 'response'
    template_name = 'app/response/detail.html'
    def redirect_if_denied(self):
        viewer_profile = self.request.user.profile
        response = self.get_object()
        if viewer_profile.is_employer():
            allowed_profile = response.vacancy.employer.profile
        elif viewer_profile.is_applicant():
            allowed_profile = response.applicant.profile
        else:
            return None
        if allowed_profile != viewer_profile:
            return HttpResponseRedirect(reverse('AccessDenied'))
class MyResponsesListView(DenyIfNotApplicantMixin, SelfnamedMixin, TagSearchMixin, PageHeaderMixin, ListView):
    """Paginated, tag-searchable list of the logged-in applicant's own
    responses."""
    model = Response
    context_object_name = 'responses'
    template_name = 'app/response/list.html'
    paginate_by = RECORDS_PER_PAGE
    form_class = TagForm
    my_name = 'MyResponses'
    page_header = 'Мои отклики'  # page title: "My responses"
    def get_queryset(self):
        base_queryset = super(MyResponsesListView, self).get_queryset()
        return base_queryset.filter(applicant__profile=self.request.user.profile)
class ResponsesForMyVacanciesListView(DenyIfNotEmployerMixin, SelfnamedMixin, TagSearchMixin, PageHeaderMixin, ListView):
    """Paginated, tag-searchable list of every response made to the
    logged-in employer's vacancies."""
    model = Response
    form_class = TagForm
    context_object_name = 'responses'
    template_name = 'app/response/list.html'
    paginate_by = RECORDS_PER_PAGE
    my_name = 'ResponsesForMyVacancies'
    page_header = 'Отклики на мои вакансии'  # "Responses to my vacancies"
    def get_queryset(self):
        base_queryset = super(ResponsesForMyVacanciesListView, self).get_queryset()
        return base_queryset.filter(vacancy__employer__profile=self.request.user.profile)
class VacancyDetailView(DetailView):
    """Vacancy page listing all responses; an authenticated viewer also gets
    their own response (if any) as ``my_response``."""
    model = Vacancy
    context_object_name = 'vacancy'
    template_name = 'app/vacancy/detail.html'
    def get_context_data(self, **kwargs):
        # Pass **kwargs through to super (the original dropped them).
        context = super(VacancyDetailView, self).get_context_data(**kwargs)
        vacancy = kwargs['object']
        context['responses'] = Response.objects.filter(vacancy=vacancy)
        # Callable is_authenticated() kept: this codebase targets old Django
        # (django.core.urlresolvers era).
        if self.request.user.is_authenticated():
            # One query instead of the original count()-then-get() pair.
            my_response = Response.objects.filter(
                vacancy=vacancy, applicant__profile=self.request.user.profile).first()
            if my_response is not None:
                context['my_response'] = my_response
        return context
class ProfileDetailView(DetailView):
    """Profile page, augmented with the matching Employer or Applicant
    record depending on the profile's role."""
    model = Profile
    context_object_name = 'profile'
    template_name = 'app/profile/detail.html'
    def get_context_data(self, **kwargs):
        # Pass **kwargs through to super (the original dropped them).
        context = super(ProfileDetailView, self).get_context_data(**kwargs)
        profile = kwargs['object']
        if profile.is_employer():
            context['employer'] = Employer.objects.get(profile=profile)
        elif profile.is_applicant():
            context['applicant'] = Applicant.objects.get(profile=profile)
        return context
class ProfileUpdateView(ProcessFormView):
    """Edit the current user's Profile plus their role-specific record.

    Two bound forms are shown together: the generic ProfileForm and either
    ApplicantForm or EmployerForm depending on the user's role.

    The form accessors are memoized: the original rebuilt a brand-new form in
    every helper, so the instances that were validated were not the ones that
    got saved (each save re-validated a fresh form, duplicating queries).
    """
    template_name = 'app/profile/form.html'
    def get_profile_form(self):
        # Build the ProfileForm once per request and reuse it everywhere.
        if not hasattr(self, '_profile_form'):
            self._profile_form = ProfileForm(self.request.POST or None,
                                             instance=self.request.user.profile)
        return self._profile_form
    def get_about_me_form(self):
        # Build the role-specific form once per request and reuse it.
        if not hasattr(self, '_about_me_form'):
            profile = self.request.user.profile
            if profile.is_applicant():
                form_class = ApplicantForm
                instance = Applicant.objects.get(profile=profile)
            else:
                form_class = EmployerForm
                instance = Employer.objects.get(profile=profile)
            self._about_me_form = form_class(self.request.POST or None,
                                             self.request.FILES or None,
                                             instance=instance)
        return self._about_me_form
    def get_data_context(self):
        return {
            'profile': self.get_profile_form(),
            'about_me': self.get_about_me_form()
        }
    def get_forms_valid(self):
        # Short-circuits like the original: the second form is only
        # validated when the first one passes.
        return self.get_profile_form().is_valid() and self.get_about_me_form().is_valid()
    def save_forms(self):
        self.get_profile_form().save()
        self.get_about_me_form().save()
    def get(self, request, *args, **kwargs):
        self.request = request
        context = self.get_data_context()
        return render_to_response(self.template_name, context, context_instance=RequestContext(request))
    def post(self, request, *args, **kwargs):
        self.request = request
        if self.get_forms_valid():
            self.save_forms()
            return HttpResponseRedirect(reverse('ShowProfile', args=(request.user.profile.id,)))
        context = self.get_data_context()
        return render_to_response(self.template_name, context, context_instance=RequestContext(request))
class ApplicationCreateView(FormApplicationPageAlertMixin, DenyIfNotApplicantMixin, CreateView):
    """Create a job application pre-bound to the logged-in applicant."""
    model = Application
    form_class = ApplicationForm
    context_object_name = 'application'
    template_name = 'app/application/form.html'
    success_url = reverse_lazy('MyApplications')
    def get_form_kwargs(self):
        form_kwargs = super(ApplicationCreateView, self).get_form_kwargs()
        current_applicant = Applicant.objects.get(profile=self.request.user.profile)
        form_kwargs['initial']['applicant'] = current_applicant
        return form_kwargs
class ApplicationUpdateView(FormApplicationPageAlertMixin, DenyIfApplicantNotOwnerMixin, UpdateView):
    # Edit an application; restricted to its owning applicant.
    model = Application
    form_class = ApplicationForm
    context_object_name = 'application'
    template_name = 'app/application/form.html'
    success_url = reverse_lazy('MyApplications')
class ApplicationListView(SelfnamedMixin, TagSearchMixin, PageHeaderMixin, ListView):
    # Public paginated list of all applications with tag search.
    model = Application
    form_class = TagForm
    context_object_name = 'applications'
    template_name = 'app/application/list.html'
    paginate_by = RECORDS_PER_PAGE
    my_name = 'Applications'
    page_header = 'Все заявления'  # page title: "All applications"
class MyApplicationListView(DenyIfNotApplicantMixin, SelfnamedMixin, TagSearchMixin, PageHeaderMixin, ListView):
    """Paginated, tag-searchable list of the logged-in applicant's own
    applications."""
    model = Application
    form_class = TagForm
    context_object_name = 'applications'
    template_name = 'app/application/list.html'
    paginate_by = RECORDS_PER_PAGE
    my_name = 'MyApplications'
    page_header = 'Мои заявления'  # page title: "My applications"
    def get_queryset(self):
        base_queryset = super(MyApplicationListView, self).get_queryset()
        return base_queryset.filter(applicant__profile=self.request.user.profile)
class ApplicationDeleteView(DenyIfApplicantNotOwnerMixin, DeleteView):
    # Delete an application; restricted to its owning applicant.
    model = Application
    context_object_name = 'application'
    template_name = 'app/application/delete.html'
    success_url = reverse_lazy('MyApplications')
class ApplicationDetailView(DetailView):
    # Public read-only view of a single application.
    model = Application
    context_object_name = 'application'
    template_name = 'app/application/detail.html'
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import subsequent_mask
"""
important: concat all data and org them to batches
learned from: https://github.com/pytorch/examples/blob/master/word_language_model/main.py
"""
class Dataset(Dataset):
    """Contiguous-stream language-model dataset.

    All sentences are concatenated into a single token stream, which is then
    folded into ``batch_size`` parallel columns; ``get_batch`` slices
    ``bptt``-long windows out of those columns.
    """
    def __init__(self, data, batch_size, bptt, pad_idx):
        # Flatten the nested sentence lists into one 1-D int64 stream.
        flat = torch.tensor([tok for sentence in data for tok in sentence]).type(torch.int64)
        # Keep only as many tokens as divide evenly into batch_size columns;
        # the remainder is trimmed off.
        n_rows = flat.size(0) // batch_size
        flat = flat.narrow(0, 0, n_rows * batch_size)
        # Shape (n_rows, batch_size): each column is an independent stream.
        # With the alphabet as the stream and batch size 4:
        #   ┌ a g m s ┐
        #   │ b h n t │
        #   │ c i o u │
        #   │ d j p v │
        #   │ e k q w │
        #   └ f l r x ┘
        # Columns are treated as independent by the model, so e.g. the
        # dependence of 'g' on 'f' cannot be learned, in exchange for more
        # efficient batch processing.
        self.data = flat.view(batch_size, -1).t()
        self.batch_size = batch_size
        self.bptt = bptt
        self.pad_idx = pad_idx
    def __len__(self):
        # Number of time steps available per column.
        return self.data.size(0)
    def __getitem__(self, idx):
        # Random access is unsupported; use get_batch(i) instead.
        raise NotImplementedError
    def get_batch(self, i):
        """Return (data, target, attn_mask, target_mask) for window start i."""
        window = min(self.bptt, len(self) - 1 - i)
        batch_data = self.data[i:i + window].t().contiguous()
        batch_target = self.data[i + 1:i + 1 + window].t().contiguous()
        return (batch_data,
                batch_target,
                self.make_attn_mask(batch_data, self.pad_idx),
                self.make_target_mask(batch_target, self.pad_idx))
    def make_attn_mask(self, sent, pad_idx):
        """Create a mask that hides both padding and future positions."""
        pad_mask = (sent != pad_idx).unsqueeze(-2)
        return pad_mask & subsequent_mask(sent.size(-1)).type_as(pad_mask)
    def make_target_mask(self, sent, pad_idx):
        """Boolean mask that is False at padding positions."""
        return sent != pad_idx
def main():
    """Smoke-test Dataset batching on a tiny hand-checked corpus."""
    sample_sentences = [
        [1, 2, 3],
        [1, 2],
        [1, 2, 3, 4],
        [1, 2],
        [1, 2, 3, 4, 5],
    ]
    batch_size = 4
    bptt = 3
    pad_idx = 0
    dataset = Dataset(sample_sentences, batch_size, bptt, pad_idx)
    print("\nall data")
    print(dataset.data)
    # Expected layout (each column an independent stream):
    # ┌ 1 2 4 2 ┐
    # │ 2 1 1 3 │
    # │ 3 2 2 4 │
    # └ 1 3 1 5 ┘
    print("\npossible range")
    print(range(0, len(dataset) - 1, bptt))
    batch_data, batch_target, attn_mask, target_mask = dataset.get_batch(0)
    print("\nbatch data")
    print(batch_data)
    print(batch_target)
    print(attn_mask)
    print(target_mask)
    print(torch.sum(target_mask))
if __name__ == "__main__": main()
# Generated by Django 2.2.1 on 2019-10-29 10:05
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: makes RegisterComedian.profile_pic
    # optional, with a default image, uploaded under profile_pic/.
    dependencies = [
        ('comedians', '0007_auto_20191027_1858'),
    ]
    operations = [
        migrations.AlterField(
            model_name='registercomedian',
            name='profile_pic',
            field=models.ImageField(blank=True, default='default_pp.jpg', upload_to='profile_pic/'),
        ),
    ]
|
########################################################################################################################
# Module: transport/smc.py
# Description: SMC samplers for tempered sequence of distributions.
#
# Web: https://github.com/SamDuffield/mocat
########################################################################################################################
from typing import Tuple, Union, Type
from inspect import isclass
from jax import numpy as jnp, random, vmap
from jax.lax import scan, cond
from jax.scipy.special import logsumexp
from mocat.src.core import Scenario, cdict
from mocat.src.transport.sampler import TransportSampler
from mocat.src.mcmc.sampler import MCMCSampler, Correction, check_correction
from mocat.src.mcmc.metropolis import Metropolis
from mocat.src.utils import bisect
from mocat.src.metrics import log_ess_log_weight
class SMCSampler(TransportSampler):
    """Base Sequential Monte Carlo sampler.

    Maintains an ensemble of weighted particles; each update optionally
    resamples the ensemble and then moves every particle with
    ``forward_proposal`` (vmapped across the ensemble).
    """
    name = 'SMC Sampler'
    def startup(self,
                scenario: Scenario,
                n: int,
                initial_state: cdict,
                initial_extra: cdict,
                **kwargs) -> Tuple[cdict, cdict]:
        """Ensure ensemble bookkeeping fields exist: uniform (zero)
        log-weights and a full effective sample size of n (stored
        per-particle for vmap compatibility)."""
        initial_state, initial_extra = super().startup(scenario, n, initial_state, initial_extra, **kwargs)
        if not hasattr(initial_extra, 'resample_bool'):
            initial_extra.resample_bool = True
        if not hasattr(initial_state, 'log_weight'):
            initial_state.log_weight = jnp.zeros(n)
        if not hasattr(initial_state, 'ess'):
            initial_state.ess = jnp.zeros(n) + n
        return initial_state, initial_extra
    def forward_proposal(self,
                         scenario: Scenario,
                         previous_state: cdict,
                         previous_extra: cdict,
                         random_key: jnp.ndarray) -> cdict:
        # Abstract: subclasses must implement the single-particle move.
        raise AttributeError(f'{self.name} forward_proposal not initiated')
    def adapt(self,
              previous_ensemble_state: cdict,
              previous_extra: cdict,
              new_ensemble_state: cdict,
              new_extra: cdict) -> Tuple[cdict, cdict]:
        # Default adaptation: accumulate log-weights across iterations.
        new_ensemble_state.log_weight = previous_ensemble_state.log_weight + new_ensemble_state.log_weight
        return new_ensemble_state, new_extra
    def resample_criterion(self,
                           ensemble_state: cdict,
                           extra: cdict) -> bool:
        # Default: resample every iteration (subclasses use ESS thresholds).
        return True
    def resample(self,
                 ensemble_state: cdict,
                 random_key: jnp.ndarray) -> cdict:
        """Multinomial resampling proportional to the particle weights;
        afterwards weights are reset to uniform and ESS back to n."""
        n = ensemble_state.value.shape[0]
        resampled_indices = random.categorical(random_key,
                                               ensemble_state.log_weight,
                                               shape=(n,))
        resampled_ensemble_state = ensemble_state[resampled_indices]
        resampled_ensemble_state.log_weight = jnp.zeros(n)
        resampled_ensemble_state.ess = jnp.zeros(n) + n
        return resampled_ensemble_state
    def update(self,
               scenario: Scenario,
               ensemble_state: cdict,
               extra: cdict) -> Tuple[cdict, cdict]:
        """One SMC iteration: (conditionally) resample, then advance all
        particles with the vmapped forward proposal and adapt."""
        extra.iter = extra.iter + 1
        n = ensemble_state.value.shape[0]
        extra.resample_bool = self.resample_criterion(ensemble_state, extra)
        # n keys for the per-particle proposals, 1 for resampling, 1 carried over.
        random_keys_all = random.split(extra.random_key, n + 2)
        extra.random_key = random_keys_all[-1]
        # jax.lax.cond keeps the resample decision traceable under jit.
        resampled_ensemble_state \
            = cond(extra.resample_bool,
                   lambda state: self.resample(state, random_keys_all[-2]),
                   lambda state: state,
                   ensemble_state)
        advanced_state = vmap(self.forward_proposal,
                              in_axes=(None, 0, None, 0))(scenario,
                                                          resampled_ensemble_state,
                                                          extra,
                                                          random_keys_all[:n])
        # NOTE(review): the same `extra` object is passed as both previous and
        # new extra — confirm adapt implementations tolerate that aliasing.
        advanced_state, advanced_extra = self.adapt(resampled_ensemble_state, extra,
                                                    advanced_state, extra)
        return advanced_state, advanced_extra
class TemperedSMCSampler(SMCSampler):
    """SMC over a likelihood-tempered sequence of targets.

    The target at temperature t has potential
    ``prior_potential + t * likelihood_potential``; t starts at 0 (the
    prior) and increases towards ``max_temperature``, either adaptively or
    along a fixed ``temperature_schedule``.
    """
    name = 'Tempered SMC Sampler'
    def __init__(self,
                 temperature_schedule: Union[None, jnp.ndarray] = None,
                 max_temperature: float = 1.,
                 max_iter: int = int(1e4),
                 **kwargs):
        # Order matters: assigning temperature_schedule goes through the
        # __setattr__ hook below, which reads/overwrites max_iter and
        # max_temperature.
        self.max_iter = max_iter
        self.max_temperature = max_temperature
        self.temperature_schedule = temperature_schedule
        super().__init__(**kwargs)
    def __setattr__(self, key, value):
        # Installing a schedule switches next_temperature to an indexed
        # lookup and pins max_temperature/max_iter to the schedule; clearing
        # it (None) restores the adaptive rule.
        if key == 'temperature_schedule':
            if value is None:
                self.next_temperature = self.next_temperature_adaptive
                # Undo a previously installed schedule's max_iter override.
                if hasattr(self, 'temperature_schedule') and self.temperature_schedule is not None \
                        and self.max_iter == len(self.temperature_schedule):
                    self.max_iter = int(1e4)
            else:
                self.next_temperature = lambda state, extra: self.temperature_schedule[extra.iter]
                self.max_temperature = value[-1]
                self.max_iter = len(value)
        super().__setattr__(key, value)
    def startup(self,
                scenario: Scenario,
                n: int,
                initial_state: cdict,
                initial_extra: cdict,
                initiate_potential: bool = True,
                **kwargs) -> Tuple[cdict, cdict]:
        """Initialise tempering fields: per-particle prior/likelihood
        potentials (and gradients when the scenario provides them),
        temperature 0 and a running log normalising-constant estimate."""
        if not hasattr(scenario, 'prior_sample'):
            raise TypeError(f'Likelihood tempering requires scenario {scenario.name} to have prior_sample implemented')
        initial_state, initial_extra = super().startup(scenario, n, initial_state, initial_extra, **kwargs)
        # n keys for prior potentials, n for likelihood potentials, 1 carried.
        random_keys = random.split(initial_extra.random_key, 2 * n + 1)
        initial_extra.random_key = random_keys[-1]
        if initiate_potential:
            if hasattr(scenario, 'prior_potential_and_grad'):
                initial_state.prior_potential, initial_state.grad_prior_potential \
                    = vmap(scenario.prior_potential_and_grad)(initial_state.value, random_keys[:n])
                initial_state.likelihood_potential, initial_state.grad_likelihood_potential \
                    = vmap(scenario.likelihood_potential_and_grad)(initial_state.value, random_keys[n:(2 * n)])
                # At temperature 0 the target is exactly the prior.
                initial_state.potential = initial_state.prior_potential
                initial_state.grad_potential = initial_state.grad_prior_potential
            else:
                initial_state.prior_potential = vmap(scenario.prior_potential)(initial_state.value, random_keys[:n])
                initial_state.likelihood_potential = vmap(scenario.likelihood_potential)(initial_state.value,
                                                                                         random_keys[n:(2 * n)])
                initial_state.potential = initial_state.prior_potential
        initial_state.temperature = jnp.zeros(n)
        initial_state.log_weight = jnp.zeros(n)
        initial_state.ess = jnp.zeros(n) + n
        # log Z estimate, replicated per-particle for vmap compatibility.
        initial_state.log_norm_constant = logsumexp(initial_state.log_weight, b=1 / n) * jnp.ones(n)
        scenario.temperature = 0.
        return initial_state, initial_extra
    def next_temperature_adaptive(self,
                                  ensemble_state: cdict,
                                  extra: cdict) -> float:
        # Abstract: subclasses choose the next temperature from the ensemble.
        raise AttributeError(f'{self.name} next_temperature_adaptive not initiated')
    def termination_criterion(self,
                              ensemble_state: cdict,
                              extra: cdict) -> bool:
        # Stop at max temperature, at max iterations, or if the ensemble has
        # degenerated (more than 10% NaN values).
        return jnp.logical_or(jnp.logical_or(ensemble_state.temperature[0] >= self.max_temperature,
                                             extra.iter >= self.max_iter), jnp.isnan(ensemble_state.value).mean() > 0.1)
    def clean_chain(self,
                    scenario: Scenario,
                    chain_ensemble_state: cdict) -> cdict:
        """Collapse per-particle copies of scalar fields to one value per
        iteration and leave the scenario at the final temperature."""
        chain_ensemble_state.temperature = chain_ensemble_state.temperature[:, 0]
        scenario.temperature = float(chain_ensemble_state.temperature[-1])
        chain_ensemble_state.ess = chain_ensemble_state.ess[:, 0]
        chain_ensemble_state.log_norm_constant = chain_ensemble_state.log_norm_constant[:, 0]
        return chain_ensemble_state
    def log_weight(self,
                   previous_ensemble_state: cdict,
                   previous_extra: cdict,
                   new_ensemble_state: cdict,
                   new_extra: cdict) -> Union[float, jnp.ndarray]:
        # Incremental log-weight; overridden by subclasses.
        return 0.
    def adapt(self,
              previous_ensemble_state: cdict,
              previous_extra: cdict,
              new_ensemble_state: cdict,
              new_extra: cdict) -> Tuple[cdict, cdict]:
        """Advance the temperature, reweight, and refresh potentials, ESS
        and the running log normalising-constant estimate."""
        n = new_ensemble_state.value.shape[0]
        next_temperature = self.next_temperature(new_ensemble_state, new_extra)
        new_ensemble_state.temperature = jnp.ones(n) * next_temperature
        new_ensemble_state.log_weight = previous_ensemble_state.log_weight \
                                        + self.log_weight(previous_ensemble_state, previous_extra,
                                                          new_ensemble_state, new_extra)
        new_ensemble_state.ess = jnp.ones(n) * jnp.exp(log_ess_log_weight(new_ensemble_state.log_weight))
        # Tempered potential at the new temperature.
        new_ensemble_state.potential = new_ensemble_state.prior_potential \
                                       + next_temperature * new_ensemble_state.likelihood_potential
        if hasattr(new_ensemble_state, 'grad_potential'):
            new_ensemble_state.grad_potential \
                = new_ensemble_state.grad_prior_potential \
                + next_temperature * new_ensemble_state.grad_likelihood_potential
        # Standard SMC log-normalising-constant update from the weight increment.
        new_ensemble_state.log_norm_constant \
            = previous_ensemble_state.log_norm_constant \
            + logsumexp(new_ensemble_state.log_weight) \
            - logsumexp(previous_ensemble_state.log_weight)
        return new_ensemble_state, new_extra
    def update(self,
               scenario: Scenario,
               ensemble_state: cdict,
               extra: cdict) -> Tuple[cdict, cdict]:
        # Sync the scenario's temperature with the ensemble before running
        # the generic SMC update.
        scenario.temperature = ensemble_state.temperature[0]
        advanced_state, advanced_extra = super().update(scenario, ensemble_state, extra)
        return advanced_state, advanced_extra
class MetropolisedSMCSampler(TemperedSMCSampler):
    """Tempered SMC whose forward proposal runs ``mcmc_steps`` iterations of
    an inner MCMC sampler targeting the current tempered distribution.

    The next temperature is chosen by bisection so that a fraction
    ``ess_threshold_retain`` of the ESS is retained; resampling triggers
    when ESS falls below ``ess_threshold_resample * n``.
    """
    name = "Metropolised SMC Sampler"
    def __init__(self,
                 mcmc_sampler: Union[MCMCSampler, Type[MCMCSampler]],
                 mcmc_correction: Union[Correction, Type[Correction], str] = 'sampler_default',
                 mcmc_steps: int = 1,
                 max_iter: int = int(1e4),
                 temperature_schedule: Union[None, jnp.ndarray] = None,
                 max_temperature: float = 1.,
                 ess_threshold_retain: float = 0.9,
                 ess_threshold_resample: float = 0.5,
                 bisection_tol: float = 1e-5,
                 max_bisection_iter: int = 1000,
                 **kwargs):
        # A leading temperature of exactly 0 duplicates the prior start, so
        # it is dropped from a supplied schedule.
        if temperature_schedule is not None:
            if temperature_schedule[0] == 0.:
                temperature_schedule = temperature_schedule[1:]
        super().__init__(max_iter=max_iter, temperature_schedule=temperature_schedule, max_temperature=max_temperature,
                         **kwargs)
        # Accept either an MCMCSampler instance or its class.
        if isclass(mcmc_sampler):
            mcmc_sampler = mcmc_sampler()
        self.mcmc_sampler = mcmc_sampler
        if mcmc_correction != 'sampler_default':
            self.mcmc_sampler.correction = mcmc_correction
        self.parameters.mcmc_steps = mcmc_steps
        self.parameters.ess_threshold_retain = ess_threshold_retain
        self.parameters.ess_threshold_resample = ess_threshold_resample
        self.parameters.bisection_tol = bisection_tol
        self.parameters.max_bisection_iter = max_bisection_iter
    def __setattr__(self, key, value):
        # Keep the no-leading-zero invariant when the schedule is reassigned.
        if key == 'temperature_schedule':
            if value is not None and value[0] == 0.:
                value = value[1:]
        super().__setattr__(key, value)
    def startup(self,
                scenario: Scenario,
                n: int,
                initial_state: cdict,
                initial_extra: cdict,
                **kwargs) -> Tuple[cdict, cdict]:
        """Initialise the tempered ensemble at the first temperature and run
        the inner MCMC sampler's startup on every particle."""
        self.mcmc_sampler.correction = check_correction(self.mcmc_sampler.correction)
        initial_state, initial_extra = super().startup(scenario, n, initial_state, initial_extra, **kwargs)
        first_temp = self.next_temperature(initial_state, initial_extra)
        scenario.temperature = first_temp
        initial_state.temperature += first_temp
        initial_state.potential = initial_state.prior_potential + first_temp * initial_state.likelihood_potential
        if hasattr(initial_state, 'grad_potential'):
            initial_state.grad_potential = initial_state.grad_prior_potential \
                                           + first_temp * initial_state.grad_likelihood_potential
        # Importance weight for moving from the prior to the first tempered target.
        initial_state.log_weight = - first_temp * initial_state.likelihood_potential
        initial_state.ess = jnp.repeat(jnp.exp(log_ess_log_weight(initial_state.log_weight)), n)
        initial_state.log_norm_constant = logsumexp(initial_state.log_weight, b=1 / n) * jnp.ones(n)
        # Vmap the inner sampler's startup over particles; extra is shared,
        # so the replicated copies are collapsed back to one below.
        initial_state, initial_extra = vmap(
            lambda state: self.mcmc_sampler.startup(scenario,
                                                    n,
                                                    state,
                                                    initial_extra))(initial_state)
        initial_extra = initial_extra[0]
        return initial_state, initial_extra
    def resample_criterion(self,
                           ensemble_state: cdict,
                           extra: cdict) -> bool:
        # ESS is replicated per particle; compare one copy to the threshold.
        return ensemble_state.ess[0] <= (extra.parameters.ess_threshold_resample * len(ensemble_state.value))
    @staticmethod
    def log_ess(previous_log_weight: jnp.ndarray,
                current_temperature: float,
                new_temperature: float,
                likelihood_potential: jnp.ndarray) -> float:
        """Log-ESS the ensemble would have after reweighting to ``new_temperature``."""
        log_weight = previous_log_weight - (new_temperature - current_temperature) * likelihood_potential
        return log_ess_log_weight(log_weight)
    def next_temperature_adaptive(self,
                                  ensemble_state: cdict,
                                  extra: cdict) -> float:
        """Bisect for the largest temperature that retains
        ``ess_threshold_retain`` of the current ESS."""
        temperature_bounds = jnp.array([ensemble_state.temperature[0], self.max_temperature])
        likelihood_potential = ensemble_state.likelihood_potential
        log_n_samp_threshold = jnp.log(ensemble_state.ess[0] * self.parameters.ess_threshold_retain)
        bisect_out_bounds, bisect_out_evals, bisect_out_iter \
            = bisect(lambda x: self.log_ess(ensemble_state.log_weight,
                                            temperature_bounds[0],
                                            x,
                                            likelihood_potential) - log_n_samp_threshold,
                     temperature_bounds,
                     max_iter=self.parameters.max_bisection_iter,
                     tol=self.parameters.bisection_tol)
        # Return whichever bracket endpoint is closest to the root.
        return bisect_out_bounds[jnp.argmin(jnp.abs(bisect_out_evals))]
    def clean_mcmc_chain(self,
                         chain_state: cdict,
                         chain_extra: cdict) -> Tuple[cdict, cdict]:
        """Keep the final state/extra of the inner chain; the acceptance
        probability is averaged over the whole inner chain."""
        clean_state = chain_state[-1]
        clean_extra = chain_extra[-1]
        clean_state.alpha = chain_state.alpha.mean()
        clean_extra.parameters = chain_extra.parameters[-1]
        return clean_state, clean_extra
    def forward_proposal(self,
                         scenario: Scenario,
                         state: cdict,
                         extra: cdict,
                         random_key: jnp.ndarray) -> cdict:
        """Move one particle by mcmc_steps inner-MCMC transitions at the
        current temperature (vmapped across the ensemble by update)."""
        def mcmc_kernel(previous_carry: Tuple[cdict, cdict],
                        _: None) -> Tuple[Tuple[cdict, cdict], Tuple[cdict, cdict]]:
            new_carry = self.mcmc_sampler.update(scenario, *previous_carry)
            return new_carry, new_carry
        extra.random_key = random_key
        start_state, start_extra = self.mcmc_sampler.startup(scenario,
                                                             extra.parameters.mcmc_steps,
                                                             state,
                                                             extra)
        final_carry, chain = scan(mcmc_kernel,
                                  (start_state, start_extra),
                                  None,
                                  length=self.parameters.mcmc_steps)
        advanced_state, advanced_extra = self.clean_mcmc_chain(chain[0], chain[1])
        # Recover the prior/likelihood split from the tempered potential:
        # likelihood = (potential - prior) / temperature.
        advanced_state.prior_potential = scenario.prior_potential(advanced_state.value, advanced_extra.random_key)
        advanced_state.likelihood_potential = (advanced_state.potential - advanced_state.prior_potential) \
                                              / scenario.temperature
        return advanced_state
    def log_weight(self,
                   previous_ensemble_state: cdict,
                   previous_extra: cdict,
                   new_ensemble_state: cdict,
                   new_extra: cdict) -> jnp.ndarray:
        # Incremental importance weight for the temperature increase.
        return - (new_ensemble_state.temperature[0] - previous_ensemble_state.temperature[0]) \
               * new_ensemble_state.likelihood_potential
class RMMetropolisedSMCSampler(MetropolisedSMCSampler):
    """Metropolised SMC with Robbins-Monro adaptation of the inner MCMC
    stepsize towards the sampler's target acceptance rate."""
    def __init__(self,
                 mcmc_sampler: Union[MCMCSampler, Type[MCMCSampler]],
                 mcmc_correction: Union[Correction, Type[Correction], str] = Metropolis,
                 mcmc_steps: int = 1,
                 max_iter: int = int(1e4),
                 temperature_schedule: Union[None, jnp.ndarray] = None,
                 max_temperature: float = 1.,
                 ess_threshold_retain: float = 0.9,
                 ess_threshold_resample: float = 0.5,
                 bisection_tol: float = 1e-5,
                 max_bisection_iter: int = 1000,
                 rm_stepsize: float = 1.,
                 **kwargs):
        super().__init__(mcmc_sampler=mcmc_sampler, mcmc_correction=mcmc_correction, mcmc_steps=mcmc_steps,
                         max_iter=max_iter, temperature_schedule=temperature_schedule, max_temperature=max_temperature,
                         ess_threshold_retain=ess_threshold_retain, ess_threshold_resample=ess_threshold_resample,
                         bisection_tol=bisection_tol, max_bisection_iter=max_bisection_iter, **kwargs)
        # Robbins-Monro learning rate for the log-stepsize update in adapt().
        self.parameters.rm_stepsize = rm_stepsize
    def startup(self,
                scenario: Scenario,
                n: int,
                initial_state: cdict,
                initial_extra: cdict,
                **kwargs) -> Tuple[cdict, cdict]:
        initial_state, initial_extra = super().startup(scenario, n, initial_state, initial_extra, **kwargs)
        # Track the (shared) stepsize per particle so it appears in the chain.
        initial_state.stepsize = jnp.ones(n) * initial_extra.parameters.stepsize
        return initial_state, initial_extra
    def adapt(self,
              previous_ensemble_state: cdict,
              previous_extra: cdict,
              new_ensemble_state: cdict,
              new_extra: cdict) -> Tuple[cdict, cdict]:
        """Tempered-SMC adaptation plus a Robbins-Monro move of the
        log-stepsize towards the inner sampler's target acceptance rate."""
        new_ensemble_state, new_extra = super().adapt(previous_ensemble_state, previous_extra,
                                                      new_ensemble_state, new_extra)
        log_stepsize = jnp.log(new_extra.parameters.stepsize)
        # Weight-averaged acceptance rate (weights shifted by max for
        # numerical stability before exponentiating).
        alpha_mean = jnp.average(new_ensemble_state.alpha,
                                 weights=jnp.exp(new_ensemble_state.log_weight - new_ensemble_state.log_weight.max()))
        new_log_stepsize = log_stepsize + new_extra.parameters.rm_stepsize \
                           * (alpha_mean - self.mcmc_sampler.tuning.target)
        new_extra.parameters.stepsize = jnp.exp(new_log_stepsize)
        new_ensemble_state.stepsize = jnp.ones(new_ensemble_state.value.shape[0]) * new_extra.parameters.stepsize
        return new_ensemble_state, new_extra
    def clean_chain(self,
                    scenario: Scenario,
                    chain_ensemble_state: cdict) -> cdict:
        chain_ensemble_state = super().clean_chain(scenario, chain_ensemble_state)
        # Stepsize is shared across particles: keep one value per iteration.
        chain_ensemble_state.stepsize = chain_ensemble_state.stepsize[:, 0]
        return chain_ensemble_state
|
import logging
from collections import OrderedDict
import torch
from .SRRaGAN_model import SRRaGANModel
from .base_model import nullcast
logger = logging.getLogger('base')
from . import optimizers
from . import schedulers
from dataops.batchaug import BatchAug
from options.options import opt_get
class SRFlowModel(SRRaGANModel):
    """SRFlow normalizing-flow super-resolution model.

    Extends SRRaGANModel with a flow negative log-likelihood (nll) loss and
    temperature ("heat") based sampling of SR candidates during validation.
    No discriminator is used.
    """
    def __init__(self, opt, step):
        super(SRFlowModel, self).__init__(opt, step)
        train_opt = opt['train']
        # Validation sampling temperatures and number of samples per heat.
        # NOTE(review): the 0.0 default is a scalar, but test() iterates over
        # self.heats - confirm configs always supply a list here.
        self.heats = opt_get(opt, ['val', 'heats'], 0.0)
        self.n_sample = opt_get(opt, ['val', 'n_sample'], 1)
        hr_size = opt_get(opt, ['datasets', 'train', 'HR_size'], 160)
        # LR patch size implied by the HR patch size and the scale factor.
        self.lr_size = hr_size // opt['scale']
        self.nll = None
        if self.is_train:
            """
            Initialize losses
            """
            # nll loss weight for the flow objective
            self.fl_weight = opt_get(self.opt, ['train', 'fl_weight'], 1)
            """
            Prepare optimizer
            """
            self.optDstep = True # no Discriminator being used
            # Optimizer over netG parameters, with the 'RRDB' submodule split
            # into its own (initially empty when frozen) parameter group.
            self.optimizers, self.optimizer_G = optimizers.get_optimizers_filter(
                None, None, self.netG, train_opt, logger, self.optimizers, param_filter='RRDB')
            """
            Prepare schedulers
            """
            self.schedulers = schedulers.get_schedulers(
                optimizers=self.optimizers, schedulers=self.schedulers, train_opt=train_opt)
            """
            Set RRDB training state
            """
            # Optionally freeze the RRDB encoder until a fraction of niter has
            # elapsed (train_RRDB_delay in [0, 1]).
            train_RRDB_delay = opt_get(self.opt, ['network_G', 'train_RRDB_delay'])
            if train_RRDB_delay is not None and step < int(train_RRDB_delay * self.opt['train']['niter']) \
                and self.netG.module.RRDB_training:
                if self.netG.module.set_rrdb_training(False):
                    logger.info('RRDB module frozen, will unfreeze at iter: {}'.format(
                        int(train_RRDB_delay * self.opt['train']['niter'])))
    # TODO: CEM is WIP
    # def forward(self, gt=None, lr=None, z=None, eps_std=None, reverse=False,
    #     epses=None, reverse_with_grad=False, lr_enc=None, add_gt_noise=False,
    #     step=None, y_label=None, CEM_net=None)
    #     """
    #     Run forward pass G(LR); called by <optimize_parameters> and <test> functions.
    #     Can be used either with 'data' passed directly or loaded 'self.var_L'.
    #     CEM_net can be used during inference to pass different CEM wrappers.
    #     """
    #     if isinstance(lr, torch.Tensor):
    #         gt=gt, lr=lr
    #     else:
    #         gt=self.real_H, lr=self.var_L
    #     if CEM_net is not None:
    #         wrapped_netG = CEM_net.WrapArchitecture(self.netG)
    #         net_out = wrapped_netG(gt=gt, lr=lr, z=z, eps_std=eps_std, reverse=reverse,
    #                                epses=epses, reverse_with_grad=reverse_with_grad,
    #                                lr_enc=lr_enc, add_gt_noise=add_gt_noise, step=step,
    #                                y_label=y_label)
    #     else:
    #         net_out = self.netG(gt=gt, lr=lr, z=z, eps_std=eps_std, reverse=reverse,
    #                             epses=epses, reverse_with_grad=reverse_with_grad,
    #                             lr_enc=lr_enc, add_gt_noise=add_gt_noise, step=step,
    #                             y_label=y_label)
    #     if reverse:
    #         sr, logdet = net_out
    #         return sr, logdet
    #     else:
    #         z, nll, y_logits = net_out
    #         return z, nll, y_logits
    def add_optimizer_and_scheduler_RRDB(self, train_opt):
        """Append the RRDB parameters to the optimizer's second param group.

        Note: this function from the original SRFlow code seems partially broken.
        Since the RRDB optimizer is being created on init, this is not being used.
        """
        # optimizers
        assert len(self.optimizers) == 1, self.optimizers
        # Group 1 must still be empty (RRDB was frozen until now).
        assert len(self.optimizer_G.param_groups[1]['params']) == 0, self.optimizer_G.param_groups[1]
        for k, v in self.netG.named_parameters():  # can optimize for a part of the model
            if v.requires_grad:
                if '.RRDB.' in k:
                    self.optimizer_G.param_groups[1]['params'].append(v)
        assert len(self.optimizer_G.param_groups[1]['params']) > 0
    def optimize_parameters(self, step):
        """Run one (virtual-batch) training step: nll loss plus any configured
        generator losses, with optional AMP scaling and gradient accumulation.

        NOTE(review): backward/step below only run inside the generator-losses
        branch; with only the nll loss enabled no gradient step occurs -
        confirm this is intended.
        """
        # unfreeze RRDB module if train_RRDB_delay is set
        train_RRDB_delay = opt_get(self.opt, ['network_G', 'train_RRDB_delay'])
        if train_RRDB_delay is not None and \
            int(step/self.accumulations) > int(train_RRDB_delay * self.opt['train']['niter']) \
            and not self.netG.module.RRDB_training:
            if self.netG.module.set_rrdb_training(True):
                logger.info('Unfreezing RRDB module.')
                if len(self.optimizers) == 1:
                    # add the RRDB optimizer only if missing
                    self.add_optimizer_and_scheduler_RRDB(self.opt['train'])
        # self.print_rrdb_state()
        self.netG.train()
        """
        Calculate and log losses
        """
        l_g_total = 0
        if self.fl_weight > 0:
            # compute the negative log-likelihood of the output z assuming a unit-norm Gaussian prior
            # with self.cast(): # needs testing, reduced precision could affect results
            z, nll, y_logits = self.netG(gt=self.real_H, lr=self.var_L, reverse=False)
            nll_loss = torch.mean(nll)
            l_g_nll = self.fl_weight * nll_loss
            # # /with self.cast():
            self.log_dict['nll_loss'] = l_g_nll.item()
            l_g_total += l_g_nll / self.accumulations
        if self.generatorlosses.loss_list or self.precisegeneratorlosses.loss_list:
            # batch (mixup) augmentations
            aug = None
            if self.mixup:
                self.real_H, self.var_L, mask, aug = BatchAug(
                    self.real_H, self.var_L,
                    self.mixopts, self.mixprob, self.mixalpha,
                    self.aux_mixprob, self.aux_mixalpha, self.mix_p
                    )
            # Sample SR at heat 0 (deterministic latent) with gradients enabled.
            with self.cast():
                z = self.get_z(heat=0, seed=None, batch_size=self.var_L.shape[0], lr_shape=self.var_L.shape)
                self.fake_H, logdet = self.netG(lr=self.var_L, z=z, eps_std=0, reverse=True, reverse_with_grad=True)
            # batch (mixup) augmentations
            # cutout-ed pixels are discarded when calculating loss by masking removed pixels
            if aug == "cutout":
                self.fake_H, self.real_H = self.fake_H*mask, self.real_H*mask
            # TODO: CEM is WIP
            # unpad images if using CEM
            # if self.CEM:
            #     self.fake_H = self.CEM_net.HR_unpadder(self.fake_H)
            #     self.real_H = self.CEM_net.HR_unpadder(self.real_H)
            #     self.var_ref = self.CEM_net.HR_unpadder(self.var_ref)
            if self.generatorlosses.loss_list:
                with self.cast():
                    # regular losses
                    loss_results, self.log_dict = self.generatorlosses(self.fake_H, self.real_H, self.log_dict, self.f_low)
                    l_g_total += sum(loss_results) / self.accumulations
            if self.precisegeneratorlosses.loss_list:
                # high precision generator losses (can be affected by AMP half precision)
                precise_loss_results, self.log_dict = self.precisegeneratorlosses(
                        self.fake_H, self.real_H, self.log_dict, self.f_low)
                l_g_total += sum(precise_loss_results) / self.accumulations
            if self.amp:
                self.amp_scaler.scale(l_g_total).backward()
            else:
                l_g_total.backward()
            # only step and clear gradient if virtual batch has completed
            if (step + 1) % self.accumulations == 0:
                if self.amp:
                    self.amp_scaler.step(self.optimizer_G)
                    self.amp_scaler.update()
                else:
                    self.optimizer_G.step()
                self.optimizer_G.zero_grad()
                self.optGstep = True
    def print_rrdb_state(self):
        """Debug helper: print RRDB first-conv grad state and group sizes."""
        for name, param in self.netG.module.named_parameters():
            if "RRDB.conv_first.weight" in name:
                print(name, param.requires_grad, param.data.abs().sum())
        print('params', [len(p['params']) for p in self.optimizer_G.param_groups])
    def test(self, CEM_net=None):
        """Sample SR images for every (heat, sample-index) pair into
        self.fake_H and record the mean nll of the current batch in self.nll."""
        self.netG.eval()
        self.fake_H = {}
        for heat in self.heats:
            for i in range(self.n_sample):
                z = self.get_z(heat, seed=None, batch_size=self.var_L.shape[0], lr_shape=self.var_L.shape)
                with torch.no_grad():
                    self.fake_H[(heat, i)], logdet = self.netG(lr=self.var_L, z=z, eps_std=heat, reverse=True)
        with torch.no_grad():
            _, nll, _ = self.netG(gt=self.real_H, lr=self.var_L, reverse=False)
        self.netG.train()
        self.nll = nll.mean().item()
    # TODO
    def get_encode_nll(self, lq, gt):
        """Return the mean nll of encoding (gt, lq), without gradients."""
        self.netG.eval()
        with torch.no_grad():
            _, nll, _ = self.netG(gt=gt, lr=lq, reverse=False)
        self.netG.train()
        return nll.mean().item()
    # TODO: only used for testing code
    def get_sr(self, lq, heat=None, seed=None, z=None, epses=None):
        """Convenience wrapper: sampled SR image only (drops the latent z)."""
        return self.get_sr_with_z(lq, heat, seed, z, epses)[0]
    # TODO
    def get_encode_z(self, lq, gt, epses=None, add_gt_noise=True):
        """Encode (gt, lq) and return the latent z, without gradients."""
        self.netG.eval()
        with torch.no_grad():
            z, _, _ = self.netG(gt=gt, lr=lq, reverse=False, epses=epses, add_gt_noise=add_gt_noise)
        self.netG.train()
        return z
    # TODO
    def get_encode_z_and_nll(self, lq, gt, epses=None, add_gt_noise=True):
        """Encode (gt, lq) and return both the latent z and its nll."""
        self.netG.eval()
        with torch.no_grad():
            z, nll, _ = self.netG(gt=gt, lr=lq, reverse=False, epses=epses, add_gt_noise=add_gt_noise)
        self.netG.train()
        return z, nll
    # TODO: used by get_sr
    def get_sr_with_z(self, lq, heat=None, seed=None, z=None, epses=None):
        """Reverse pass: sample an SR image for lq, drawing z if not given."""
        self.netG.eval()
        z = self.get_z(heat, seed, batch_size=lq.shape[0], lr_shape=lq.shape) if z is None and epses is None else z
        with torch.no_grad():
            sr, logdet = self.netG(lr=lq, z=z, eps_std=heat, reverse=True, epses=epses)
        self.netG.train()
        return sr, z
    # TODO: used in optimize_parameters and test
    def get_z(self, heat, seed=None, batch_size=1, lr_shape=None):
        """Draw a latent z ~ N(0, heat^2) (zeros when heat == 0) shaped to
        match the flow upsampler; shape depends on the flow 'split' setting."""
        if seed: torch.manual_seed(seed)
        if opt_get(self.opt, ['network_G', 'flow', 'split', 'enable']):
            # Latent dims come from the flow upsampler's channel count and
            # its H/W downscale factors relative to the HR resolution.
            C = self.netG.module.flowUpsamplerNet.C
            H = int(self.opt['scale'] * lr_shape[2] // self.netG.module.flowUpsamplerNet.scaleH)
            W = int(self.opt['scale'] * lr_shape[3] // self.netG.module.flowUpsamplerNet.scaleW)
            size = (batch_size, C, H, W)
            z = torch.normal(mean=0, std=heat, size=size) if heat > 0 else torch.zeros(
                size)
        else:
            # No split: channels grow by the squeeze factor, spatial dims
            # shrink by the same 2**(L-3) factor used for z_size below.
            L = opt_get(self.opt, ['network_G', 'flow', 'L']) or 3
            fac = 2 ** (L - 3)
            z_size = int(self.lr_size // (2 ** (L - 3)))
            z = torch.normal(mean=0, std=heat, size=(batch_size, 3 * 8 * 8 * fac * fac, z_size, z_size))
        return z
    def get_current_visuals(self, need_HR=True):
        """Return LR/SR/HR tensors (first batch element, CPU floats) for
        visualization; SR entries are keyed by ('SR', heat, sample_index)."""
        out_dict = OrderedDict()
        out_dict['LR'] = self.var_L.detach()[0].float().cpu()
        # NOTE(review): 'SR' is set to the placeholder True; the actual images
        # live under the ('SR', heat, i) keys below - confirm consumers.
        out_dict['SR'] = True
        for heat in self.heats:
            for i in range(self.n_sample):
                out_dict[('SR', heat, i)] = self.fake_H[(heat, i)].detach()[0].float().cpu()
        if need_HR:
            out_dict['HR'] = self.real_H.detach()[0].float().cpu()
        return out_dict
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: stt.proto
import sys
# py2/py3 shim: serialized descriptor literals must be latin-1 bytes on py3.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# File descriptor for stt.proto (package SpeechToText); the serialized_pb blob
# encodes the SpeechChunk / TranscriptChunk messages and the Listener service.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='stt.proto',
  package='SpeechToText',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n\tstt.proto\x12\x0cSpeechToText\"I\n\x0bSpeechChunk\x12\x0f\n\x07\x63ontent\x18\x01 \x01(\x0c\x12\r\n\x05token\x18\x02 \x01(\t\x12\x0c\n\x04lang\x18\x03 \x01(\t\x12\x0c\n\x04\x64\x65mo\x18\x04 \x01(\t\"\x91\x01\n\x0fTranscriptChunk\x12\x0b\n\x03\x61sr\x18\x01 \x01(\t\x12\x12\n\ntranscript\x18\x02 \x01(\t\x12\x10\n\x08is_final\x18\x03 \x01(\x08\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12\x0e\n\x06\x61nswer\x18\x05 \x01(\t\x12\x11\n\timage_url\x18\x06 \x01(\t\x12\x14\n\x0cimage_yes_no\x18\x07 \x01(\t2\\\n\x08Listener\x12P\n\x0e\x44oSpeechToText\x12\x19.SpeechToText.SpeechChunk\x1a\x1d.SpeechToText.TranscriptChunk\"\x00(\x01\x30\x01\x62\x06proto3')
)
# Descriptor for the SpeechChunk message (fields 1-4: content, token, lang, demo).
_SPEECHCHUNK = _descriptor.Descriptor(
  name='SpeechChunk',
  full_name='SpeechToText.SpeechChunk',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='content', full_name='SpeechToText.SpeechChunk.content', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='token', full_name='SpeechToText.SpeechChunk.token', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='lang', full_name='SpeechToText.SpeechChunk.lang', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='demo', full_name='SpeechToText.SpeechChunk.demo', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=27,
  serialized_end=100,
)
# Descriptor for the TranscriptChunk message (fields 1-7: asr, transcript,
# is_final, confidence, answer, image_url, image_yes_no).
_TRANSCRIPTCHUNK = _descriptor.Descriptor(
  name='TranscriptChunk',
  full_name='SpeechToText.TranscriptChunk',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='asr', full_name='SpeechToText.TranscriptChunk.asr', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='transcript', full_name='SpeechToText.TranscriptChunk.transcript', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='is_final', full_name='SpeechToText.TranscriptChunk.is_final', index=2,
      number=3, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='confidence', full_name='SpeechToText.TranscriptChunk.confidence', index=3,
      number=4, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='answer', full_name='SpeechToText.TranscriptChunk.answer', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='image_url', full_name='SpeechToText.TranscriptChunk.image_url', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='image_yes_no', full_name='SpeechToText.TranscriptChunk.image_yes_no', index=6,
      number=7, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=103,
  serialized_end=248,
)
# Register the message descriptors with the file descriptor and build the
# concrete message classes through the reflection machinery.
DESCRIPTOR.message_types_by_name['SpeechChunk'] = _SPEECHCHUNK
DESCRIPTOR.message_types_by_name['TranscriptChunk'] = _TRANSCRIPTCHUNK
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SpeechChunk = _reflection.GeneratedProtocolMessageType('SpeechChunk', (_message.Message,), dict(
  DESCRIPTOR = _SPEECHCHUNK,
  __module__ = 'stt_pb2'
  # @@protoc_insertion_point(class_scope:SpeechToText.SpeechChunk)
  ))
_sym_db.RegisterMessage(SpeechChunk)
TranscriptChunk = _reflection.GeneratedProtocolMessageType('TranscriptChunk', (_message.Message,), dict(
  DESCRIPTOR = _TRANSCRIPTCHUNK,
  __module__ = 'stt_pb2'
  # @@protoc_insertion_point(class_scope:SpeechToText.TranscriptChunk)
  ))
_sym_db.RegisterMessage(TranscriptChunk)
# Service descriptor for Listener with its single DoSpeechToText method
# (SpeechChunk -> TranscriptChunk).
_LISTENER = _descriptor.ServiceDescriptor(
  name='Listener',
  full_name='SpeechToText.Listener',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  serialized_start=250,
  serialized_end=342,
  methods=[
  _descriptor.MethodDescriptor(
    name='DoSpeechToText',
    full_name='SpeechToText.Listener.DoSpeechToText',
    index=0,
    containing_service=None,
    input_type=_SPEECHCHUNK,
    output_type=_TRANSCRIPTCHUNK,
    serialized_options=None,
  ),
])
_sym_db.RegisterServiceDescriptor(_LISTENER)
DESCRIPTOR.services_by_name['Listener'] = _LISTENER
# @@protoc_insertion_point(module_scope)
|
from ctypes import *
import os, sys
# Locate the native shared library next to the installed "test" package and
# load it.  (Fix: the directory variable previously shadowed the builtin
# `dir`; renamed to _module_dir.)
_module_dir = os.path.dirname(sys.modules["test"].__file__)
if sys.platform == "win32":
    libName = "test.dll"
elif sys.platform == "darwin":
    libName = "libtest.dylib"
else:
    libName = "libtest.so"
dll = cdll.LoadLibrary(os.path.join(_module_dir, libName))
class testError(Exception):
    """Exception type raised by the native test-library bindings."""
# Constant exported by the native test library.
SIMPLE_CONST = 123
# Poor-man's enum: SimpleEnum values cross the C boundary as a c_byte, with
# the allowed values listed below.
SimpleEnum = c_byte
FIRST = 0
SECOND = 1
THIRD = 2
def simple_call(a):
    """
    Returns the integer passed in.
    """
    return dll.test_simple_call(a)
class SimpleObj(Structure):
    """Python mirror of the native SimpleObj struct (simple_a/b/c fields)."""
    _fields_ = [
        ("simple_a", c_longlong),
        ("simple_b", c_byte),
        ("simple_c", c_bool)
    ]
    def __init__(self, simple_a, simple_b, simple_c):
        self.simple_a = simple_a
        self.simple_b = simple_b
        self.simple_c = simple_c
    def __eq__(self, obj):
        # Fix: comparing against a non-SimpleObj used to raise AttributeError;
        # return NotImplemented so Python falls back to the other operand.
        if not isinstance(obj, SimpleObj):
            return NotImplemented
        return self.simple_a == obj.simple_a and self.simple_b == obj.simple_b and self.simple_c == obj.simple_c
class SimpleRefObj(Structure):
    """Wrapper around a reference-counted native object.

    Holds an opaque handle (`ref`) obtained from the C library; the handle is
    released through the library's unref call when the wrapper is collected.
    """
    _fields_ = [("ref", c_ulonglong)]
    def __init__(self):
        # Acquire a fresh native object; `ref` is the opaque handle.
        self.ref = dll.test_new_simple_ref_obj()
    def __bool__(self):
        # Fix: `self.ref != None` was always True because ctypes exposes the
        # c_ulonglong field as a plain int.  A zero handle means "no object".
        return self.ref != 0
    def __eq__(self, obj):
        # Fix: comparing against a foreign object used to raise
        # AttributeError; return NotImplemented instead.
        if not isinstance(obj, SimpleRefObj):
            return NotImplemented
        return self.ref == obj.ref
    def __del__(self):
        # Only release handles that were actually acquired (robust against a
        # partially constructed instance).
        if self.ref:
            dll.test_simple_ref_obj_unref(self)
    @property
    def simple_ref_a(self):
        return dll.test_simple_ref_obj_get_simple_ref_a(self)
    @simple_ref_a.setter
    def simple_ref_a(self, simple_ref_a):
        dll.test_simple_ref_obj_set_simple_ref_a(self, simple_ref_a)
    @property
    def simple_ref_b(self):
        return dll.test_simple_ref_obj_get_simple_ref_b(self)
    @simple_ref_b.setter
    def simple_ref_b(self, simple_ref_b):
        dll.test_simple_ref_obj_set_simple_ref_b(self, simple_ref_b)
    def doit(self):
        """
        Does some thing with SimpleRefObj.
        """
        dll.test_simple_ref_obj_doit(self)
class SeqInt(Structure):
    """List-like wrapper around a native sequence of integers."""
    _fields_ = [("ref", c_ulonglong)]
    def __init__(self):
        self.ref = dll.test_new_seq_int()
    def __bool__(self):
        # Fix: `self.ref != None` was always True (ref is exposed as an int);
        # a zero handle means "no sequence".
        return self.ref != 0
    def __eq__(self, obj):
        # Fix: guard against foreign operands instead of raising.
        if not isinstance(obj, SeqInt):
            return NotImplemented
        return self.ref == obj.ref
    def __del__(self):
        # Only unref handles that were actually acquired.
        if self.ref:
            dll.test_seq_int_unref(self)
    def __len__(self):
        return dll.test_seq_int_len(self)
    def __getitem__(self, index):
        return dll.test_seq_int_get(self, index)
    def __setitem__(self, index, value):
        dll.test_seq_int_set(self, index, value)
    def __delitem__(self, index):
        dll.test_seq_int_delete(self, index)
    def append(self, value):
        dll.test_seq_int_add(self, value)
    def clear(self):
        dll.test_seq_int_clear(self)
class RefObjWithSeq(Structure):
    """Wrapper around a native object that owns a byte sequence.

    The sequence is exposed through the `data` property, which returns a
    fresh list-like view object on every access.
    """
    _fields_ = [("ref", c_ulonglong)]
    def __init__(self):
        self.ref = dll.test_new_ref_obj_with_seq()
    def __bool__(self):
        # Fix: `self.ref != None` was always True (ref is exposed as an int);
        # a zero handle means "no object".
        return self.ref != 0
    def __eq__(self, obj):
        # Fix: guard against foreign operands instead of raising.
        if not isinstance(obj, RefObjWithSeq):
            return NotImplemented
        return self.ref == obj.ref
    def __del__(self):
        # Only unref handles that were actually acquired.
        if self.ref:
            dll.test_ref_obj_with_seq_unref(self)
    class RefObjWithSeqData:
        """List-like view over the owning object's native sequence."""
        def __init__(self, ref_obj_with_seq):
            self.ref_obj_with_seq = ref_obj_with_seq
        def __len__(self):
            return dll.test_ref_obj_with_seq_data_len(self.ref_obj_with_seq)
        def __getitem__(self, index):
            return dll.test_ref_obj_with_seq_data_get(self.ref_obj_with_seq, index)
        def __setitem__(self, index, value):
            dll.test_ref_obj_with_seq_data_set(self.ref_obj_with_seq, index, value)
        def __delitem__(self, index):
            dll.test_ref_obj_with_seq_data_delete(self.ref_obj_with_seq, index)
        def append(self, value):
            dll.test_ref_obj_with_seq_data_add(self.ref_obj_with_seq, value)
        def clear(self):
            dll.test_ref_obj_with_seq_data_clear(self.ref_obj_with_seq)
    @property
    def data(self):
        # A new view wrapper is created per access; all views share the
        # same underlying native sequence.
        return self.RefObjWithSeqData(self)
class SimpleObjWithProc(Structure):
    """Mirror of the native SimpleObj struct plus an attached C procedure."""
    _fields_ = [
        ("simple_a", c_longlong),
        ("simple_b", c_byte),
        ("simple_c", c_bool)
    ]
    def __init__(self, simple_a, simple_b, simple_c):
        self.simple_a = simple_a
        self.simple_b = simple_b
        self.simple_c = simple_c
    def __eq__(self, obj):
        # Fix: comparing against a non-SimpleObjWithProc used to raise
        # AttributeError; return NotImplemented so Python falls back cleanly.
        if not isinstance(obj, SimpleObjWithProc):
            return NotImplemented
        return self.simple_a == obj.simple_a and self.simple_b == obj.simple_b and self.simple_c == obj.simple_c
    def extra_proc(self):
        """Invoke the library's extra procedure on this struct."""
        dll.test_simple_obj_with_proc_extra_proc(self)
# --- C function signature declarations (argtypes/restype) --------------------
# Declaring these lets ctypes marshal structs by value and convert returns.
# simple_call
dll.test_simple_call.argtypes = [c_longlong]
dll.test_simple_call.restype = c_longlong
# SimpleRefObj lifecycle and accessors
dll.test_simple_ref_obj_unref.argtypes = [SimpleRefObj]
dll.test_simple_ref_obj_unref.restype = None
dll.test_new_simple_ref_obj.argtypes = []
dll.test_new_simple_ref_obj.restype = c_ulonglong
dll.test_simple_ref_obj_get_simple_ref_a.argtypes = [SimpleRefObj]
dll.test_simple_ref_obj_get_simple_ref_a.restype = c_longlong
dll.test_simple_ref_obj_set_simple_ref_a.argtypes = [SimpleRefObj, c_longlong]
dll.test_simple_ref_obj_set_simple_ref_a.restype = None
dll.test_simple_ref_obj_get_simple_ref_b.argtypes = [SimpleRefObj]
dll.test_simple_ref_obj_get_simple_ref_b.restype = c_byte
dll.test_simple_ref_obj_set_simple_ref_b.argtypes = [SimpleRefObj, c_byte]
dll.test_simple_ref_obj_set_simple_ref_b.restype = None
dll.test_simple_ref_obj_doit.argtypes = [SimpleRefObj]
dll.test_simple_ref_obj_doit.restype = None
# SeqInt lifecycle and sequence operations
dll.test_seq_int_unref.argtypes = [SeqInt]
dll.test_seq_int_unref.restype = None
dll.test_new_seq_int.argtypes = []
dll.test_new_seq_int.restype = c_ulonglong
dll.test_seq_int_len.argtypes = [SeqInt]
dll.test_seq_int_len.restype = c_longlong
dll.test_seq_int_get.argtypes = [SeqInt, c_longlong]
dll.test_seq_int_get.restype = c_longlong
dll.test_seq_int_set.argtypes = [SeqInt, c_longlong, c_longlong]
dll.test_seq_int_set.restype = None
dll.test_seq_int_delete.argtypes = [SeqInt, c_longlong]
dll.test_seq_int_delete.restype = None
dll.test_seq_int_add.argtypes = [SeqInt, c_longlong]
dll.test_seq_int_add.restype = None
dll.test_seq_int_clear.argtypes = [SeqInt]
dll.test_seq_int_clear.restype = None
# RefObjWithSeq lifecycle and its byte-sequence operations
dll.test_ref_obj_with_seq_unref.argtypes = [RefObjWithSeq]
dll.test_ref_obj_with_seq_unref.restype = None
dll.test_new_ref_obj_with_seq.argtypes = []
dll.test_new_ref_obj_with_seq.restype = c_ulonglong
dll.test_ref_obj_with_seq_data_len.argtypes = [RefObjWithSeq]
dll.test_ref_obj_with_seq_data_len.restype = c_longlong
dll.test_ref_obj_with_seq_data_get.argtypes = [RefObjWithSeq, c_longlong]
dll.test_ref_obj_with_seq_data_get.restype = c_byte
dll.test_ref_obj_with_seq_data_set.argtypes = [RefObjWithSeq, c_longlong, c_byte]
dll.test_ref_obj_with_seq_data_set.restype = None
dll.test_ref_obj_with_seq_data_delete.argtypes = [RefObjWithSeq, c_longlong]
dll.test_ref_obj_with_seq_data_delete.restype = None
dll.test_ref_obj_with_seq_data_add.argtypes = [RefObjWithSeq, c_byte]
dll.test_ref_obj_with_seq_data_add.restype = None
dll.test_ref_obj_with_seq_data_clear.argtypes = [RefObjWithSeq]
dll.test_ref_obj_with_seq_data_clear.restype = None
# SimpleObjWithProc
dll.test_simple_obj_with_proc_extra_proc.argtypes = [SimpleObjWithProc]
dll.test_simple_obj_with_proc_extra_proc.restype = None
|
#!/usr/bin/env python
import rospy
from rover_state_mach.msg import RoverStateMsg
from std_msgs.msg import String
from sensor_msgs.msg import NavSatFix
from sensor_msgs.msg import Imu
from nav_msgs.msg import Odometry
from actionlib_msgs.msg import GoalStatusArray
import rosparam
class status_handler:
    """Aggregates rover sensor/waypoint/image topics into state-machine flags.

    Subscribes to GPS, IMU, encoder, waypoint and image-detection topics,
    maintains the boolean flags consumed by the rover SMACH state machine,
    and publishes/echoes the rover state on a dedicated topic.
    """
    def __init__(self):
        self.namespace = ' [ Rover.StatusHandler ] '
        # State-machine timeouts loaded from the parameter server.
        self.initaliseTimeout = rospy.get_param('RoverSmach/timeouts/initialiseTimeout',600)
        self.readyTimeout = rospy.get_param('RoverSmach/timeouts/readyTimeout',600000)
        self.findImageTimeout = rospy.get_param('RoverSmach/timeouts/findImageTimeout',600000)
        self.reachImageTimeout = rospy.get_param('RoverSmach/timeouts/reachImageTimeout',600000)
        self.gpsReached = False
        self.imageDetected = False
        self.imageReached = False
        #Parameters for Initalise
        self.gpsWorking = False
        self.imuWorking = False
        self.encoderWorking = False
        self.allSensorsWorking = False
        #Parameter for Waypoint
        self.gotWayPoint = False
        #Parameter for Reach Image
        self.goBack = False
        #Parameter for attribute
        self.movementAttribute = rospy.get_param('RoverSmach/attributes/movementAttribute',1) #0 for pure navigation, 1 for navigation + image searching.
        self.wpStatus = 0
        self.state = 0
    def start(self):
        """Resolve topic names from the parameter server and wire up all
        subscribers and the rover-state publisher."""
        self.wp_topic = rospy.get_param('RoverSmach/sub_topics/sub_waypoint','/waypoint_topic')
        self.imu_topic = rospy.get_param('RoverSmach/sub_topics/sub_imu','/imu_topic')
        self.gps_topic = rospy.get_param('RoverSmach/sub_topics/sub_gps','/gps_topic')
        self.encoder_topic = rospy.get_param('RoverSmach/sub_topics/sub_encoder','/encoder_topic')
        self.image_detect_topic = rospy.get_param('RoverSmach/sub_topics/sub_image_detect','/px_topic')
        self.image_reach_topic = rospy.get_param('RoverSmach/sub_topics/sub_reach_image','/image_reach_topic')
        self.rover_state_topic = rospy.get_param('RoverSmach/pub_topics/pub_rover_state','/rover_state_topic')
        rospy.Subscriber(self.wp_topic, String, self.waypoint_callback)             # Listen waypoints
        rospy.Subscriber(self.gps_topic, NavSatFix, self.gps_callback)              # Listen Gps
        rospy.Subscriber(self.imu_topic, Imu, self.imu_callback)                    # Listen IMU
        rospy.Subscriber(self.encoder_topic, Odometry, self.encoder_callback)       # Listen Encoder
        rospy.Subscriber(self.image_detect_topic, String, self.image_detect_callback) # Listen detecting image
        rospy.Subscriber(self.image_reach_topic, String, self.image_reach_callback)   # Listen reaching image
        self.state_pub = rospy.Publisher(self.rover_state_topic, RoverStateMsg, queue_size=10)
        # Also listen to our own state topic so self.state mirrors what the
        # state machine last published.
        rospy.Subscriber(self.rover_state_topic,RoverStateMsg,self.state_callback)
    def state_callback(self,data):
        """Mirror the last published rover state."""
        self.state = data.state
        #print(str(self.state))
    def waypoint_callback(self,data): ##TODO: Maybe comprassion with old waypoint ??
        """Translate the waypoint topic payload into waypoint/goal flags."""
        self.wp = data.data
        #If there is a meaningful waypoint :
        if self.wp == "1":
            self.gotWayPoint = True
            self.gpsReached = False
        elif self.wp == "2":
            self.gotWayPoint = False
            self.gpsReached = True
        else:
            self.gotWayPoint = False
            self.gpsReached = False
    def gps_callback(self,data): ##TODO: Maybe covairance ??
        """Record the latest fix; a (0, 0) coordinate marks the GPS broken."""
        self.currentGps = [data.latitude,data.longitude]
        # Bug fix: latitude was compared against the string "0" (always True
        # for a float); both coordinates are now checked against the number 0,
        # matching the original longitude check.
        if self.currentGps[0] != 0 and self.currentGps[1] != 0:
            self.gpsWorking = True
        else:
            self.gpsWorking = False
    def imu_callback(self,data): ##TODO: Maybe covairance ??
        """Record the latest IMU reading and mark the IMU healthy."""
        self.currentImuOrientation = [data.orientation.x, data.orientation.y, data.orientation.z, data.orientation.w]
        self.currentImuAngularV = [data.angular_velocity.x, data.angular_velocity.y, data.angular_velocity.z]
        self.currentImuLinearA = [data.linear_acceleration.x, data.linear_acceleration.y, data.linear_acceleration.z]
        #If IMU works correct :
        # NOTE(review): a float is never equal to '' so this predicate is
        # always True - confirm the intended health check.
        if self.currentImuOrientation[0] != '' and self.currentImuOrientation[1] != '':
            self.imuWorking = True
        else:
            self.imuWorking = False
    def encoder_callback(self,data): ##TODO: Maybe covairance ??
        """Record the latest odometry pose and mark the encoder healthy."""
        self.currentEncoderPose = [data.pose.pose.position.x, data.pose.pose.position.y,data.pose.pose.position.z]
        #If Encoder works correct :
        # NOTE(review): same always-True float-vs-'' predicate as imu_callback.
        if self.currentEncoderPose[0] != '' and self.currentEncoderPose[1] != '':
            self.encoderWorking = True
        else:
            self.encoderWorking = False
            # NOTE(review): clearing gpsReached on encoder failure looks like a
            # copy/paste remnant - confirm it is intended.
            self.gpsReached = False
    def image_detect_callback(self,data):
        """Set imageDetected while in state 3 based on the detector payload."""
        self.ballDetected = data.data
        if self.ballDetected != "-":
            if self.state == 3:
                self.imageDetected = True
                self.goBack = False
        else:
            if self.state == 3:
                self.imageDetected = False
            """if self.state == 4:
                self.goBack = True"""
    def image_reach_callback(self,data):
        """Set imageReached from the reach topic ("1" reached, "0" not)."""
        self.ballReached = data.data
        if self.ballReached == "1": ##TODO: The comprassion have to be right
            self.imageReached = True
        elif self.ballReached == "0":
            self.imageReached = False
        """elif self.ballReached == "2": #!! GO BACK !!
            self.goBack = True"""
    def publishRoverState(self, state_msg): #Publish the state of Rover
        self.state_pub.publish(state_msg)
    def checkAllSensors(self): # Checks sensors for once.
        """Fold the individual sensor flags into allSensorsWorking; a dead
        encoder alone is tolerated with a warning."""
        if self.encoderWorking == True and self.gpsWorking == True and self.imuWorking == True:
            self.allSensorsWorking = True
        elif self.encoderWorking == False and self.gpsWorking == True and self.imuWorking == True: ##TODO: Decide if encoder is critical here.
            rospy.logwarn(self.namespace + "All sensors are working except encoder.")
            self.allSensorsWorking = True
        else:
            rospy.logerr(self.namespace + "There is an error!!")
            self.allSensorsWorking = False
    def deinitialise(self):
        """Reset every flag back to the pre-initialisation state."""
        self.gpsReached = False
        self.imageDetected = False
        self.imageReached = False
        #Parameters for Initalise
        self.gpsWorking = False
        self.imuWorking = False
        self.encoderWorking = False
        self.allSensorsWorking = False
        #Parameter for Ready
        self.gotWayPoint = False
        #Parameter for Reach Image
        self.goBack = False
|
from collections import OrderedDict
import json
from six import iteritems
from neomodel import Property
from neomodel.relationship_manager import RelationshipDefinition
class StructuredThingSerializer(object):
    """Initially made to only handle StructuredNodes
    but found that it also worked for StructuredRels."""
    def __init__(self, *args, **kwargs):
        """
        Args:
            instance (StructuredNode/StructuredRel): The thing to serialize.
            fields (str[], optional): Fields to include.
                Excludes subobject fields.
        """
        self.instance = args[0]
        self.fields = kwargs.get('fields', '__all__')
        self.many = kwargs.get('many')
        if self.many:
            # Serialize each element independently and collect the results.
            self.data = [StructuredThingSerializer(item).data
                         for item in self.instance]
        else:
            self.data = OrderedDict()
            if self.should_include('id'):
                self.data['id'] = self.instance.id
            self.process_properties(self.instance.defined_properties())
        self.serialized_data = json.dumps(self.data)
    def should_include(self, key):
        """True when *key* was requested, or when all fields are included."""
        if self.fields == '__all__':
            return True
        return key in self.fields
    def process_properties(self, props):
        """Dispatch every defined property of the instance."""
        for item in props.items():
            self.process_property(item)
    def process_property(self, prop):
        """Route a (name, definition) pair to the matching handler."""
        _, definition = prop
        if isinstance(definition, Property):
            self.process_neomodel_property(prop)
        elif isinstance(definition, RelationshipDefinition):
            self.process_relationship_definition(prop)
        else:
            # If we get here, chances are neomodel has changed
            raise ValueError("Unexpected property received: {}".format(prop))
    def process_neomodel_property(self, prop):
        """Copy a plain neomodel property value onto the output dict."""
        name = prop[0]
        if self.should_include(name):
            self.data[name] = getattr(self.instance, name)
    def process_relationship_definition(self, prop):
        """Serialize every node reachable through a relationship definition."""
        name = prop[0]
        if self.should_include(name):
            self.process_rel_def_nodes(getattr(self.instance, name), name)
    def process_rel_def_nodes(self, rel_def, key):
        """Collect serialized entries for each related node under *key*."""
        self.data[key] = []
        for node in rel_def:
            self.process_rel_def_node(rel_def, node, key)
    def process_rel_def_node(self, rel_def, node, key):
        """Serialize one related node together with its relationships."""
        node_data = StructuredThingSerializer(node).data
        self.process_relationships(rel_def.all_relationships(node), node_data, key)
    def process_relationships(self, rels, rel_def_data, key):
        """Merge relationship data into the node entry, then store it."""
        for rel in rels:
            self.process_relationship(rel, rel_def_data, key)
        self.data[key].append(rel_def_data)
    def process_relationship(self, rel, rel_def_data, key):
        """Fold a single relationship's fields (minus id) into the node entry."""
        rel_data = StructuredThingSerializer(rel).data
        rel_data.pop('id')
        if rel_data:
            rel_def_data.update(rel_data)
|
from draw.DrawingPatternPoint import DrawingPatternPoint
# Sequence of pen moves (dx, dy, pen_down) consumed by the drawing code.
# NOTE(review): by name this traces the letters "R", "I", "P" stroke by
# stroke - confirm against the renderer before relying on the glyph shapes.
RIP_PATTERN = [
    DrawingPatternPoint(0, -1, True),
    DrawingPatternPoint(0, -1, True),
    DrawingPatternPoint(0, -1, True),
    DrawingPatternPoint(0, -1, True),
    DrawingPatternPoint(0, -1, True),
    DrawingPatternPoint(1, 0, True),
    DrawingPatternPoint(1, 0, True),
    DrawingPatternPoint(1, 0, True),
    DrawingPatternPoint(0, 1, True),
    DrawingPatternPoint(0, 1, True),
    DrawingPatternPoint(-1, 0, True),
    DrawingPatternPoint(-1, 0, True),
    DrawingPatternPoint(0, 1, True),
    DrawingPatternPoint(1, 0, True),
    DrawingPatternPoint(0, 1, True),
    DrawingPatternPoint(1, 0, True),
    DrawingPatternPoint(0, 1, True),
    DrawingPatternPoint(1, 0, True),
    DrawingPatternPoint(1, 0, True),
    DrawingPatternPoint(1, 0, True),
    DrawingPatternPoint(0, -1, True),
    DrawingPatternPoint(0, -1, True),
    DrawingPatternPoint(0, -1, True),
    DrawingPatternPoint(0, -1, True),
    DrawingPatternPoint(0, -1, True),
    DrawingPatternPoint(1, 0, True),
    DrawingPatternPoint(0, 1, True),
    DrawingPatternPoint(0, 1, True),
    DrawingPatternPoint(0, 1, True),
    DrawingPatternPoint(0, 1, True),
    DrawingPatternPoint(0, 1, True),
    DrawingPatternPoint(1, 0, True),
    DrawingPatternPoint(1, 0, True),
    DrawingPatternPoint(0, -1, True),
    DrawingPatternPoint(0, -1, True),
    DrawingPatternPoint(0, -1, True),
    DrawingPatternPoint(0, -1, True),
    DrawingPatternPoint(0, -1, True),
    DrawingPatternPoint(1, 0, True),
    DrawingPatternPoint(1, 0, True),
    DrawingPatternPoint(1, 0, True),
    DrawingPatternPoint(0, 1, True),
    DrawingPatternPoint(0, 1, True),
    DrawingPatternPoint(-1, 0, True),
    DrawingPatternPoint(-1, 0, True),
    DrawingPatternPoint(0, 1, True),
    DrawingPatternPoint(0, 1, True),
    DrawingPatternPoint(0, 1, True),
    DrawingPatternPoint(1, 0, True),
    DrawingPatternPoint(1, 0, True),
    DrawingPatternPoint(1, 0, True),
    DrawingPatternPoint(1, 0, True),
    DrawingPatternPoint(1, 0, True),
    ]
|
#!/usr/bin/env python
# Info module template
#############################################
# WARNING #
#############################################
#
# This file is auto generated by
# https://github.com/jgroom33/vmware_rest_code_generator
#
# Do not edit this file manually.
#
# Changes should be made in the swagger used to
# generate this file or in the generator
#
#############################################
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import socket
import json
DOCUMENTATION = """
module: configmgmt_api_v1_schedules
short_description: Handle resource of type configmgmt_api_v1_schedules
description: Handle resource of type configmgmt_api_v1_schedules
options:
additionalAttributes:
description:
- Used by I(state=['post'])
type: dict
configMgmtJobId:
description:
- Used by I(state=['post'])
type: str
frequency:
description:
- List of schedule frequencies, Allowed values ONCE, DAILY, WEEKLY, MONTHLY
- Used by I(state=['get'])
type: str
friday:
description:
- Friday, Allowed values ON or OFF
- Used by I(state=['get'])
type: str
fromNextOperationTime:
description:
- (Optional) Time of next operation on Network Construct
- Used by I(state=['get'])
type: str
fromRecentOperationTime:
description:
- (Optional) Time of recent operation on Network Construct
- Used by I(state=['get'])
type: str
id:
description:
- Used by I(state=['post'])
type: str
lastExecutionTime:
description:
- Used by I(state=['post'])
type: str
limit:
description:
- The size of a returned page
- Used by I(state=['get'])
type: str
metaDataFields:
description:
- '(Optional) List of meta data to be included. The allowed values are: type,
frequency, state, weekDays'
- Used by I(state=['get'])
type: str
monday:
description:
- Monday, Allowed values ON or OFF
- Used by I(state=['get'])
type: str
name:
description:
- List of schedule names
- Used by I(state=['get', 'post'])
type: str
nextExecutionTime:
description:
- Used by I(state=['post'])
type: str
numberOfNEs:
description:
- Used by I(state=['post'])
type: int
offset:
description:
- (Optional) Offset for current index of data to return
- Used by I(state=['get'])
type: str
profileName:
description:
- List of profile names
- Used by I(state=['get'])
type: str
releaseNumber:
description:
- List of releaseNumbers
- Used by I(state=['get'])
type: str
resourcePartitionInfo:
description:
- (Optional) Resource partition info
- Used by I(state=['get'])
type: str
saturday:
description:
    - Saturday, Allowed values ON or OFF
- Used by I(state=['get'])
type: str
scheduleDetails:
description:
- 'Validate attributes are:'
- ' - C(scheduleFrequency) (str): '
- ' - C(scheduleTime) (str): '
- ' - C(scheduleWeeklyDays) (dict): '
- Used by I(state=['post'])
type: dict
scheduleId:
description:
- List of schedule Ids
- Used by I(state=['get'])
type: str
searchFields:
description:
- (Optional) List of comma separated fields to search on. If none are specified,
all supported fields are implied.
- Used by I(state=['get'])
type: str
searchText:
description:
- (Optional) The searchable text
- Used by I(state=['get'])
type: str
sortBy:
description:
- (Optional) List of comma separated fields by which to sort the result. A dash
or negative sign before a field indicates descending order; by default ascending
order is used
- Used by I(state=['get'])
type: str
state:
choices:
- get
- post
description: []
type: str
sunday:
description:
- Sunday, Allowed values ON or OFF
- Used by I(state=['get'])
type: str
thursday:
description:
- Thursday, Allowed values ON or OFF
- Used by I(state=['get'])
type: str
toNextOperationTime:
description:
- (Optional) Time of next operation on Network Construct
- Used by I(state=['get'])
type: str
toRecentOperationTime:
description:
- (Optional) Time of recent operation on Network Construct
- Used by I(state=['get'])
type: str
tuesday:
description:
- Tuesday, Allowed values ON or OFF
- Used by I(state=['get'])
type: str
type:
description:
- List of schedule types
- Used by I(state=['get', 'post'])
type: str
typeGroup:
description:
- List of typeGroups
- Used by I(state=['get'])
type: str
wednesday:
description:
- Wednesday, Allowed values ON or OFF
- Used by I(state=['get'])
type: str
weekDays:
description:
- List of Schedule weekdays
- Used by I(state=['get'])
type: str
author: []
version_added: 1.0.0
requirements:
- python >= 3.6
"""
IN_QUERY_PARAMETER = [
"frequency",
"friday",
"fromNextOperationTime",
"fromRecentOperationTime",
"limit",
"metaDataFields",
"monday",
"offset",
"profileName",
"releaseNumber",
"resourcePartitionInfo",
"saturday",
"scheduleId",
"searchFields",
"searchText",
"sortBy",
"sunday",
"thursday",
"toNextOperationTime",
"toRecentOperationTime",
"tuesday",
"typeGroup",
"wednesday",
"weekDays",
]
from ansible.module_utils.basic import env_fallback
try:
from ansible_module.turbo.module import AnsibleTurboModule as AnsibleModule
except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ciena.mcp.plugins.module_utils.mcp import (
gen_args,
open_session,
update_changed_flag,
)
def prepare_argument_spec():
argument_spec = {
"mcp_hostname": dict(
type="str", required=False, fallback=(env_fallback, ["MCP_HOST"])
),
"mcp_username": dict(
type="str", required=False, fallback=(env_fallback, ["MCP_USER"])
),
"mcp_password": dict(
type="str",
required=False,
no_log=True,
fallback=(env_fallback, ["MCP_PASSWORD"]),
),
}
argument_spec["weekDays"] = {"type": "str", "operationIds": ["get"]}
argument_spec["wednesday"] = {"type": "str", "operationIds": ["get"]}
argument_spec["typeGroup"] = {"type": "str", "operationIds": ["get"]}
argument_spec["type"] = {"type": "str", "operationIds": ["get", "post"]}
argument_spec["tuesday"] = {"type": "str", "operationIds": ["get"]}
argument_spec["toRecentOperationTime"] = {"type": "str", "operationIds": ["get"]}
argument_spec["toNextOperationTime"] = {"type": "str", "operationIds": ["get"]}
argument_spec["thursday"] = {"type": "str", "operationIds": ["get"]}
argument_spec["sunday"] = {"type": "str", "operationIds": ["get"]}
argument_spec["state"] = {"type": "str", "choices": ["get", "post"]}
argument_spec["sortBy"] = {"type": "str", "operationIds": ["get"]}
argument_spec["searchText"] = {"type": "str", "operationIds": ["get"]}
argument_spec["searchFields"] = {"type": "str", "operationIds": ["get"]}
argument_spec["scheduleId"] = {"type": "str", "operationIds": ["get"]}
argument_spec["scheduleDetails"] = {"type": "dict", "operationIds": ["post"]}
argument_spec["saturday"] = {"type": "str", "operationIds": ["get"]}
argument_spec["resourcePartitionInfo"] = {"type": "str", "operationIds": ["get"]}
argument_spec["releaseNumber"] = {"type": "str", "operationIds": ["get"]}
argument_spec["profileName"] = {"type": "str", "operationIds": ["get"]}
argument_spec["offset"] = {"type": "str", "operationIds": ["get"]}
argument_spec["numberOfNEs"] = {"type": "int", "operationIds": ["post"]}
argument_spec["nextExecutionTime"] = {"type": "str", "operationIds": ["post"]}
argument_spec["name"] = {"type": "str", "operationIds": ["get", "post"]}
argument_spec["monday"] = {"type": "str", "operationIds": ["get"]}
argument_spec["metaDataFields"] = {"type": "str", "operationIds": ["get"]}
argument_spec["limit"] = {"type": "str", "operationIds": ["get"]}
argument_spec["lastExecutionTime"] = {"type": "str", "operationIds": ["post"]}
argument_spec["id"] = {"type": "str", "operationIds": ["post"]}
argument_spec["fromRecentOperationTime"] = {"type": "str", "operationIds": ["get"]}
argument_spec["fromNextOperationTime"] = {"type": "str", "operationIds": ["get"]}
argument_spec["friday"] = {"type": "str", "operationIds": ["get"]}
argument_spec["frequency"] = {"type": "str", "operationIds": ["get"]}
argument_spec["configMgmtJobId"] = {"type": "str", "operationIds": ["post"]}
argument_spec["additionalAttributes"] = {"type": "dict", "operationIds": ["post"]}
return argument_spec
async def main():
module_args = prepare_argument_spec()
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
session = await open_session(
mcp_hostname=module.params["mcp_hostname"],
mcp_username=module.params["mcp_username"],
mcp_password=module.params["mcp_password"],
)
result = await entry_point(module, session)
module.exit_json(**result)
def url(params):
return "https://{mcp_hostname}/configmgmt/api/v1/schedules".format(**params)
async def entry_point(module, session):
func = globals()[("_" + module.params["state"])]
return await func(module.params, session)
async def _get(params, session):
accepted_fields = ["name", "type"]
spec = {}
for i in accepted_fields:
if params[i] is not None:
spec[i] = params[i]
_url = "https://{mcp_hostname}/configmgmt/api/v1/schedules".format(
**params
) + gen_args(params, IN_QUERY_PARAMETER)
async with session.get(_url, json=spec) as resp:
content_types = [
"application/json-patch+json",
"application/vnd.api+json",
"application/json",
]
try:
if resp.headers["Content-Type"] in content_types:
_json = await resp.json()
else:
print("response Content-Type not supported")
except KeyError:
_json = {}
return await update_changed_flag(_json, resp.status, "get")
async def _post(params, session):
accepted_fields = [
"additionalAttributes",
"configMgmtJobId",
"id",
"lastExecutionTime",
"name",
"nextExecutionTime",
"numberOfNEs",
"scheduleDetails",
"type",
]
spec = {}
for i in accepted_fields:
if params[i] is not None:
spec[i] = params[i]
_url = "https://{mcp_hostname}/configmgmt/api/v1/schedules".format(
**params
) + gen_args(params, IN_QUERY_PARAMETER)
async with session.post(_url, json=spec) as resp:
content_types = [
"application/json-patch+json",
"application/vnd.api+json",
"application/json",
]
try:
if resp.headers["Content-Type"] in content_types:
_json = await resp.json()
else:
print("response Content-Type not supported")
except KeyError:
_json = {}
return await update_changed_flag(_json, resp.status, "post")
if __name__ == "__main__":
import asyncio
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
#!/usr/bin/env python3
from cereal import car
from selfdrive.car.chrysler.values import CAR
from selfdrive.car import STD_CARGO_KG, scale_rot_inertia, scale_tire_stiffness, gen_empty_fingerprint
from selfdrive.car.interfaces import CarInterfaceBase
ButtonType = car.CarState.ButtonEvent.Type
class CarInterface(CarInterfaceBase):
@staticmethod
def compute_gb(accel, speed):
return float(accel) / 3.0
@staticmethod
def get_params(candidate, fingerprint=None, car_fw=None):
if fingerprint is None:
fingerprint = gen_empty_fingerprint()
ret = CarInterfaceBase.get_std_params(candidate, fingerprint)
ret.carName = "chrysler"
ret.safetyModel = car.CarParams.SafetyModel.chrysler
# Chrysler port is a community feature, since we don't own one to test
ret.communityFeature = True
# Speed conversion: 20, 45 mph
ret.wheelbase = 3.089 # in meters for Pacifica Hybrid 2017
ret.steerRatio = 16.2 # Pacifica Hybrid 2017
ret.mass = 1964. + STD_CARGO_KG # kg curb weight Pacifica Hybrid 2017
### INDI TUNE ###
# innerLoopGain is curvature gain.
# outerLoopGain is lane centering gain.
# timeConstant is smoothness.
# actuatorEffectiveness is gain modulation based on accuracy of path.
# steerActuatorDelay is how far its looking ahead.
# steerRateCost is how eager the steering is to make sudden changes.
ret.lateralTuning.init('indi')
ret.lateralTuning.indi.innerLoopGainBP = [0, 20, 40]
ret.lateralTuning.indi.innerLoopGainV = [1.5, 2.5, 2.5]
ret.lateralTuning.indi.outerLoopGainBP = [0, 20, 40]
ret.lateralTuning.indi.outerLoopGainV = [2.5, 3.5, 4.0]
ret.lateralTuning.indi.timeConstantBP = [0, 20, 40]
ret.lateralTuning.indi.timeConstantV = [0.5, 0.8, 0.8]
ret.lateralTuning.indi.actuatorEffectivenessBP = [0, 10, 20]
ret.lateralTuning.indi.actuatorEffectivenessV = [2.0, 3.5, 4.0]
ret.steerActuatorDelay = 0.15
ret.steerRateCost = 0.45
ret.steerLimitTimer = 0.4
### TF PID TUNE ###
#ret.lateralTuning.pid.kpBP, ret.lateralTuning.pid.kiBP = [[0., 9., 20.], [0., 9., 20.]]
#ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.0125, 0.0375, 0.075], [0.0025, 0.0075, 0.0255]]
#ret.lateralTuning.pid.kf = 0.00000000000000000000006 # full torque for 10 deg at 80mph means 0.00007818594
#ret.steerActuatorDelay = 0.1
#ret.steerRateCost = 0.4
#ret.steerLimitTimer = 0.4
### STOCK TUNE ###
#ret.lateralTuning.pid.kpBP, ret.lateralTuning.pid.kiBP = [[9., 20.], [9., 20.]]
#ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.15, 0.30], [0.03, 0.05]]
#ret.lateralTuning.pid.kf = 0.00006 # full torque for 10 deg at 80mph means 0.00007818594
#ret.steerActuatorDelay = 0.1
#ret.steerRateCost = 0.7
#ret.steerLimitTimer = 0.4
if candidate in (CAR.JEEP_CHEROKEE, CAR.JEEP_CHEROKEE_2019):
ret.wheelbase = 2.91 # in meters
ret.steerRatio = 12.7
ret.steerActuatorDelay = 0.2 # in seconds
if candidate in (CAR.CHRYSLER_300_2018):
ret.wheelbase = 3.05308 # in meters
ret.steerRatio = 15.5 # 2013 V-6 (RWD) — 15.5:1 V-6 (AWD) — 16.5:1 V-8 (RWD) — 15.5:1 V-8 (AWD) — 16.5:1
ret.mass = 1828.0 + STD_CARGO_KG # 2013 V-6 RWD
ret.steerActuatorDelay = 0.38
# # ret.steerRateCost = 0.35
# # ret.lateralTuning.pid.kf = 0.00006 # full torque for 10 deg at 80mph means 0.00007818594
# ret.steerRateCost = 0.1
# ret.steerLimitTimer = 0.8
# ret.lateralTuning.init('indi')
# ret.lateralTuning.indi.innerLoopGain = 2.65 # 2.48
# ret.lateralTuning.indi.outerLoopGainBP = [0, 45 * 0.45, 65 * 0.45, 85 * 0.45]
# ret.lateralTuning.indi.outerLoopGainV = [0.55, 0.73, 1.58, 1.95]
# ret.lateralTuning.indi.timeConstant = 10.0
# ret.lateralTuning.indi.actuatorEffectiveness = 1.55
ret.centerToFront = ret.wheelbase * 0.44
ret.minSteerSpeed = 3.8 # m/s
if candidate in (CAR.PACIFICA_2019_HYBRID, CAR.PACIFICA_2020, CAR.JEEP_CHEROKEE_2019):
# TODO allow 2019 cars to steer down to 13 m/s if already engaged.
ret.minSteerSpeed = 17.5 # m/s 17 on the way up, 13 on the way down once engaged.
# starting with reasonable value for civic and scaling by mass and wheelbase
ret.rotationalInertia = scale_rot_inertia(ret.mass, ret.wheelbase)
# TODO: start from empirically derived lateral slip stiffness for the civic and scale by
# mass and CG position, so all cars will have approximately similar dyn behaviors
ret.tireStiffnessFront, ret.tireStiffnessRear = scale_tire_stiffness(ret.mass, ret.wheelbase, ret.centerToFront)
ret.enableCamera = True
ret.openpilotLongitudinalControl = True
return ret
# returns a car.CarState
def update(self, c, can_strings):
# ******************* do can recv *******************
self.cp.update_strings(can_strings)
self.cp_cam.update_strings(can_strings)
ret = self.CS.update(self.cp, self.cp_cam)
ret.canValid = self.cp.can_valid and self.cp_cam.can_valid
# speeds
ret.steeringRateLimited = self.CC.steer_rate_limited if self.CC is not None else False
# accel/decel button presses
buttonEvents = []
if self.CS.accelCruiseButton or self.CS.accelCruiseButtonChanged:
be = car.CarState.ButtonEvent.new_message()
be.type = ButtonType.accelCruise
be.pressed = self.CS.accelCruiseButton
buttonEvents.append(be)
if self.CS.decelCruiseButton or self.CS.decelCruiseButtonChanged:
be = car.CarState.ButtonEvent.new_message()
be.type = ButtonType.decelCruise
be.pressed = self.CS.decelCruiseButton
buttonEvents.append(be)
if self.CS.resumeCruiseButton or self.CS.resumeCruiseButtonChanged:
be = car.CarState.ButtonEvent.new_message()
be.type = ButtonType.resumeCruise
be.pressed = self.CS.resumeCruiseButton
buttonEvents.append(be)
ret.buttonEvents = buttonEvents
# events
events = self.create_common_events(ret, extra_gears=[car.CarState.GearShifter.low],
gas_resume_speed=2.)
if ret.vEgo < self.CP.minSteerSpeed:
events.add(car.CarEvent.EventName.belowSteerSpeed)
ret.events = events.to_msg()
# copy back carState packet to CS
self.CS.out = ret.as_reader()
return self.CS.out
# pass in a car.CarControl
# to be called @ 100hz
def apply(self, c):
if (self.CS.frame == -1):
return [] # if we haven't seen a frame 220, then do not update.
can_sends = self.CC.update(c.enabled, self.CS, c.actuators, c.cruiseControl.cancel, c.hudControl.visualAlert,
self.CS.out.cruiseState.speed, c.cruiseControl.targetSpeed)
return can_sends
|
# (c) 2021 by William H. Grover | wgrover@engr.ucr.edu | groverlab.org
import matplotlib.pyplot as plt
import csv, sys, numpy, math
class candy:
def __init__(self):
self.points = {"A":[], "R":[], "P":[], "O":[], "Y":[], "G":[], "L":[], "D":[], "W":[] }
def add(self, color, x, y):
self.points['A'].append([x, y])
self.points[color].append([x, y])
def arrayify(self):
for color in self.points:
self.points[color] = numpy.array(self.points[color])
def plot_all(self, color='gray', theta=0.0):
for x,y in self.points['A']:
x1, y1 = rot(x, y, theta)
plt.plot(x1, y1, "o", color=color, markeredgecolor="k")
# plt.plot(self.points['A'][:,0], self.points['A'][:,1], "o", color="gray", markeredgecolor="k")
def plot_colors(self):
for color in self.points:
if color not in ['A']:
for x, y in self.points[color]:
plt.text(x, y, color, horizontalalignment='center', verticalalignment='center')
def plot(self, color, theta, markercolor="gray"):
for x,y in self.points[color]:
x1, y1 = rot(x, y, theta)
plt.plot(x1, y1, "o", color=markercolor, markeredgecolor='k')
def plot_all_colors(self):
for color in self.points:
self.plot(color)
def recenter(self):
# recenter on 0,0:
mean_x = numpy.mean(self.points['A'][:,0])
mean_y = numpy.mean(self.points['A'][:,1])
for color in self.points:
self.points[color][:,0] = self.points[color][:,0] - mean_x
self.points[color][:,1] = self.points[color][:,1] - mean_y
# scale to +/- 1
x_scale = (numpy.max(self.points['A'][:,0]) - numpy.min(self.points['A'][:,0])) / 2.0
y_scale = (numpy.max(self.points['A'][:,1]) - numpy.min(self.points['A'][:,1])) / 2.0
for color in self.points:
self.points[color][:,0] = self.points[color][:,0] / x_scale
self.points[color][:,1] = self.points[color][:,1] / y_scale
def rot(x,y,theta):
return [x * math.cos(theta) + y * math.sin(theta), -x * math.sin(theta) + y * math.cos(theta)]
def compare(candy1, candy2):
bins = 100
totals = numpy.zeros(bins)
for color in candy1.points:
if color in ['A']:
print(color, len(candy1.points[color]), "(ignored)")
else:
print(color, len(candy1.points[color]))
for theta, n in zip(numpy.linspace(0,2.0*3.1415926, bins), range(bins)):
total = 0
for x1,y1 in candy1.points[color]:
x1,y1 = rot(x1,y1,theta) # rotate just the first candy?
min_distance = 9999999.99
for x2, y2 in candy2.points[color]:
dist = math.sqrt((x2-x1)**2 + (y2-y1)**2)
if dist < min_distance:
min_distance = dist
total += min_distance
totals[n] = totals[n] + total
# plt.plot(totals)
min_theta = 0.0
min_total = 99999999.99
for theta, total in zip(numpy.linspace(0,2.0*3.1415926, bins), totals):
if total < min_total:
min_total = total
min_theta = theta
print("min_total = %0.2f" % (min_total) )
print("min_theta = %0.2f" % (min_theta) )
return(min_total, min_theta)
candies = {}
for filename, candyID in zip(["1.1.txt", "1.2.txt", "1.3.txt", "1.4.txt", "1.5.txt", "1.6.txt", "1.7.txt", "1.8.txt", "1.9.txt",
"2.2.txt", "3.2.txt", "4.2.txt", "5.2.txt", "6.2.txt"],
["c11", "c12", "c13", "c14", "c15", "c16", "c17", "c18", "c19",
"c22", "c32", "c42", "c52", "c62"]):
c = candy()
print(filename)
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter="\t")
for color, x, y in csv_reader:
c.add(color, float(x), float(y))
c.arrayify()
c.recenter()
candies[candyID] = c
for candy in ["c12", "c22", "c32", "c42", "c52", "c62"]:
min_totals = []
min_totals.append(compare(candies[candy], candies["c11"])[0])
min_totals.append(compare(candies[candy], candies["c12"])[0])
min_totals.append(compare(candies[candy], candies["c13"])[0])
min_totals.append(compare(candies[candy], candies["c14"])[0])
min_totals.append(compare(candies[candy], candies["c15"])[0])
min_totals.append(compare(candies[candy], candies["c16"])[0])
min_totals.append(compare(candies[candy], candies["c17"])[0])
min_totals.append(compare(candies[candy], candies["c18"])[0])
min_totals.append(compare(candies[candy], candies["c19"])[0])
plt.plot(min_totals)
plt.show()
|
import torch
import torch.nn as nn
from ..builder import LOSSES
def binary_logistic_regression_loss(reg_score,
label,
threshold=0.5,
ratio_range=(1.05, 21),
eps=1e-5):
"""Binary Logistic Regression Loss."""
label = label.view(-1).to(reg_score.device)
reg_score = reg_score.contiguous().view(-1)
pmask = (label > threshold).float().to(reg_score.device)
num_positive = max(torch.sum(pmask), 1)
num_entries = len(label)
ratio = num_entries / num_positive
# clip ratio value between ratio_range
ratio = min(max(ratio, ratio_range[0]), ratio_range[1])
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
loss = coef_1 * pmask * torch.log(reg_score + eps) + coef_0 * (
1.0 - pmask) * torch.log(1.0 - reg_score + eps)
loss = -torch.mean(loss)
return loss
@LOSSES.register_module()
class BinaryLogisticRegressionLoss(nn.Module):
"""Binary Logistic Regression Loss.
It will calculate binary logistic regression loss given reg_score and
label.
"""
def forward(self,
reg_score,
label,
threshold=0.5,
ratio_range=(1.05, 21),
eps=1e-5):
"""Calculate Binary Logistic Regression Loss.
Args:
reg_score (torch.Tensor): Predicted score by model.
label (torch.Tensor): Groundtruth labels.
threshold (float): Threshold for positive instances.
Default: 0.5.
ratio_range (tuple): Lower bound and upper bound for ratio.
Default: (1.05, 21)
eps (float): Epsilon for small value. Default: 1e-5.
Returns:
torch.Tensor: Returned binary logistic loss.
"""
return binary_logistic_regression_loss(reg_score, label, threshold,
ratio_range, eps)
|
import unittest
from baseclasses import BaseSolver
from baseclasses.utils import Error
from baseclasses.decorators import require_mpi
class SOLVER(BaseSolver):
def __init__(self, name, options={}, comm=None, checkDefaultOptions=True, caseSensitiveOptions=False):
"""Create an artificial class for testing"""
category = "Solver for testing BaseSolver"
defaultOptions = {
"boolOption": [bool, True],
"floatOption": [float, 10.0],
"intOption": [int, [1, 2, 3]],
"strOption": [str, ["str1", "str2", "str3"]],
"listOption": [list, []],
"multiOption": [(str, dict), {}],
}
immutableOptions = {"strOption"}
deprecatedOptions = {
"oldOption": "Use boolOption instead.",
}
informs = {
-1: "Failure -1",
0: "Success",
1: "Failure 1",
}
# Initialize the inherited BaseSolver
super().__init__(
name,
category,
defaultOptions=defaultOptions,
options=options,
immutableOptions=immutableOptions,
deprecatedOptions=deprecatedOptions,
comm=comm,
informs=informs,
checkDefaultOptions=checkDefaultOptions,
caseSensitiveOptions=caseSensitiveOptions,
)
class TestOptions(unittest.TestCase):
def test_options(self):
# initialize solver
floatValue_set = 200.0
intValue_set = 3
options = {"floatOption": floatValue_set, "intOption": intValue_set}
solver = SOLVER("test", options=options)
solver.printOptions()
# test getOption for initialized option
floatValue_get = solver.getOption("floatOption")
self.assertEqual(floatValue_set, floatValue_get)
# test getOption for default option
strValue_get = solver.getOption("strOption")
self.assertEqual("str1", strValue_get)
# test CaseInsensitiveDict
intValue_get1 = solver.getOption("intoption")
intValue_get2 = solver.getOption("INTOPTION")
self.assertEqual(intValue_get1, intValue_get2)
# test setOption
solver.setOption("boolOption", False)
boolValue_get = solver.getOption("boolOption")
self.assertEqual(False, boolValue_get)
# test List type options
listValue_get = solver.getOption("listOption")
self.assertEqual([], listValue_get)
listValue_set = [1, 2, 3]
solver.setOption("listOption", listValue_set)
listValue_get = solver.getOption("listOption")
self.assertEqual(listValue_set, listValue_get)
solver.printModifiedOptions()
# test options that accept multiple types
testValues = ["value", {"key": "value"}]
for multiValue_set in testValues:
solver.setOption("multiOption", multiValue_set)
multiValue_get = solver.getOption("multiOption")
self.assertEqual(multiValue_set, multiValue_get)
# test Errors
with self.assertRaises(Error) as context:
solver.getOption("invalidOption") # test invalid option in getOption
self.assertTrue("intOption" in context.exception.message) # check that intoption is offered as a suggestion
with self.assertRaises(Error) as context:
solver.setOption("invalidOption", 1) # test invalid option in setOption
self.assertTrue("intOption" in context.exception.message) # check that intoption is offered as a suggestion
with self.assertRaises(Error):
solver.setOption("intOption", 4) # test value not in list
with self.assertRaises(Error):
solver.setOption("intOption", "3") # test type checking with list
with self.assertRaises(Error):
solver.setOption("floatOption", 4) # test type checking without list
with self.assertRaises(Error):
solver.setOption("strOPTION", "str2") # test immutableOptions
with self.assertRaises(Error):
solver.setOption("oldoption", 4) # test deprecatedOptions
def test_checkDefaultOptions(self):
# initialize solver
solver = SOLVER("test", checkDefaultOptions=False)
solver.setOption("newOption", 1)
self.assertEqual(solver.getOption("newOption"), 1)
with self.assertRaises(Error):
solver.getOption("nonexistant option") # test that this name should be rejected
def test_caseSensitive(self):
# initialize solver
solver = SOLVER("test", caseSensitiveOptions=True)
with self.assertRaises(Error):
solver.getOption("booloption") # test that this name should be rejected
def test_getOptions(self):
solver = SOLVER("test")
options = solver.getOptions()
self.assertIn("floatOption", options)
self.assertEqual(len(options), 6)
def test_getModifiedOptions(self):
solver = SOLVER("test")
modifiedOptions = solver.getModifiedOptions()
self.assertEqual(len(modifiedOptions), 0)
solver.setOption("boolOption", False)
modifiedOptions = solver.getModifiedOptions()
self.assertEqual(list(modifiedOptions.keys()), ["boolOption"])
class TestComm(unittest.TestCase):
N_PROCS = 2
@require_mpi
def test_comm_with_mpi(self):
from mpi4py import MPI
# initialize solver
solver = SOLVER("testComm", comm=MPI.COMM_WORLD)
self.assertFalse(solver.comm is None)
solver.printOptions()
def test_comm_without_mpi(self):
# initialize solver
solver = SOLVER("testComm", comm=None)
self.assertTrue(solver.comm is None)
solver.printOptions() # this should print current options twice since comm is not set and N_PROCS=2
class TestInforms(unittest.TestCase):
def test_informs(self):
solver = SOLVER("testInforms")
self.assertEqual(solver.informs[0], "Success")
|
# coding: utf-8
from zeit.connector.resource import Resource
from zeit.importer.article import Article
from zeit.importer.highres import ImageHash
from zeit.importer.interfaces import DOC_NS, PRINT_NS
import datetime
import io
import logging
import logging.config
import lxml.etree
import optparse
import os
import pkg_resources
import re
import shutil
import sys
import unicodedata
import urllib.parse
import yaml
import zeit.connector.connector
import zeit.connector.interfaces
import zeit.importer.interfaces
import zope.component
log = logging.getLogger(__name__)
def mangleQPSName(qps_name):
qps_name = qps_name.replace("ƒ", "Ae") # Ä
qps_name = qps_name.replace("‹", "Ue") # Ü
qps_name = qps_name.replace("÷", "Oe") # Ö
qps_name = qps_name.replace("‰", "ae") # ä
qps_name = qps_name.replace("¸", "ue") # ü
qps_name = qps_name.replace("ˆ", "oe") # ö
qps_name = qps_name.replace("fl", "ss")
qps_name = qps_name.replace("&", "")
qps_name = qps_name.replace("?", "")
qps_name = qps_name.strip('_- ')
cname = re.compile(r'[\ \_.:;#+*/\']').sub('-', qps_name)
cname = re.compile(r'[^A-Za-z0-9\-]').sub('', cname)
cname = re.compile('-+').sub('-', cname)
return cname
def ensure_collection(unique_id):
"""If the target collection does not exist, it will be created."""
connector = zope.component.getUtility(zeit.connector.interfaces.IConnector)
path = urllib.parse.urlparse(unique_id).path.split('/')[1:]
unique_id = 'http://xml.zeit.de'
for segment in path:
unique_id = os.path.join(unique_id, segment)
try:
connector[unique_id]
except KeyError:
name = os.path.basename(unique_id)
res = Resource(unique_id, name, 'collection', io.BytesIO(b''))
connector.add(res)
log.debug('Created collection %s', unique_id)
return unique_id
def copyExportToArchive(input_dir):
settings = zope.component.getUtility(zeit.importer.interfaces.ISettings)
today = datetime.datetime.today()
archive_path = os.path.normpath('%s/%s/%s' % (
settings['k4_archive_dir'],
today.strftime("%Y"),
today.strftime("%m-%d-%a")))
if os.path.isdir(archive_path):
for i in range(1, 20):
tmp_path = '%s-%d' % (archive_path, i)
if os.path.isdir(tmp_path):
continue
else:
archive_path = tmp_path
break
shutil.copytree(input_dir, archive_path)
log.info('Copied input articles from %s to %s', input_dir, archive_path)
def run_dir(input_dir, product_id_in):
if not os.path.isdir(input_dir):
raise IOError("No such directory '%s'" % (input_dir,))
connector = zope.component.getUtility(zeit.connector.interfaces.IConnector)
settings = zope.component.getUtility(zeit.importer.interfaces.ISettings)
highres_images = None # Wait for the 1st article to tell us the volume
count = 0
cnames = []
k4_files = os.listdir(input_dir)
boxes = {}
articles = {}
error_occurred = False
for (k4_filename, k4_filepath) in [
(f, os.path.join(input_dir, f)) for f in k4_files]:
try:
if (os.path.isdir(k4_filepath)):
continue
elif k4_filename[0:4] == 'img_':
# We handle img-xml, when it is discovered inside the article
# XML.
continue
log.info('Importing %s', k4_filename)
doc = Article(k4_filepath)
jobname = doc.getAttributeValue(DOC_NS, 'jobname')
if not jobname:
raise Exception("Original name not found '%s'" % k4_filepath)
log.debug('k4name %s', jobname)
cname = jobname
if cname.endswith('.xml'):
cname = cname[:-4]
cname = mangleQPSName(cname)
if cname[0] == '_':
cname = cname[1:]
# Deduplicate filenames
if cname in cnames:
cname = cname + str(count)
cnames.append(cname)
# set extra metadata
doc.metadata.append(
('http://namespaces.zeit.de/CMS/document', 'file-name', cname))
doc.metadata.append(
('http://namespaces.zeit.de/CMS/document', 'export_cds', 'no'))
# create the new resource
log.debug('urlified %s', cname)
# get infos for archive paths
year = doc.getAttributeValue(DOC_NS, 'year')
volume = doc.getAttributeValue(DOC_NS, 'volume')
print_ressort = doc.getAttributeValue(PRINT_NS, 'ressort')
product_id = doc.get_product_id(product_id_in, k4_filename)
log.debug('product_id %s ', product_id)
import_folders = []
if not all([year, volume, print_ressort]):
raise ValueError('Missing metadata in %s', cname)
if highres_images is None:
highres_images = hash_highres_dir(year, volume)
print_ressort = mangleQPSName(print_ressort).lower()
import_root = ensure_collection(
os.path.join(settings['import_root'], product_id, year,
volume, print_ressort))
import_folders.append(import_root)
import_root_in = ensure_collection(
os.path.join(settings['import_root_in'], product_id, year,
volume, print_ressort))
import_folders.append(import_root_in)
try:
if doc.zon_images:
img_base_id = ensure_collection(
os.path.join(settings['import_root'], product_id,
year, volume, 'zon-images', cname))
set_zon_image_uniqueId(doc, img_base_id)
for xml_res, lowres, highres in create_image_resources(
input_dir, doc, img_base_id):
try:
lowres_hash = ImageHash(lowres.id, lowres.data)
except Exception as e:
log.warning(
'Could not hash %s: %s' % (lowres.id, e))
else:
highres_hash = lowres_hash.find_match(
highres_images)
if highres_hash:
highres.data = open(highres_hash.id, 'rb')
connector.add(highres)
connector.add(lowres)
connector.add(xml_res)
log.info("An image was imported for %s", cname)
log.info("All images were imported for %s", cname)
except Exception:
log.error("Some or all images for %s could not be imported.",
cname, exc_info=True)
doc.addAttributesToDoc(product_id, year, volume, cname)
new_xml = doc.to_string()
for import_folder in import_folders:
unique_id = os.path.join(import_folder, cname)
try:
connector[unique_id]
log.info("%s wurde _nicht_ neu importiert", unique_id)
continue
except KeyError:
if new_xml and 'Kasten' in unique_id:
boxes[unique_id] = (doc, cname)
elif new_xml:
articles[unique_id] = (doc, cname)
count = count + 1
log.info('Done importing %s', cname)
except Exception:
log.error('Error importing %s', k4_filename, exc_info=True)
error_occurred = True
continue
unintegrated_boxes = process_boxes(boxes, articles)
content = {}
content.update(articles)
content.update(unintegrated_boxes)
put_content(content)
if count > 0:
copyExportToArchive(input_dir)
else:
log.warning('No documents to import found in %s', input_dir)
return not error_occurred
def put_content(resources):
    """Store article resources in the CMS via the DAV connector.

    :param resources: mapping of uniqueId -> (doc, cname) as collected by
        the import run.
    """
    connector = zope.component.getUtility(zeit.connector.interfaces.IConnector)
    # Iterate items() directly instead of keys() + per-key lookup.
    for unique_id, (doc, cname) in resources.items():
        res = Resource(
            unique_id, cname, 'article',
            io.BytesIO(doc.to_string().encode('utf-8')),
            contentType='text/xml')
        for prop in doc.metadata:
            # Raw ampersands are not allowed in DAV property values.
            prop_val = re.sub(r'\&', ' + ', prop[2])
            # Key order is (prop[1], prop[0]) — presumably (name, namespace);
            # TODO confirm against the Resource.properties contract.
            res.properties[(prop[1], prop[0])] = prop_val
        log.info('Storing in CMS as %s/%s', unique_id, cname)
        connector.add(res)
def process_boxes(boxes, articles):
    """Merge 'Kasten' (box) documents into their corresponding articles.

    A box belongs to the article whose id is the box id minus its
    ``-Kasten...`` suffix.  Frames and body children of the box are moved
    into the article's XML in place.

    :param boxes: mapping of box uniqueId -> (doc, cname)
    :param articles: mapping of article uniqueId -> (doc, cname)
    :return: the boxes for which no corresponding article was found.
    """
    no_corresponding_article = {}
    # items() avoids the repeated boxes[box_id] lookups of the old loop.
    for box_id, (box_doc, box_cname) in boxes.items():
        box_xml = box_doc.doc
        article_id = re.sub('-Kasten.*$', '', box_id)
        if articles.get(article_id) is None:
            no_corresponding_article[box_id] = (box_doc, box_cname)
            continue
        doc, cname = articles[article_id]
        article = doc.doc
        log.info('Process box %s for %s', box_id, article_id)
        # Extract coordinates and add to article
        try:
            extract_and_move_xml_elements(
                box_xml.find("//Frame"), article.find('//Frames')[0])
            new_box = lxml.etree.Element("box")
            article.find('//body').append(new_box)
            extract_and_move_xml_elements(
                box_xml.find("//body").getchildren(), new_box)
        except Exception:
            # Best effort: a broken box must not abort the whole import.
            log.error('Error processing box %s for %s', box_id, article_id,
                      exc_info=True)
            continue
    return no_corresponding_article
def extract_and_move_xml_elements(elements, new_parent):
    """Detach every node in *elements* from its current parent and append
    it to *new_parent*, preserving the original order."""
    for node in elements:
        old_parent = node.getparent()
        old_parent.remove(node)
        new_parent.append(node)
def load_access_mapping(access_source):
    """Return a mapping of K4 access ids to vivi access ids, read from the
    ``<type>`` elements of *access_source*."""
    mapping = {}
    for type_node in access_source.xpath("//type"):
        mapping[type_node.get('k4_id')] = type_node.get('id')
    return mapping
class ConnectorResolver(lxml.etree.Resolver):
    """lxml resolver that serves ``http://xml.zeit.de/`` URLs from the DAV
    connector instead of the network."""

    def resolve(self, url, id, context):
        if url.startswith('http://xml.zeit.de/'):
            connector = zope.component.getUtility(
                zeit.connector.interfaces.IConnector)
            return self.resolve_file(connector[url].data, context)
        # Anything else falls through to lxml's default resolution.
        return None
def create_image_resources(input_dir, doc, img_base_id):
    """Build (xml, lowres, highres) resource triples for every zon image
    of *doc*.  Images whose files cannot be located are logged and
    skipped."""
    resources = []
    for counter, elem in enumerate(doc.zon_images):
        try:
            vivi_name = elem.get('vivi_name')
            img_xml = lxml.etree.parse(
                _get_path(os.path.join(input_dir, elem.get('k4_id'))))
            resources.append((
                get_xml_img_resource(img_xml, img_base_id, vivi_name),
                get_prefixed_img_resource(
                    input_dir, img_xml, img_base_id, 'preview', vivi_name),
                get_prefixed_img_resource(
                    input_dir, img_xml, img_base_id, 'master', vivi_name)))
        except FileNotFoundException:
            log.error('Image %s/%s could not be processed', counter,
                      len(doc.zon_images), exc_info=True)
    return resources
def _get_path(path):
""" For unknown reasons we get file names in different encodings, while
the lxml document has an utf-8 encoding. We tried a couple
patterns, that proved to work and catch all of the encoding cases we know
of."""
if os.path.isfile(path):
return path
try:
path_unicode = unicodedata.normalize('NFD', path).encode('utf-8')
if os.path.isfile(path):
return path_unicode
except UnicodeEncodeError:
log.error('Error finding path (1/3)', exc_info=True)
try:
path_unicode_2 = path.encode('utf-8')
if os.path.isfile(path):
return path_unicode_2
except UnicodeEncodeError:
log.error('Error finding path (2/3)', exc_info=True)
try:
path_iso = path.encode('cp1250')
if os.path.isfile(path_iso):
return path_iso
except (UnicodeDecodeError, UnicodeEncodeError):
log.error('Error finding path (3/3)', exc_info=True)
raise FileNotFoundException('Path %s could not be found' % path)
class FileNotFoundException(Exception):
    """Raised by _get_path when a file path cannot be located in any of the
    known encodings."""
    pass
def set_zon_image_uniqueId(doc, img_base_id):
    """Assign each zon image element of *doc* a ``uniqueId`` attribute
    located below *img_base_id*."""
    for image in doc.zon_images:
        unique_id = os.path.join(img_base_id, image.get('vivi_name'))
        image.set("uniqueId", unique_id)
def get_xml_img_resource(img_xml, img_base_id, name):
    """Build the image-group metadata resource for the image *name*."""
    group_xml = create_img_xml(img_xml, name)
    payload = io.BytesIO(lxml.etree.tostring(group_xml, encoding='utf-8'))
    return Resource(
        os.path.join(img_base_id, name), name, 'image-xml', payload,
        contentType='text/xml')
def get_prefixed_img_resource(input_dir, img_xml, img_base_id, prefix, name):
    """Build an image Resource named ``<prefix>-<name>.jpg``.

    The K4 XML stores a backslash-separated path in LowResPath; the first
    segment (drive/root) is dropped before joining with *input_dir*.
    """
    raw_path = img_xml.find('/HEADER/LowResPath').text
    segments = raw_path.replace('\\', '/').split('/')[1:]
    path = _get_path(os.path.join(input_dir, '/'.join(segments)))
    filename = '%s-%s.jpg' % (prefix, name)
    return Resource(
        os.path.join(img_base_id, filename), filename, 'image',
        open(path, 'rb'), contentType='image/jpeg')
def hash_highres_dir(year, volume):
    """Walk the configured high-res directory for *year*/*volume* and
    return an ImageHash for every readable file; hashing failures are
    logged and skipped."""
    settings = zope.component.getUtility(zeit.importer.interfaces.ISettings)
    directory = settings.get('k4_highres_dir', '').format(
        year=year, volume=volume)
    result = []
    for dirpath, _, filenames in os.walk(directory):
        for name in filenames:
            try:
                full_path = os.path.join(dirpath, name)
                result.append(ImageHash(full_path, full_path))
            except Exception as e:
                log.warning('Hashing error: {}'.format(e))
    return result
def create_img_xml(xml, name):
    """Build the ``<image-group>`` metadata tree for the image *name*,
    taking caption, copyright and licence from the K4 header of *xml*."""

    def _attr(ns, attr_name, text):
        # One <attribute ns=... name=...> node carrying the given text.
        node = lxml.etree.Element('attribute', ns=ns, name=attr_name)
        node.text = text
        return node

    img_group = lxml.etree.Element('image-group')
    img_group.append(_attr(
        'http://namespaces.zeit.de/CMS/meta', 'type', 'image-group'))
    img_group.append(_attr(
        'http://namespaces.zeit.de/CMS/image', 'caption',
        xml.find('/HEADER/BUZ').get('value')))
    img_group.append(_attr(
        'http://namespaces.zeit.de/CMS/image', 'image_base_name', name))
    img_group.append(_attr(
        'http://namespaces.zeit.de/CMS/document', 'copyrights',
        xml.find('/HEADER/CREDITS').text))
    # Yes, there's a typo in Licence, but that's how it's specified.
    licence = xml.find('/HEADER/Licence')
    duration = {
        '2 Wochen': 'P14D',
        '6 Monate': 'P6M',
        'unbegrenzt': 'P1000Y',
        'keine': 'PT0S',
        '(ohne)': 'PT0S',
        '': 'PT0S'}
    img_group.append(_attr(
        'http://namespaces.zeit.de/CMS/image', 'expires_in',
        duration.get(licence.get('value'), 'PT0S')))
    return img_group
def _configure(config):
    """Register the importer settings and the DAV connector as global
    zope.component utilities."""
    settings = config.pop('importer')
    zope.component.provideUtility(
        settings, zeit.importer.interfaces.ISettings)
    connector = zeit.connector.connector.Connector(
        {'default': settings['connector_url']})
    zope.component.provideUtility(connector)
def _configure_logging(config):
if 'loggers' in config:
logging.config.dictConfig(config)
def _configure_from_dav_xml():
    """Load product/ressort mappings and XSLT stylesheets into settings.

    Reads the import configuration and ressortmap from the DAV connector
    and registers the parsed product ids, publication ids, stylesheets
    and the access mapping on the ISettings utility.

    :raises ValueError: if the import config or ressortmap is missing.
    """
    connector = zope.component.getUtility(zeit.connector.interfaces.IConnector)
    settings = zope.component.getUtility(zeit.importer.interfaces.ISettings)
    try:
        resource = connector[settings['import_config']]
    except KeyError:
        # Bug fix: ValueError does not interpolate logging-style '%s'
        # arguments, so format the message eagerly.
        raise ValueError('Import configuration file %s not found'
                         % settings.get('import_config', ''))
    settings['product_names'] = {}
    settings['product_ids'] = {}
    settings['publication_ids'] = {}
    tree = lxml.etree.fromstring(resource.data.read())
    for p in tree.xpath('/config/product'):
        k4_id = p.findtext('k4id')
        label = p.findtext('label')
        id = p.get('id')
        if k4_id:
            settings['product_names'][id] = label
            settings['product_ids'][k4_id] = id
            for ressort in p.xpath('ressort'):
                settings['publication_ids'][
                    (k4_id, ressort.get('name'))] = ressort.get('id')
    try:
        connector[settings['ressortmap']]
    except KeyError:
        # Same fix as above: explicit % formatting.
        raise ValueError('Ressortmap file %s not found'
                         % settings.get('ressortmap', ''))
    parser = lxml.etree.XMLParser()
    parser.resolvers.add(ConnectorResolver())
    settings['k4_stylesheet'] = lxml.etree.XSLT(lxml.etree.parse(
        pkg_resources.resource_filename(
            __name__, 'stylesheets/k4import.xslt'), parser=parser))
    settings['normalize_whitespace'] = lxml.etree.XSLT(lxml.etree.parse(
        pkg_resources.resource_filename(
            __name__, 'stylesheets/normalize_whitespace.xslt'), parser=parser))
    access_source = lxml.etree.parse(connector[settings['access_source']].data)
    settings['access_mapping'] = load_access_mapping(access_source)
def _parse_args():
parser = optparse.OptionParser("usage: %prog [options] arg")
parser.add_option("-i", "--indir", dest="input_dir",
help="directory with the k4 export files")
parser.add_option("-p", "--productid", dest="product_id",
help="product id to be used with every article")
parser.add_option("-c", "--config", dest="config_file",
help="path to configuration file")
(options, args) = parser.parse_args()
if not options.config_file:
options.config_file = os.environ.get('ZEIT_IMPORTER_CONFIG')
if not options.config_file:
raise ValueError('A configuration file is required.')
return options
def main():
    """Command line entry point: configure everything and run the import.

    Exits with 0 on success, 2 when some documents failed, 1 (via the
    re-raised exception) on an uncaught error.
    """
    options = _parse_args()
    # Bug fix: close the config file promptly instead of leaking the handle.
    with open(options.config_file, 'r') as config_file:
        config = yaml.safe_load(config_file)
    _configure(config)
    _configure_logging(config)
    _configure_from_dav_xml()
    settings = zope.component.getUtility(zeit.importer.interfaces.ISettings)
    if not options.input_dir:
        options.input_dir = settings['k4_export_dir']
        log.info('No input directory given, assuming %s', options.input_dir)
    try:
        log.info('Start import of %s to %s', options.input_dir,
                 settings['import_root'])
        success = run_dir(options.input_dir, options.product_id)
        sys.exit(0 if success else 2)
    except Exception:
        log.error('Uncaught exception', exc_info=True)
        raise  # will exit with status 1
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
|
#! cd .. && python3 -m tests.parser_test
import unittest
from tests.util import edit_distance, parsecmp, TOKEN
from daedalus.lexer import Token, Lexer
from daedalus.parser import Parser as ParserBase, ParseError
class Parser(ParserBase):
    """Parser with all warnings disabled, for quiet test runs."""

    def __init__(self):
        # Zero-argument super(): the file already uses this form elsewhere.
        super().__init__()
        self.disable_all_warnings = True
class ParserTestCase(unittest.TestCase):
    """Delimiter smoke tests: unbalanced braces/parens/brackets raise
    ParseError; a nested delimiter combination parses to the expected
    token tree."""

    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        super().setUp()

    def tearDown(self):
        super().tearDown()

    def test_001_parse_brace(self):
        # Three opening braces but only two closing ones.
        with self.assertRaises(ParseError):
            text = "{{{ }}"
            Parser().parse(Lexer().lex(text))

    def test_001_parse_paren(self):
        with self.assertRaises(ParseError):
            text = "("
            Parser().parse(Lexer().lex(text))

    def test_001_parse_bracket(self):
        with self.assertRaises(ParseError):
            text = "["
            Parser().parse(Lexer().lex(text))

    def test_001_hard(self):
        # Object literal containing a computed-name method: {[0](){}}
        text = """{[0](){}}"""
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_OBJECT', '{}',
                TOKEN('T_FUNCTION', '',
                    TOKEN('T_LIST', '[]',
                        TOKEN('T_NUMBER', '0')),
                    TOKEN('T_ARGLIST', '()'),
                    TOKEN('T_BLOCK', '{}'))))
        self.assertFalse(parsecmp(expected, ast, False))
class ParserTypesTestCase(unittest.TestCase):
    """Numeric literal tokens survive parsing with their text intact."""

    def _test(self, text, expected):
        actual = Parser().parse(Lexer().lex(text))
        self.assertFalse(parsecmp(expected, actual, False))

    def test_001_number_float(self):
        self._test("3.14",
                   TOKEN('T_MODULE', '', TOKEN('T_NUMBER', '3.14')))

    def test_001_number_exponent(self):
        self._test("1e-5",
                   TOKEN('T_MODULE', '', TOKEN('T_NUMBER', '1e-5')))
class ParserUnaryOpTestCase(unittest.TestCase):
    """Unary operators: prefix/postfix increment, unary sign, delete,
    spread, and tagged template literals."""

    def test_001_unary_prefix(self):
        text = "++x"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_PREFIX', '++',
                TOKEN('T_TEXT', 'x'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_unary_postfix(self):
        text = "x++"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_POSTFIX', '++',
                TOKEN('T_TEXT', 'x'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_prefix_plus(self):
        # Unary plus on the right-hand side of an assignment.
        text = "x=+1"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_ASSIGN', '=',
                TOKEN('T_TEXT', 'x'),
                TOKEN('T_PREFIX', '+',
                    TOKEN('T_NUMBER', '1')))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_postfix_minus(self):
        # Despite the method name this is a prefix minus: x = -1.
        text = "x=-1"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_ASSIGN', '=',
                TOKEN('T_TEXT', 'x'),
                TOKEN('T_PREFIX', '-',
                    TOKEN('T_NUMBER', '1')))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_prefix_delete(self):
        text = "delete x"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_PREFIX', 'delete',
                TOKEN('T_TEXT', 'x'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_spread(self):
        text = "{...a}"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_OBJECT', '{}',
                TOKEN('T_SPREAD', '...',
                    TOKEN('T_TEXT', 'a')))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_tagged_template(self):
        text = "myTag`b${c}a`"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_TAGGED_TEMPLATE', '',
                TOKEN('T_TEXT', 'myTag'),
                TOKEN('T_TEMPLATE_STRING', '`b${c}a`',
                    TOKEN('T_STRING', 'b'),
                    TOKEN('T_TEMPLATE_EXPRESSION', 'c',
                        TOKEN('T_TEXT', 'c')),
                    TOKEN('T_STRING', 'a'))))
        self.assertFalse(parsecmp(expected, ast, False))
class ParserBinOpTestCase(unittest.TestCase):
    """Binary and assignment operators, optional chaining, ternary,
    subscripts and destructuring assignment."""

    def test_001_assign(self):
        text = "x = 1"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_ASSIGN', '=',
                TOKEN('T_TEXT', 'x'),
                TOKEN('T_NUMBER', '1'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_add(self):
        text = "x + 1"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_BINARY', '+',
                TOKEN('T_TEXT', 'x'),
                TOKEN('T_NUMBER', '1'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_subtract(self):
        text = "x - 1"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_BINARY', '-',
                TOKEN('T_TEXT', 'x'),
                TOKEN('T_NUMBER', '1'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_mul(self):
        text = "x * 1"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_BINARY', '*',
                TOKEN('T_TEXT', 'x'),
                TOKEN('T_NUMBER', '1'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_divide(self):
        text = "x / 1"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_BINARY', '/',
                TOKEN('T_TEXT', 'x'),
                TOKEN('T_NUMBER', '1'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_divide_2(self):
        # Nested grouped divisions keep their grouping tokens.
        text = "x = ((1/2)/3)"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_ASSIGN', '=',
                TOKEN('T_TEXT', 'x'),
                TOKEN('T_GROUPING', '()',
                    TOKEN('T_BINARY', '/',
                        TOKEN('T_GROUPING', '()',
                            TOKEN('T_BINARY', '/',
                                TOKEN('T_NUMBER', '1'),
                                TOKEN('T_NUMBER', '2'))),
                        TOKEN('T_NUMBER', '3'))))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_attribute(self):
        text = "a.b"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_GET_ATTR', '.',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_ATTR', 'b'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_sub_assign(self):
        text = "a -= b"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_ASSIGN', '-=',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_TEXT', 'b'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_add_assign(self):
        text = "a += b"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_ASSIGN', '+=',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_TEXT', 'b'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_multiply_assign(self):
        text = "a *= b"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_ASSIGN', '*=',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_TEXT', 'b'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_div_assign(self):
        text = "a /= b"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_ASSIGN', '/=',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_TEXT', 'b'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_exp_assign(self):
        text = "a **= b"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_ASSIGN', '**=',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_TEXT', 'b'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_null_assign(self):
        text = "a ??= b"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_ASSIGN', '??=',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_TEXT', 'b'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_or_assign(self):
        text = "a ||= b"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_ASSIGN', '||=',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_TEXT', 'b'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_and_assign(self):
        text = "a &&= b"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_ASSIGN', '&&=',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_TEXT', 'b'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_logical_or_assign(self):
        # NOTE(review): method names for |= and &= appear swapped with the
        # && / || tests above ("logical" vs bitwise) — naming only.
        text = "a |= b"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_ASSIGN', '|=',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_TEXT', 'b'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_logical_and_assign(self):
        text = "a &= b"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_ASSIGN', '&=',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_TEXT', 'b'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_bitwise_not(self):
        text = "~ b"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_PREFIX', '~',
                TOKEN('T_TEXT', 'b'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_lshift_assign(self):
        text = "a <<= b"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_ASSIGN', '<<=',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_TEXT', 'b'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_rshift_assign(self):
        text = "a >>= b"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_ASSIGN', '>>=',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_TEXT', 'b'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_unsigned_rshift(self):
        text = "a >>> b"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_BINARY', '>>>',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_TEXT', 'b'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_unsigned_rshift_assign(self):
        text = "a >>>= b"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_ASSIGN', '>>>=',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_TEXT', 'b'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_null_coalescing_v1(self):
        text = "a??b"
        tokens = Lexer().lex(text)
        parser = Parser()
        ast = parser.parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_BINARY', '??',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_TEXT', 'b')))
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_optional_chaining_v1(self):
        # Without the transform the '?.' node is kept as-is.
        text = "a?.b"
        tokens = Lexer().lex(text)
        parser = Parser()
        parser.feat_xform_optional_chaining = False
        ast = parser.parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_OPTIONAL_CHAINING', '?.',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_ATTR', 'b'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_optional_chaining_v2(self):
        # With the transform 'a?.b' is rewritten to ((a)||{}).b.
        text = "a?.b"
        tokens = Lexer().lex(text)
        parser = Parser()
        parser.feat_xform_optional_chaining = True
        ast = parser.parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_BINARY', '.',
                TOKEN('T_GROUPING', '()',
                    TOKEN('T_BINARY', '||',
                        TOKEN('T_GROUPING', '()',
                            TOKEN('T_TEXT', 'a')),
                        TOKEN('T_OBJECT', '{}'))),
                TOKEN('T_ATTR', 'b')))
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_ternary(self):
        text = "a?b:c"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_TERNARY', '?',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_TEXT', 'b'),
                TOKEN('T_TEXT', 'c'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_subscr_1(self):
        text = "x[]"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_SUBSCR', '',
                TOKEN('T_TEXT', 'x'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_subscr_2(self):
        text = "x[0]"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_SUBSCR', '',
                TOKEN('T_TEXT', 'x'),
                TOKEN('T_NUMBER', '0'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_subscr_3a(self):
        text = "x?.[0]"
        tokens = Lexer().lex(text)
        parser = Parser()
        parser.feat_xform_optional_chaining = True
        ast = parser.parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_SUBSCR', '[]',
                TOKEN('T_GROUPING', '()',
                    TOKEN('T_BINARY', '||',
                        TOKEN('T_GROUPING', '()',
                            TOKEN('T_TEXT', 'x')),
                        TOKEN('T_OBJECT', '{}'))),
                TOKEN('T_NUMBER', '0')))
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_subscr_3b(self):
        text = "x?.[0]"
        tokens = Lexer().lex(text)
        parser = Parser()
        parser.feat_xform_optional_chaining = False
        ast = parser.parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_OPTIONAL_CHAINING', '?.',
                TOKEN('T_SUBSCR', '[]',
                    TOKEN('T_TEXT', 'x'),
                    TOKEN('T_NUMBER', '0'))))
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_assign_newline(self):
        # Chained assignment continuing on the next line.
        text = """a = b =
        f()"""
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_ASSIGN', '=',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_ASSIGN', '=',
                    TOKEN('T_TEXT', 'b'),
                    TOKEN('T_FUNCTIONCALL', '',
                        TOKEN('T_TEXT', 'f'),
                        TOKEN('T_ARGLIST', '()')))))
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_destructure_assign(self):
        text = "var [a,b,c] = d"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_VAR', 'var',
                TOKEN('T_ASSIGN', '=',
                    TOKEN('T_UNPACK_SEQUENCE', '[]',
                        TOKEN('T_TEXT', 'a'),
                        TOKEN('T_TEXT', 'b'),
                        TOKEN('T_TEXT', 'c')),
                    TOKEN('T_TEXT', 'd'))))
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_destructure_assign_2(self):
        # NOTE(review): this test never asserts anything — the final
        # self.assertFalse(parsecmp(expected, ast, False)) call is
        # missing — and `expected` was copied verbatim from
        # test_001_destructure_assign and does not match this input.
        # It needs the real expected tree for `var [x=[2][0]] = [];`
        # plus the assertion.
        text = """
        var [x=[2][0]] = [];
        """
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_VAR', 'var',
                TOKEN('T_ASSIGN', '=',
                    TOKEN('T_UNPACK_SEQUENCE', '[]',
                        TOKEN('T_TEXT', 'a'),
                        TOKEN('T_TEXT', 'b'),
                        TOKEN('T_TEXT', 'c')),
                    TOKEN('T_TEXT', 'd'))))
class ParserBinOpErrorTestCase(unittest.TestCase):
    """Malformed operator expressions must raise ParseError."""

    def test_001_assign(self):
        # Incomplete ternary: 'a ? b c d ' has no ':' branch.
        text = "a ? b c d "
        tokens = Lexer().lex(text)
        # The unused 'as ctxt' binding of the original was removed.
        with self.assertRaises(ParseError):
            Parser().parse(tokens)
class ParserKeywordTestCase(unittest.TestCase):
    """Statement-level keywords: declarations, control flow, imports and
    exports, super, try/catch and arrow-function precedence."""

    def test_001_let(self):
        text = "let x, y=1"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_VAR', 'let',
                TOKEN('T_TEXT', 'x'),
                TOKEN('T_ASSIGN', '=',
                    TOKEN('T_TEXT', 'y'),
                    TOKEN('T_NUMBER', '1'))))
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_break(self):
        text = "break"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '', TOKEN('T_BREAK', 'break'))
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_continue(self):
        text = "continue"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '', TOKEN('T_CONTINUE', 'continue'))
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_switch(self):
        # case/default labels appear as flat siblings inside the block.
        text = """
        switch (a) {
            case 0:
                x=0;
                break;
            default:
                x=1;
                break;
        }
        """
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_SWITCH', 'switch',
                TOKEN('T_GROUPING', '()',
                    TOKEN('T_TEXT', 'a')),
                TOKEN('T_BLOCK', '{}',
                    TOKEN('T_CASE', 'case',
                        TOKEN('T_NUMBER', '0')),
                    TOKEN('T_ASSIGN', '=',
                        TOKEN('T_TEXT', 'x'),
                        TOKEN('T_NUMBER', '0')),
                    TOKEN('T_BREAK', 'break'),
                    TOKEN('T_DEFAULT', 'default'),
                    TOKEN('T_ASSIGN', '=',
                        TOKEN('T_TEXT', 'x'),
                        TOKEN('T_NUMBER', '1')),
                    TOKEN('T_BREAK', 'break')))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_branch_1(self):
        text = "if (true) {}"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_BRANCH', 'if',
                TOKEN('T_ARGLIST', '()',
                    TOKEN('T_KEYWORD', 'true')),
                TOKEN('T_BLOCK', '{}'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_branch_2(self):
        text = "if (true) {} else {}"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_BRANCH', 'if',
                TOKEN('T_ARGLIST', '()',
                    TOKEN('T_KEYWORD', 'true')),
                TOKEN('T_BLOCK', '{}'),
                TOKEN('T_BLOCK', '{}'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_branch_3(self):
        # else-if nests a second T_BRANCH as the third child.
        text = "if (true) {} else if (false) {}"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_BRANCH', 'if',
                TOKEN('T_ARGLIST', '()',
                    TOKEN('T_KEYWORD', 'true')),
                TOKEN('T_BLOCK', '{}'),
                TOKEN('T_BRANCH', 'if',
                    TOKEN('T_ARGLIST', '()',
                        TOKEN('T_KEYWORD', 'false')),
                    TOKEN('T_BLOCK', '{}')))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_branch_4(self):
        text = "if (true) {} else if (false) {} else {}"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_BRANCH', 'if',
                TOKEN('T_ARGLIST', '()',
                    TOKEN('T_KEYWORD', 'true')),
                TOKEN('T_BLOCK', '{}'),
                TOKEN('T_BRANCH', 'if',
                    TOKEN('T_ARGLIST', '()',
                        TOKEN('T_KEYWORD', 'false')),
                    TOKEN('T_BLOCK', '{}'),
                    TOKEN('T_BLOCK', '{}')))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_new_1(self):
        text = "new A()"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_NEW', 'new',
                TOKEN('T_FUNCTIONCALL', '',
                    TOKEN('T_TEXT', 'A'),
                    TOKEN('T_ARGLIST', '()')))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_new_2(self):
        # 'new' without an argument list.
        text = "new A"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_NEW', 'new',
                TOKEN('T_TEXT', 'A'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_while_1(self):
        text = "while (true) { x; }"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_WHILE', 'while',
                TOKEN('T_ARGLIST', '()',
                    TOKEN('T_KEYWORD', 'true')),
                TOKEN('T_BLOCK', '{}',
                    TOKEN('T_TEXT', 'x')))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_dowhile_1(self):
        text = "do { x; } while (true);"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_DOWHILE', 'do',
                TOKEN('T_BLOCK', '{}',
                    TOKEN('T_TEXT', 'x')),
                TOKEN('T_ARGLIST', '()',
                    TOKEN('T_KEYWORD', 'true')))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_for(self):
        text = "for (let x=0; x < 5; x++) {}"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_FOR', 'for',
                TOKEN('T_ARGLIST', '()',
                    TOKEN('T_VAR', 'let',
                        TOKEN('T_ASSIGN', '=',
                            TOKEN('T_TEXT', 'x'),
                            TOKEN('T_NUMBER', '0'))),
                    TOKEN('T_BINARY', '<',
                        TOKEN('T_TEXT', 'x'),
                        TOKEN('T_NUMBER', '5')),
                    TOKEN('T_POSTFIX', '++',
                        TOKEN('T_TEXT', 'x'))),
                TOKEN('T_BLOCK', '{}'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_import_js(self):
        text = "import './file.js'"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_IMPORT', './file.js',
                TOKEN('T_OBJECT', '{}'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_import_mod(self):
        # daedalus-specific 'import ... with {...}' syntax.
        text = "import mod with {NamedExport}"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_IMPORT', 'mod',
                TOKEN('T_OBJECT', '{}',
                    TOKEN('T_TEXT', 'NamedExport')))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_import_mod_path(self):
        text = "import a.b.c with {NamedExport}"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_IMPORT', 'a.b.c',
                TOKEN('T_OBJECT', '{}',
                    TOKEN('T_TEXT', 'NamedExport')))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_export_var(self):
        # Exports repeat the exported names as trailing children.
        text = "export const v1=null"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_EXPORT', 'export',
                TOKEN('T_VAR', 'const',
                    TOKEN('T_ASSIGN', '=',
                        TOKEN('T_TEXT', 'v1'),
                        TOKEN('T_KEYWORD', 'null'))),
                TOKEN('T_TEXT', 'v1')))
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_export_function(self):
        text = "export function a() {}"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_EXPORT', 'export',
                TOKEN('T_FUNCTION', 'function',
                    TOKEN('T_TEXT', 'a'),
                    TOKEN('T_ARGLIST', '()'),
                    TOKEN('T_BLOCK', '{}')),
                TOKEN('T_TEXT', 'a')))
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_export_class(self):
        text = "export class A {}"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_EXPORT', 'export',
                TOKEN('T_CLASS', 'class',
                    TOKEN('T_TEXT', 'A'),
                    TOKEN('T_KEYWORD', 'extends'),
                    TOKEN('T_CLASS_BLOCK', '{}')),
                TOKEN('T_TEXT', 'A')))
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_export_many(self):
        text = "export let v1, v2"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_EXPORT', 'export',
                TOKEN('T_VAR', 'let',
                    TOKEN('T_TEXT', 'v1'),
                    TOKEN('T_TEXT', 'v2')),
                TOKEN('T_TEXT', 'v1'),
                TOKEN('T_TEXT', 'v2')))
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_super_constructor_1(self):
        text = "super()"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_FUNCTIONCALL', '',
                TOKEN('T_KEYWORD', 'super'),
                TOKEN('T_ARGLIST', '()'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_super_function_1(self):
        text = "super.onClick()"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_FUNCTIONCALL', '',
                TOKEN('T_GET_ATTR', '.',
                    TOKEN('T_KEYWORD', 'super'),
                    TOKEN('T_ATTR', 'onClick')),
                TOKEN('T_ARGLIST', '()'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_try_catch(self):
        text = """
        try {
            throw 0;
        } catch (ex) {
        } finally {
        }
        """
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_TRY', 'try',
                TOKEN('T_BLOCK', '{}',
                    TOKEN('T_THROW', 'throw',
                        TOKEN('T_NUMBER', '0'))),
                TOKEN('T_CATCH', 'catch',
                    TOKEN('T_ARGLIST', '()',
                        TOKEN('T_TEXT', 'ex')),
                    TOKEN('T_BLOCK', '{}')),
                TOKEN('T_FINALLY', 'finally',
                    TOKEN('T_BLOCK', '{}')))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_lambda_assign(self):
        """
        this test can only pass if binary operators
        are collected right-to-left and the arrow
        operator is treated at the same precedence
        """
        text = """
        const f = (d,k,v) => d[k] = v
        """
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        # NOTE(review): in this expected tree the T_ASSIGN holds only the
        # T_SUBSCR child while 'v' is a sibling under T_LAMBDA — verify
        # this is really the parser's shape and not a misplaced paren.
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_VAR', 'const',
                TOKEN('T_ASSIGN', '=',
                    TOKEN('T_TEXT', 'f'),
                    TOKEN('T_LAMBDA', '=>',
                        TOKEN('T_TEXT', 'Anonymous'),
                        TOKEN('T_ARGLIST', '()',
                            TOKEN('T_TEXT', 'd'),
                            TOKEN('T_TEXT', 'k'),
                            TOKEN('T_TEXT', 'v')
                        ),
                        TOKEN('T_ASSIGN', '=',
                            TOKEN('T_SUBSCR', '',
                                TOKEN('T_TEXT', 'd'),
                                TOKEN('T_TEXT', 'k'))
                        ),
                        TOKEN('T_TEXT', 'v')
                    )
                )
            )
        )
        self.assertFalse(parsecmp(expected, ast, False))
class ParserFunctionTestCase(unittest.TestCase):
    """Function definitions: anonymous/named functions, arrow functions
    and immediately-invoked function expressions."""

    def test_001_anonymous_function(self):
        text = "function (x) {return x;}"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_ANONYMOUS_FUNCTION', 'function',
                TOKEN('T_TEXT', 'Anonymous'),
                TOKEN('T_ARGLIST', '()',
                    TOKEN('T_TEXT', 'x')),
                TOKEN('T_BLOCK', '{}',
                    TOKEN('T_RETURN', 'return',
                        TOKEN('T_TEXT', 'x'))))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_named_function(self):
        text = "function example(x) {return x;}"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_FUNCTION', 'function',
                TOKEN('T_TEXT', 'example'),
                TOKEN('T_ARGLIST', '()',
                    TOKEN('T_TEXT', 'x')),
                TOKEN('T_BLOCK', '{}',
                    TOKEN('T_RETURN', 'return',
                        TOKEN('T_TEXT', 'x'))))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_lambda_1(self):
        # NOTE(review): the expected body of '() => {}' is T_OBJECT, not
        # T_BLOCK — the parser apparently reads the bare '{}' after '=>'
        # as an object literal; confirm this is intended.
        text = "() => {}"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_LAMBDA', '=>',
                Token(Token.T_TEXT, 1, 3, 'Anonymous'),
                TOKEN('T_ARGLIST', '()'),
                TOKEN('T_OBJECT', '{}'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_lambda_2(self):
        text = "(a, b, c) => {}"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_LAMBDA', '=>',
                Token(Token.T_TEXT, 1, 3, 'Anonymous'),
                TOKEN('T_ARGLIST', '()',
                    TOKEN('T_TEXT', 'a'),
                    TOKEN('T_TEXT', 'b'),
                    TOKEN('T_TEXT', 'c')),
                TOKEN('T_OBJECT', '{}'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_lambda_3(self):
        # Single unparenthesized parameter with expression body.
        text = "a => b"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_LAMBDA', '=>',
                Token(Token.T_TEXT, 1, 3, 'Anonymous'),
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_TEXT', 'b'))
        )
        self.assertFalse(parsecmp(expected, ast, False))

    def test_001_void_iife(self):
        text = """
        void function iife() {
            console.log("test")
        }();
        """
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_PREFIX', 'void',
                TOKEN('T_FUNCTIONCALL', '',
                    TOKEN('T_FUNCTION', 'function',
                        TOKEN('T_TEXT', 'iife'),
                        TOKEN('T_ARGLIST', '()'),
                        TOKEN('T_BLOCK', '{}',
                            TOKEN('T_FUNCTIONCALL', '',
                                TOKEN('T_GET_ATTR', '.',
                                    TOKEN('T_TEXT', 'console'),
                                    TOKEN('T_ATTR', 'log')),
                                TOKEN('T_ARGLIST', '()',
                                    TOKEN('T_STRING', '"test"'))))),
                    TOKEN('T_ARGLIST', '()'))))
        self.assertFalse(parsecmp(expected, ast, False))
class ParserClassTestCase(unittest.TestCase):
    """Parsing of class declarations, `extends` clauses, and method bodies."""
    def test_001_class_1(self):
        """An anonymous class still produces name and extends child tokens (empty)."""
        text = "class {}"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_CLASS', 'class',
                TOKEN('T_TEXT', ''),
                TOKEN('T_KEYWORD', 'extends'),
                TOKEN('T_CLASS_BLOCK', '{}'))
        )
        self.assertFalse(parsecmp(expected, ast, False))
    def test_001_class_2(self):
        """A named class without a parent keeps an empty `extends` token."""
        text = "class A {}"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_CLASS', 'class',
                TOKEN('T_TEXT', 'A'),
                TOKEN('T_KEYWORD', 'extends'),
                TOKEN('T_CLASS_BLOCK', '{}'))
        )
        self.assertFalse(parsecmp(expected, ast, False))
    def test_001_class_3(self):
        """The parent class name becomes a child of the `extends` keyword token."""
        text = "class A extends B {}"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_CLASS', 'class',
                TOKEN('T_TEXT', 'A'),
                TOKEN('T_KEYWORD', 'extends',
                    TOKEN('T_TEXT', 'B')),
                TOKEN('T_CLASS_BLOCK', '{}'))
        )
        self.assertFalse(parsecmp(expected, ast, False))
    def test_001_class_4(self):
        """A method inside a class body parses as T_METHOD."""
        text = """
        class A extends B {
            onClick(event) {
            }
        }
        """
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_CLASS', 'class',
                TOKEN('T_TEXT', 'A'),
                TOKEN('T_KEYWORD', 'extends',
                    TOKEN('T_TEXT', 'B')),
                TOKEN('T_CLASS_BLOCK', '{}',
                    TOKEN('T_METHOD', '',
                        TOKEN('T_TEXT', 'onClick'),
                        TOKEN('T_ARGLIST', '()',
                            TOKEN('T_TEXT', 'event')),
                        TOKEN('T_BLOCK', '{}'))))
        )
        self.assertFalse(parsecmp(expected, ast, False))
    def test_001_class_5(self):
        """The `extends` clause accepts a dotted (attribute-access) expression."""
        text = "class A extends X.Y {}"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_CLASS', 'class',
                TOKEN('T_TEXT', 'A'),
                TOKEN('T_KEYWORD', 'extends',
                    TOKEN('T_GET_ATTR', '.',
                        TOKEN('T_TEXT', 'X'),
                        TOKEN('T_ATTR', 'Y'))),
                TOKEN('T_CLASS_BLOCK', '{}'))
        )
        self.assertFalse(parsecmp(expected, ast, False))
class ParserChallengeTestCase(unittest.TestCase):
    """Stress tests: minified / pathological JavaScript that exercises
    precedence, associativity, and disambiguation corner cases."""
    def _assert(self, expected, text, debug=False):
        # Shared helper: lex + parse `text` and compare against `expected`.
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        self.assertFalse(parsecmp(expected, ast, debug))
    def test_001_challenge_1(self):
        # a complicated unsafe true/false branch
        text = "if (true) a=b=c; else d=f;"
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_BRANCH', 'if',
                TOKEN('T_ARGLIST', '()',
                    TOKEN('T_KEYWORD', 'true')),
                TOKEN('T_ASSIGN', '=',
                    TOKEN('T_TEXT', 'a'),
                    TOKEN('T_ASSIGN', '=',
                        TOKEN('T_TEXT', 'b'),
                        TOKEN('T_TEXT', 'c'))),
                TOKEN('T_ASSIGN', '=',
                    TOKEN('T_TEXT', 'd'),
                    TOKEN('T_TEXT', 'f'))))
        self._assert(expected, text)
    def test_001_challenge_2(self):
        # a useless for loop
        text = "for(;!((t=o1).y1&&t.y2||t===o2););"
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_FOR', 'for',
                TOKEN('T_ARGLIST', '()',
                    TOKEN('T_EMPTY_TOKEN', ''),
                    TOKEN('T_PREFIX', '!',
                        TOKEN('T_GROUPING', '()',
                            TOKEN('T_LOGICAL_OR', '||',
                                TOKEN('T_LOGICAL_AND', '&&',
                                    TOKEN('T_GET_ATTR', '.',
                                        TOKEN('T_GROUPING', '()',
                                            TOKEN('T_ASSIGN', '=',
                                                TOKEN('T_TEXT', 't'),
                                                TOKEN('T_TEXT', 'o1'))),
                                        TOKEN('T_ATTR', 'y1')),
                                    TOKEN('T_GET_ATTR', '.',
                                        TOKEN('T_TEXT', 't'),
                                        TOKEN('T_ATTR', 'y2'))),
                                TOKEN('T_BINARY', '===',
                                    TOKEN('T_TEXT', 't'),
                                    TOKEN('T_TEXT', 'o2'))))),
                    TOKEN('T_EMPTY_TOKEN', '')),
                TOKEN('T_BLOCK', '{}')))
        self._assert(expected, text)
    def test_001_challenge_3(self):
        """Nested ternary in the true-branch: right-associative grouping."""
        text = "a?b?c:d:e"
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_TERNARY', '?',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_TERNARY', '?',
                    TOKEN('T_TEXT', 'b'),
                    TOKEN('T_TEXT', 'c'),
                    TOKEN('T_TEXT', 'd')),
                TOKEN('T_TEXT', 'e')))
        self._assert(expected, text)
    def test_001_challenge_4(self):
        """Triply-nested ternary in the true-branch."""
        text = "a?b?c?c:c:b:a"
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_TERNARY', '?',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_TERNARY', '?',
                    TOKEN('T_TEXT', 'b'),
                    TOKEN('T_TERNARY', '?',
                        TOKEN('T_TEXT', 'c'),
                        TOKEN('T_TEXT', 'c'),
                        TOKEN('T_TEXT', 'c')),
                    TOKEN('T_TEXT', 'b')),
                TOKEN('T_TEXT', 'a')))
        self._assert(expected, text)
    def test_001_challenge_5(self):
        """Ternaries nested in the false-branch (else side)."""
        text = "a?b:a?a?b:a?b:c:c"
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_TERNARY', '?',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_TEXT', 'b'),
                TOKEN('T_TERNARY', '?',
                    TOKEN('T_TEXT', 'a'),
                    TOKEN('T_TERNARY', '?',
                        TOKEN('T_TEXT', 'a'),
                        TOKEN('T_TEXT', 'b'),
                        TOKEN('T_TERNARY', '?',
                            TOKEN('T_TEXT', 'a'),
                            TOKEN('T_TEXT', 'b'),
                            TOKEN('T_TEXT', 'c'))),
                    TOKEN('T_TEXT', 'c'))))
        self._assert(expected, text)
    def test_001_challenge_6(self):
        """Mixed true/false-branch ternary nesting with multi-char names."""
        text = "a2?b2:c2?d2?e2:f2?g2:h2:i2;"
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_TERNARY', '?',
                TOKEN('T_TEXT', 'a2'),
                TOKEN('T_TEXT', 'b2'),
                TOKEN('T_TERNARY', '?',
                    TOKEN('T_TEXT', 'c2'),
                    TOKEN('T_TERNARY', '?',
                        TOKEN('T_TEXT', 'd2'),
                        TOKEN('T_TEXT', 'e2'),
                        TOKEN('T_TERNARY', '?',
                            TOKEN('T_TEXT', 'f2'),
                            TOKEN('T_TEXT', 'g2'),
                            TOKEN('T_TEXT', 'h2'))),
                    TOKEN('T_TEXT', 'i2'))))
        self._assert(expected, text)
    def test_001_challenge_7(self):
        # a for loop with unsafe block
        # confuses what is a arglist or function call
        text = "for(x in y)(a)[i]&&e[i].apply(x,f)"
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_FOR_IN', 'for',
                TOKEN('T_TEXT', 'x'),
                TOKEN('T_TEXT', 'y'),
                TOKEN('T_LOGICAL_AND', '&&',
                    TOKEN('T_SUBSCR', '',
                        TOKEN('T_GROUPING', '()',
                            TOKEN('T_TEXT', 'a')),
                        TOKEN('T_TEXT', 'i')),
                    TOKEN('T_FUNCTIONCALL', '',
                        TOKEN('T_GET_ATTR', '.',
                            TOKEN('T_SUBSCR', '',
                                TOKEN('T_TEXT', 'e'),
                                TOKEN('T_TEXT', 'i')),
                            TOKEN('T_ATTR', 'apply')),
                        TOKEN('T_ARGLIST', '()',
                            TOKEN('T_TEXT', 'x'),
                            TOKEN('T_TEXT', 'f'))))))
        self._assert(expected, text)
    def test_001_challenge_8(self):
        # inner for loop order of operations
        text = "for(a in b=c)d;"
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_FOR_IN', 'for',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_ASSIGN', '=',
                    TOKEN('T_TEXT', 'b'),
                    TOKEN('T_TEXT', 'c')),
                TOKEN('T_TEXT', 'd')))
        self._assert(expected, text)
    def test_001_challenge_9(self):
        # inner for loop order of operations
        text = "for(a=b,c=d;e<f;g++) h;"
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_FOR', 'for',
                TOKEN('T_ARGLIST', '()',
                    TOKEN('T_COMMA', ',',
                        TOKEN('T_ASSIGN', '=',
                            TOKEN('T_TEXT', 'a'),
                            TOKEN('T_TEXT', 'b')),
                        TOKEN('T_ASSIGN', '=',
                            TOKEN('T_TEXT', 'c'),
                            TOKEN('T_TEXT', 'd'))),
                    TOKEN('T_BINARY', '<',
                        TOKEN('T_TEXT', 'e'),
                        TOKEN('T_TEXT', 'f')),
                    TOKEN('T_POSTFIX', '++',
                        TOKEN('T_TEXT', 'g'))),
                TOKEN('T_TEXT', 'h')))
        self._assert(expected, text)
    def test_001_challenge_10(self):
        # inner for loop order of operations
        text = "var a;for(b=c;d<e;f++)for(g in h=i)j;"
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_VAR', 'var',
                TOKEN('T_TEXT', 'a')),
            TOKEN('T_FOR', 'for',
                TOKEN('T_ARGLIST', '()',
                    TOKEN('T_ASSIGN', '=',
                        TOKEN('T_TEXT', 'b'),
                        TOKEN('T_TEXT', 'c')),
                    TOKEN('T_BINARY', '<',
                        TOKEN('T_TEXT', 'd'),
                        TOKEN('T_TEXT', 'e')),
                    TOKEN('T_POSTFIX', '++',
                        TOKEN('T_TEXT', 'f'))),
                TOKEN('T_FOR_IN', 'for',
                    TOKEN('T_TEXT', 'g'),
                    TOKEN('T_ASSIGN', '=',
                        TOKEN('T_TEXT', 'h'),
                        TOKEN('T_TEXT', 'i')),
                    TOKEN('T_TEXT', 'j'))))
        self._assert(expected, text)
    def test_001_challenge_11(self):
        # inner for loop order of operations
        text = """
        if("object"==typeof t)for(var n in t)this._on(n,t[n],i)else for(var o=0,s=(
        t=d(t)).length;o<s;o++)this._on(t[o],i,e);
        """
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_BRANCH', 'if',
                TOKEN('T_ARGLIST', '()',
                    TOKEN('T_BINARY', '==',
                        TOKEN('T_STRING', '"object"'),
                        TOKEN('T_PREFIX', 'typeof',
                            TOKEN('T_TEXT', 't')))),
                TOKEN('T_FOR_IN', 'for',
                    TOKEN('T_VAR', 'var',
                        TOKEN('T_TEXT', 'n')),
                    TOKEN('T_TEXT', 't'),
                    TOKEN('T_FUNCTIONCALL', '',
                        TOKEN('T_GET_ATTR', '.',
                            TOKEN('T_KEYWORD', 'this'),
                            TOKEN('T_ATTR', '_on')),
                        TOKEN('T_ARGLIST', '()',
                            TOKEN('T_TEXT', 'n'),
                            TOKEN('T_SUBSCR', '',
                                TOKEN('T_TEXT', 't'),
                                TOKEN('T_TEXT', 'n')),
                            TOKEN('T_TEXT', 'i')))),
                TOKEN('T_FOR', 'for',
                    TOKEN('T_ARGLIST', '()',
                        TOKEN('T_VAR', 'var',
                            TOKEN('T_ASSIGN', '=',
                                TOKEN('T_TEXT', 'o'),
                                TOKEN('T_NUMBER', '0')),
                            TOKEN('T_ASSIGN', '=',
                                TOKEN('T_TEXT', 's'),
                                TOKEN('T_GET_ATTR', '.',
                                    TOKEN('T_GROUPING', '()',
                                        TOKEN('T_ASSIGN', '=',
                                            TOKEN('T_TEXT', 't'),
                                            TOKEN('T_FUNCTIONCALL', '',
                                                TOKEN('T_TEXT', 'd'),
                                                TOKEN('T_ARGLIST', '()',
                                                    TOKEN('T_TEXT', 't'))))),
                                    TOKEN('T_ATTR', 'length')))),
                        TOKEN('T_BINARY', '<',
                            TOKEN('T_TEXT', 'o'),
                            TOKEN('T_TEXT', 's')),
                        TOKEN('T_POSTFIX', '++',
                            TOKEN('T_TEXT', 'o'))),
                    TOKEN('T_FUNCTIONCALL', '',
                        TOKEN('T_GET_ATTR', '.',
                            TOKEN('T_KEYWORD', 'this'),
                            TOKEN('T_ATTR', '_on')),
                        TOKEN('T_ARGLIST', '()',
                            TOKEN('T_SUBSCR', '',
                                TOKEN('T_TEXT', 't'),
                                TOKEN('T_TEXT', 'o')),
                            TOKEN('T_TEXT', 'i'),
                            TOKEN('T_TEXT', 'e'))))))
        self._assert(expected, text)
    def test_001_challenge_12(self):
        """The `in` operator binds inside a ternary true-branch."""
        text = """ a?"str"in b:c """
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_TERNARY', '?',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_BINARY', 'in',
                    TOKEN('T_STRING', '"str"'),
                    TOKEN('T_TEXT', 'b')),
                TOKEN('T_TEXT', 'c')))
        self._assert(expected, text)
    def test_001_challenge_13(self):
        """Assignments nested inside both branches of a ternary."""
        text = """ a=x?b=y:c=z """
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_ASSIGN', '=',
                TOKEN('T_TEXT', 'a'),
                TOKEN('T_TERNARY', '?',
                    TOKEN('T_TEXT', 'x'),
                    TOKEN('T_ASSIGN', '=',
                        TOKEN('T_TEXT', 'b'),
                        TOKEN('T_TEXT', 'y')),
                    TOKEN('T_ASSIGN', '=',
                        TOKEN('T_TEXT', 'c'),
                        TOKEN('T_TEXT', 'z')))))
        self._assert(expected, text)
class ParserModuleTestCase(unittest.TestCase):
    """
    Parsing of module import/export syntax.  ES-style forms exercised below:

    import {name} from './modules/module.js';
    import {name as foo} from './modules/module.js';
    import {name1, name2} from './modules/module.js';
    import {name1 as foo, name2 as bar} from './modules/module.js';
    import * as Module from './modules/module.js';
    """
    def test_001_import_export(self):
        # test that all import/export combinations can
        # be parsed without any issues
        # (smoke test: no expected AST, only that parsing does not raise)
        # TODO: remove support for
        #   'import foo'
        #   'import foo.bar'
        # TODO: add support for import module as:
        #
        # daedalus import modes:
        #   from module <name> import {<names>}
        #   import module <name>
        #   import module <name> as <name>
        #   include <path>
        text = """
        from module foo import {bar}
        from module foo.bar import {baz}
        import module foo.bar
        from foo import {bar}
        from foo.bar import {baz}
        import foo
        import foo.bar
        include 'foo.js'
        export a
        export a = 1
        export const a = 1
        export let a = 1
        export var a = 1
        export function a () {}
        export class a {}
        export default a
        export default a = 1
        export default const a = 1
        export default let a = 1
        export default var a = 1
        export default function a () {}
        export default class a {}
        """
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
    def test_001_module(self):
        """Single-name import: the module path becomes the token value."""
        text = "import { name } from './module/module.js'"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_IMPORT_JS_MODULE', "'./module/module.js'",
                TOKEN('T_TEXT', 'name')))
        self.assertFalse(parsecmp(expected, ast, False))
    def test_002_module(self):
        """Multiple imported names become sibling children."""
        text = "import { name1, name2 } from './module/module.js'"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_IMPORT_JS_MODULE', "'./module/module.js'",
                TOKEN('T_TEXT', 'name1'),
                TOKEN('T_TEXT', 'name2')))
        self.assertFalse(parsecmp(expected, ast, False))
    def test_003_module(self):
        """`as` renaming parses as a keyword token with (source, alias) children."""
        text = "import {a as b} from './module/module.js'"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_IMPORT_JS_MODULE', "'./module/module.js'",
                TOKEN('T_KEYWORD', 'as',
                    TOKEN('T_TEXT', 'a'),
                    TOKEN('T_TEXT', 'b'))))
        self.assertFalse(parsecmp(expected, ast, False))
    def test_004_module(self):
        """Multiple `as` renamings in a single import clause."""
        text = "import {a as b, c as d} from './module/module.js'"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_IMPORT_JS_MODULE', "'./module/module.js'",
                TOKEN('T_KEYWORD', 'as',
                    TOKEN('T_TEXT', 'a'),
                    TOKEN('T_TEXT', 'b')),
                TOKEN('T_KEYWORD', 'as',
                    TOKEN('T_TEXT', 'c'),
                    TOKEN('T_TEXT', 'd'))))
        self.assertFalse(parsecmp(expected, ast, False))
    def test_005_module(self):
        """Namespace import (`* as name`) uses the distinct _AS token type."""
        text = "import * as module from './module/module.js'"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_IMPORT_JS_MODULE_AS', "'./module/module.js'",
                TOKEN('T_TEXT', 'module')))
        self.assertFalse(parsecmp(expected, ast, False))
class ParserTypeAnnotationTestCase(unittest.TestCase):
    """Parsing of type-annotation extensions: typedefs, annotated variables,
    annotated functions/lambdas, and generic parameters."""
    def test_001_typedef(self):
        """`type X = Y` parses as a T_TYPE wrapping an assignment."""
        text = "type Foo = List"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_TYPE', 'type',
                TOKEN('T_ASSIGN', '=',
                    TOKEN('T_TEXT', 'Foo'),
                    TOKEN('T_TEXT', 'List'))))
        self.assertFalse(parsecmp(expected, ast, False))
    def test_001_annotate_var_1(self):
        """A `name : type` annotation parses as a binary ':' operator."""
        text = "let x : string"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_VAR', 'let',
                TOKEN('T_BINARY', ':',
                    TOKEN('T_TEXT', 'x'),
                    TOKEN('T_TEXT', 'string'))))
        self.assertFalse(parsecmp(expected, ast, False))
    def test_001_annotate_var_2(self):
        """Multiple annotated declarations under one `let`."""
        text = "let x : string, y : string"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_VAR', 'let',
                TOKEN('T_BINARY', ':',
                    TOKEN('T_TEXT', 'x'),
                    TOKEN('T_TEXT', 'string')),
                TOKEN('T_BINARY', ':',
                    TOKEN('T_TEXT', 'y'),
                    TOKEN('T_TEXT', 'string'))))
        self.assertFalse(parsecmp(expected, ast, False))
    def test_001_annotate_function_1(self):
        # NOTE(review): annotations attach as *children* of the annotated
        # T_TEXT tokens (function name and parameter) — confirm against the
        # parser's annotation-attachment rules.
        text = "function f(x: a=>b): a=>b {}"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_FUNCTION', 'function',
                TOKEN('T_TEXT', 'f',
                    TOKEN('T_LAMBDA', '=>',
                        TOKEN('T_TEXT', 'a'),
                        TOKEN('T_TEXT', 'b'))),
                TOKEN('T_ARGLIST', '()',
                    TOKEN('T_TEXT', 'x',
                        TOKEN('T_LAMBDA', '=>',
                            TOKEN('T_TEXT', 'a'),
                            TOKEN('T_TEXT', 'b')))),
                TOKEN('T_BLOCK', '{}')))
        self.assertFalse(parsecmp(expected, ast, False))
    def test_001_annotate_lambda_1(self):
        """Annotated lambda parameter and return type."""
        text = "(x: int) => int"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_LAMBDA', '=>',
                TOKEN('T_TEXT', 'Anonymous'),
                TOKEN('T_ARGLIST', '()',
                    TOKEN('T_BINARY', ':',
                        TOKEN('T_TEXT', 'x'),
                        TOKEN('T_KEYWORD', 'int'))),
                TOKEN('T_KEYWORD', 'int')))
        self.assertFalse(parsecmp(expected, ast, False))
    def test_001_generic(self):
        """Generic parameter `<T>` attaches as a child of the function name."""
        text = "function f<T>(x:T) : T {}"
        tokens = Lexer().lex(text)
        ast = Parser().parse(tokens)
        expected = TOKEN('T_MODULE', '',
            TOKEN('T_FUNCTION', 'function',
                TOKEN('T_TEXT', 'f',
                    TOKEN('T_TEXT', 'T')),
                TOKEN('T_ARGLIST', '()',
                    TOKEN('T_TEXT', 'x',
                        TOKEN('T_TEXT', 'T'))),
                TOKEN('T_BLOCK', '{}')))
        self.assertFalse(parsecmp(expected, ast, False))
def main():
    """Entry point: run all test cases in this module via unittest."""
    unittest.main()
if __name__ == '__main__':
    main()
|
import os
import yaml
# Default settings, individually overridable via a YAML config file.
SLEEP = 0.5
CACHE_FILE = ".cache.json"
AUTHOR_FORMAT = "%C (%N)"
DATE_FORMAT = "%y-%m-%d"
CONFIG_FILE = "config.yml"
if os.path.exists(CONFIG_FILE):
    # Bug fix: yaml.load() was previously called on the *filename string*,
    # which parses the literal text "config.yml" as a YAML document (yielding
    # a plain str) instead of reading the file; the subsequent .get() calls
    # would then raise AttributeError.  Open the file and parse its contents.
    # safe_load also avoids executing arbitrary YAML tags from the file.
    with open(CONFIG_FILE) as fh:
        c = yaml.safe_load(fh) or {}  # empty file parses to None
    SLEEP = c.get("sleep") or SLEEP
    CACHE_FILE = c.get("cache_file") or CACHE_FILE
    AUTHOR_FORMAT = c.get("author_format") or AUTHOR_FORMAT
    DATE_FORMAT = c.get("date_format") or DATE_FORMAT
|
from typing import List
class Solution:
    """LeetCode 26 — remove duplicates from a sorted array, in place."""

    def removeDuplicates(self, nums: List[int]) -> int:
        """Compact sorted ``nums`` so each value appears once; return that count.

        Only the first ``k`` entries of ``nums`` are meaningful afterwards,
        where ``k`` is the return value.  O(n) time, O(1) extra space.
        """
        # Bug fix: the original unconditionally returned p + 1, reporting
        # 1 unique element for an empty input list.
        if not nums:
            return 0
        write = 0  # index of the last unique value written so far
        for read in range(1, len(nums)):
            if nums[read] != nums[write]:
                write += 1
                nums[write] = nums[read]
        return write + 1
if __name__ == "__main__":
    # Quick manual check against the two classic sample inputs.
    solver = Solution()
    for sample in ([1, 1, 2], [0, 0, 1, 1, 1, 2, 2, 3, 3, 4]):
        count = solver.removeDuplicates(sample)
        print(count, ", nums = ", sample[0:count])
|
# -*- coding: utf-8 -*-
import numpy as np
from enum import Enum, auto, unique
from collections import namedtuple, OrderedDict
from copy import deepcopy
from .affine import change_of_basis, transform
from .atom_data import (
ELEM_TO_MAGMOM,
ELEM_TO_MASS,
ELEM_TO_NAME,
NAME_TO_ELEM,
ELEM_TO_NUM,
NUM_TO_ELEM,
chemical_symbols,
atomic_names,
)
from .lattice import Lattice
# TODO: make this class an enumeration?
class Element:
    """
    Class representing an abstract chemical element, but no particular atom.

    Gives access to elemental properties such as atomic number, atomic mass,
    and the full element name.

    Parameters
    ----------
    element : str, int, or Element
        Elemental symbol (e.g. "He"), element name (e.g. "Helium"),
        atomic number, or another `Element` instance.

    Raises
    ------
    ValueError : if the element is not valid.
    """

    valid_symbols = frozenset(chemical_symbols)
    valid_names = frozenset(atomic_names)

    def __init__(self, element, *args, **kwargs):
        # Normalize the many accepted input forms down to a symbol string.
        if isinstance(element, Element):
            element = element.symbol
        elif isinstance(element, int):
            try:
                element = NUM_TO_ELEM[element]
            except KeyError:
                raise ValueError(f"Atomic number {element} not supported.")
        # Title-case so "helium", "HELIUM", and "he" all resolve correctly.
        identifier = str(element).title()
        if identifier in self.valid_symbols:
            self.element = identifier
        elif identifier in self.valid_names:
            self.element = NAME_TO_ELEM[identifier]
        else:
            raise ValueError(f"Element {identifier} is not valid.")

    def __str__(self):
        return self.symbol

    def __repr__(self):
        return f"< {self.name} >"

    def __eq__(self, other):
        if isinstance(other, Element):
            return self.element == other.element
        return NotImplemented

    def __hash__(self):
        # hash(int) == int in CPython today, but go through hash() in case
        # the atomic number ever stops being a plain int.
        return hash(self.atomic_number)

    @property
    def element_full(self):
        """Alias for :attr:`name`, e.g. "Hydrogen"."""
        return self.name

    @property
    def name(self):
        """Full element name, e.g. "Hydrogen"."""
        return ELEM_TO_NAME[self.element]

    @property
    def symbol(self):
        """Elemental symbol, e.g. "He"."""
        return self.element

    @property
    def atomic_number(self):
        """Atomic number."""
        return ELEM_TO_NUM[self.element]

    @property
    def mass(self):
        """Atomic mass [u]."""
        return ELEM_TO_MASS[self.element]

    @property
    def magnetic_moment_ground(self):
        """Ground-state magnetic moment."""
        return ELEM_TO_MAGMOM[self.element]
class Atom(Element):
    """
    Container object for atomic data.

    Parameters
    ----------
    element : str or int
        Chemical element symbol or atomic number.
    coords : array-like, shape (3,)
        Coordinates of the atom in fractional form.
    lattice : Lattice or array-like, shape (3,3)
        Lattice on which the atom is positioned.
    displacement : array-like or None, optional
        Atomic maximum displacement [Angs].
    magmom : float, optional
        Magnetic moment. If None (default), the ground-state magnetic moment is used.
    occupancy : float, optional
        Fractional occupancy. 1.0 by default.
    tag : int or None, optional
        Tag an atom with a unique identifier. Useful to keep track of atom order, for example
        in PWSCF output files. This is mostly for internal use.
    electronic_structure : ElectronicStructure or None, optional
        Electronic orbital structure for this atom. If `None` (default), the ground
        state for this element will be used.
    """

    # Because of the possibility of a large number of atoms (> 1e6), we use the __slots__
    # mechanism to make Atom objects as small as possible.
    # NOTE(review): the base class Element defines no __slots__, so instances
    # still carry a __dict__; the slots below document the attribute set and
    # speed up access, but do not fully eliminate the per-instance dict.
    __slots__ = (
        "element",
        "coords_fractional",
        "displacement",
        "magmom",
        "occupancy",
        "lattice",
        "tag",  # bug fix: was assigned in __init__ but missing from __slots__
        "electronic_structure",
    )

    def __init__(
        self,
        element,
        coords,
        lattice=None,
        displacement=None,
        magmom=None,
        occupancy=1.0,
        tag=None,
        electronic_structure=None,
        **kwargs,
    ):
        super().__init__(element=element)
        # np.asfarray was removed in NumPy 2.0; asarray(..., dtype=float) is
        # the supported equivalent.
        self.coords_fractional = np.asarray(coords, dtype=float)
        # Explicit None-checks below fix two bugs:
        #   * `lattice or ...` raised ValueError for array-like (3,3) input,
        #     because ndarray truthiness is ambiguous — yet array-like
        #     lattices are documented as supported;
        #   * `magmom or ...` silently replaced a legitimate magnetic moment
        #     of 0.0 with the ground-state value.
        self.lattice = lattice if lattice is not None else Lattice(np.eye(3))
        self.displacement = np.asarray(
            displacement if displacement is not None else (0, 0, 0), dtype=float
        )
        self.magmom = magmom if magmom is not None else self.magnetic_moment_ground
        self.occupancy = occupancy
        self.tag = tag
        self.electronic_structure = (
            electronic_structure
            if electronic_structure is not None
            else ElectronicStructure.ground_state(element)
        )

    def __repr__(self):
        x, y, z = tuple(self.coords_fractional)
        return f"< Atom {self.element:<2} @ ({x:.2f}, {y:.2f}, {z:.2f}) | [{str(self.electronic_structure)}] >"

    def __eq__(self, other):
        if isinstance(other, Atom):
            return (
                super().__eq__(other)
                and (self.magmom == other.magmom)
                and distance_fractional(self, other) < 1e-3
                and (self.lattice == other.lattice)
                and np.allclose(self.displacement, other.displacement, atol=1e-3)
                and (self.electronic_structure == other.electronic_structure)
            )
        return NotImplemented

    def __hash__(self):
        # Coordinates/displacement are rounded so that atoms equal under the
        # tolerances used in __eq__ tend to hash identically.
        return hash(
            (
                super().__hash__(),
                self.magmom,
                tuple(np.round(self.coords_fractional, 4)),
                self.lattice,
                tuple(np.round(self.displacement, 3)),
                hash(self.electronic_structure),
            )
        )

    @classmethod
    def from_ase(cls, atom):
        """
        Returns an Atom instance from an ASE atom

        Parameters
        ----------
        atom : ase.Atom
        """
        lattice = np.eye(3)
        if atom.atoms is not None:
            lattice = np.array(atom.atoms.cell)
        return cls(
            element=atom.symbol,
            coords=frac_coords(atom.position, lattice),
            magmom=atom.magmom,
        )

    @property
    def coords_cartesian(self):
        """
        Real-space position of the atom on the lattice, in Angstroms.

        Returns
        -------
        pos : `~numpy.ndarray`, shape (3,)
            Atomic position

        Raises
        ------
        RuntimeError : if this atom is not placed on a lattice
        """
        return real_coords(self.coords_fractional, self.lattice.lattice_vectors)

    def transform(self, *matrices):
        """
        Return an Atom with fractional coordinates transformed according to symmetry operators.

        Parameters
        ----------
        matrices : ndarrays, shape {(3,3), (4,4)}
            Transformation matrices.

        Returns
        -------
        atm : Atom
            Transformed atom. The original atom is left unchanged.
        """
        coords_fractional = np.array(self.coords_fractional, copy=True)
        for matrix in matrices:
            coords_fractional = transform(matrix, coords_fractional)
        new_atom = deepcopy(self)
        new_atom.coords_fractional[:] = coords_fractional
        return new_atom

    def __array__(self, *args, **kwargs):
        """ Returns an array [Z, x, y, z] """
        arr = np.empty(shape=(4,), *args, **kwargs)
        arr[0] = self.atomic_number
        arr[1::] = self.coords_fractional
        return arr
def real_coords(frac_coords, lattice_vectors):
    """
    Convert fractional coordinates into real-space coordinates.

    Parameters
    ----------
    frac_coords : array-like, shape (3,)
        Fractional coordinates
    lattice_vectors : list of ndarrays, shape (3,)
        Lattice vectors of the crystal.

    Returns
    -------
    coords : ndarray, shape (3,)
    """
    # Basis change: crystal basis -> standard (identity) basis.
    to_cartesian = change_of_basis(np.array(lattice_vectors), np.eye(3))
    return transform(to_cartesian, frac_coords)
def frac_coords(real_coords, lattice_vectors):
    """
    Convert real-space coordinates into fractional coordinates.

    Parameters
    ----------
    real_coords : array-like, shape (3,)
        Real-space coordinates
    lattice_vectors : list of ndarrays, shape (3,)
        Lattice vectors of the crystal.

    Returns
    -------
    coords : ndarray, shape (3,)
    """
    # Basis change: standard (identity) basis -> crystal basis.
    to_fractional = change_of_basis(np.eye(3), np.array(lattice_vectors))
    return transform(to_fractional, real_coords)
def distance_fractional(atm1, atm2):
    """
    Distance between two atoms, measured in fractional coordinates.

    Parameters
    ----------
    atm1, atm2 : ``crystals.Atom``

    Returns
    -------
    dist : float
        Fractional distance between atoms.

    Raises
    ------
    RuntimeError : if atoms are not associated with the same lattice.
    """
    # Fractional separations only make sense within a single lattice.
    if atm1.lattice != atm2.lattice:
        message = "Distance is undefined if atoms are sitting on different lattices."
        raise RuntimeError(message)
    separation = atm1.coords_fractional - atm2.coords_fractional
    return np.linalg.norm(separation)
def distance_cartesian(atm1, atm2):
    """
    Distance between two atoms, measured in cartesian coordinates.

    Parameters
    ----------
    atm1, atm2 : ``crystals.Atom``

    Returns
    -------
    dist : float
        Cartesian distance between atoms in Angstroms.

    Raises
    ------
    RuntimeError : if atoms are not associated with the same lattice.
    """
    # Cartesian positions are derived from the lattice; mixing lattices
    # would silently compare incompatible frames.
    if atm1.lattice != atm2.lattice:
        message = "Distance is undefined if atoms are sitting on different lattices."
        raise RuntimeError(message)
    separation = atm1.coords_cartesian - atm2.coords_cartesian
    return np.linalg.norm(separation)
def is_element(element):
    """
    Create a predicate that checks whether an atom is of a certain element.

    Parameters
    ----------
    element : str, int, or Element
        Elemental symbol (e.g. "He"), atomic number, or Element instance.

    Returns
    -------
    func : callable
        Function usable to check whether a `crystals.Atom` instance is of
        the requested element.

    Examples
    --------
    >>> is_vanadium = is_element('V') # is_vanadium is a function
    >>> atm = Atom('V', [0,0,0])
    >>> is_vanadium(atm)
    True
    """
    # Normalize once, up front, so the closure compares plain atomic numbers.
    target = element if isinstance(element, Element) else Element(element)

    def _is_element(atm):
        return atm.atomic_number == target.atomic_number

    return _is_element
@unique
class Orbital(Enum):
    """
    Enumeration of electronic orbitals, used to describe atomic
    orbital structure.

    We note that `Orbital` instances are ordered according to the Madelung rule.

    Examples
    --------
    >>> Orbital("1s")
    <Orbital.one_s: '1s'>

    >>> Orbital.four_p == Orbital("4p")
    True
    """

    # It is important that the Orbitals are listed in the order that they
    # are filled (Madelung rule) because this is how __lt__ is defined.
    one_s = "1s"
    two_s = "2s"
    two_p = "2p"
    three_s = "3s"
    three_p = "3p"
    four_s = "4s"
    three_d = "3d"
    four_p = "4p"
    five_s = "5s"
    four_d = "4d"
    five_p = "5p"
    six_s = "6s"
    four_f = "4f"
    five_d = "5d"
    six_p = "6p"
    seven_s = "7s"
    five_f = "5f"
    six_d = "6d"
    seven_p = "7p"

    def __lt__(self, other):
        """Orbitals compare by Madelung (definition) order."""
        # Bug fix: comparing with a non-Orbital used to raise ValueError
        # (from list.index).  Returning NotImplemented lets Python raise
        # the conventional TypeError instead.
        if not isinstance(other, Orbital):
            return NotImplemented
        # list(Orbital) iterates members in definition order, which *is*
        # the Madelung filling order (see note above the members).
        madelung_rule = list(Orbital)
        return madelung_rule.index(self) < madelung_rule.index(other)

    @classmethod
    def maximum_electrons(cls, shell):
        """
        Maximum number of electrons that can be placed in an orbital.

        Parameters
        ----------
        shell : Orbital or str

        Returns
        -------
        max : int
        """
        shell = cls(shell)
        # Capacity depends only on the subshell letter (2 * (2l + 1)).
        maxima = {
            "s": 2,
            "p": 6,
            "d": 10,
            "f": 14,
        }
        return maxima[shell.value[-1]]
# To print superscript digits in electronic structures.
# Note that this requires UTF-8 output.
superscript_map = {
    "0": "⁰",
    "1": "¹",
    "2": "²",
    "3": "³",
    "4": "⁴",
    "5": "⁵",
    "6": "⁶",
    "7": "⁷",
    "8": "⁸",
    "9": "⁹",
}

# str.maketrans accepts a dict of single-character mappings directly,
# producing the same table as the two-string form.
superscript_trans = str.maketrans(superscript_map)
class ElectronicStructure:
    """
    Description of the atomic orbital structure.

    Parameters
    ----------
    shells : dict[Orbital,int]
        Dictionary containing the number of electrons in each Orbital, e.g. `{"1s": 2}`.

    Raises
    ------
    ValueError : if the electronic structure is not representable

    Examples
    --------
    Electronic structures can be specified by hand:

    >>> ElectronicStructure({"1s":2, "2s":2, "2p":2})
    < ElectronicStructure: 1s²2s²2p² >

    A shortcut exists for atomic ground states:

    >>> ElectronicStructure.ground_state("Ti")
    < ElectronicStructure: 1s²2s²2p⁶3s²3p⁶4s²3d² >

    Notes
    -----
    Shells are allowed to be filled out of order deliberately, given that unusual
    electronic structures can arise from ultrafast photoexcitation.
    """
    def __init__(self, shells):
        # Normalize string keys ("1s") to Orbital members up front.
        shells = {Orbital(k): v for k, v in shells.items()}
        # Subclassing OrderedDict causes problems with pickling
        # Instead, we dress this class on top of an OrderedDict property.
        self._structure = OrderedDict([])
        # Route every entry through __setitem__ so occupancy limits apply.
        for k, v in shells.items():
            self.__setitem__(k, v)
    def __setitem__(self, key, value):
        """Set the electron count for an orbital, validating capacity."""
        # We check that the number of electrons in each Orbital does not
        # go above maximum possible.
        shell = Orbital(key)
        maximum_allowed_electrons = Orbital.maximum_electrons(shell)
        if value > maximum_allowed_electrons:
            raise ValueError(
                f"There cannot be {value} electrons in orbital {shell.value}"
            )
        self._structure.__setitem__(shell, value)
        # We ensure that orbital order is maintained
        # by re-creating the OrderedDict, filled
        self._structure = OrderedDict(sorted(self._structure.items()))
    def __getitem__(self, key):
        """Return the electron count for an orbital (0 if unoccupied)."""
        # In case the key doesn't exist, we return 0
        # (i.e. 0 electrons in this orbital) because this allows
        # to add electrons in-place, e.g.:
        # >>> struct = ElectronicStructure({"1s":2})
        # >>> struct["2p"] += 1
        # even though there were no electrons there.
        key = Orbital(key)
        try:
            return self._structure.__getitem__(key)
        except KeyError:
            return 0
    def __str__(self):
        # Render e.g. "1s²2s²" using the module-level superscript table.
        result = ""
        for shell, occ in self._structure.items():
            result += shell.value + f"{occ}".translate(superscript_trans)
        return result
    def __repr__(self):
        return f"< ElectronicStructure: {str(self)} >"
    def __hash__(self):
        # Hash and equality both go through the canonical string form,
        # so equal structures hash identically by construction.
        return hash(str(self))
    def __eq__(self, other):
        return str(self) == str(other)
    @classmethod
    def ground_state(cls, element):
        """
        Ground state electronic structure for a particular element.

        Parameters
        ----------
        element : Element, str, or int
            Element, Symbol, or atomic number.

        Returns
        -------
        structure : ElectronicStructure
            Return the ground state electronic structure for a particular element.

        Examples
        --------
        >>> ElectronicStructure.ground_state("Ne")
        < ElectronicStructure: 1s²2s²2p⁶ >
        """
        element = Element(element)
        num_elec = element.atomic_number
        structure = dict()
        # Fill orbitals in Madelung order until the electrons run out.
        for shell in Orbital:
            shell_elec = min([Orbital.maximum_electrons(shell), num_elec])
            structure[shell] = shell_elec
            num_elec -= shell_elec
            if num_elec == 0:
                break
        return cls(structure)
    @property
    def outer_shell(self):
        """ The outermost shell, or valence orbital. """
        # _structure is kept sorted, so the last key is the outermost shell.
        shells = list(self._structure.keys())
        return shells[-1]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.