# Copyright 2021 The Duet Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import collections
import contextlib
import functools
import inspect
from concurrent.futures import CancelledError
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Deque,
Dict,
List,
Optional,
Set,
Tuple,
TypeVar,
)
import duet.impl as impl
from duet.aitertools import aenumerate, aiter, AnyIterable, AsyncCollector
from duet.futuretools import AwaitableFuture
T = TypeVar("T")
U = TypeVar("U")
try:
asynccontextmanager = contextlib.asynccontextmanager
except AttributeError:
    # In python 3.6 asynccontextmanager isn't available from the standard library, so we use an
    # equivalent third-party implementation.
from aiocontext import async_contextmanager
asynccontextmanager = async_contextmanager
def run(func: Callable[..., Awaitable[T]], *args, **kwds) -> T:
"""Run an async function to completion.
Args:
func: The async function to run.
*args: Positional arguments to pass to func.
**kwds: Keyword arguments to pass to func.
Returns:
The final result of the async function.
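
    For example (an illustrative sketch):

        async def add(a, b):
            return a + b

        assert run(add, 1, 2) == 3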
"""
with impl.Scheduler() as scheduler:
task = scheduler.spawn(func(*args, **kwds))
return task.result
def sync(f: Callable[..., Awaitable[T]]) -> Callable[..., T]:
"""Decorator that adds a sync version of async function or method."""
sig = inspect.signature(f)
first_arg = next(iter(sig.parameters), None)
if first_arg == "self" or first_arg == "cls":
# For class or instance methods, look up the method to call on the given
        # class or instance. This ensures that we call the right method even if it
# has been overridden in a subclass. To illustrate, consider:
#
# class Parent:
# async def foo(self): ...
# foo_sync = duet.sync(foo)
#
# class Child(Parent):
# async def foo(self): ...
#
# A manual implementation of foo_sync would call duet.run(self.foo) so
# that Child().foo_sync() would call Child.foo instead of Parent.foo.
# We want the foo_sync wrapper to work the same way. But the wrapper
# was called with Parent.foo only, so we must look up the appropriate
# function by name at runtime, using getattr.
@functools.wraps(f)
def wrapped(self_or_cls, *args, **kw):
method = getattr(self_or_cls, f.__name__, None)
if inspect.ismethod(method) and id(method.__func__) == wrapped_id:
return run(f, self_or_cls, *args, **kw)
return run(method, *args, **kw)
wrapped_id = id(wrapped)
else:
@functools.wraps(f)
def wrapped(*args, **kw):
return run(f, *args, **kw)
return wrapped
def awaitable(value):
"""Wraps a value to ensure that it is awaitable."""
if inspect.isawaitable(value):
return value
if AwaitableFuture.isfuture(value):
return AwaitableFuture.wrap(value)
return _awaitable_value(value)
async def _awaitable_value(value):
return value
def awaitable_func(function):
"""Wraps a function to ensure that it returns an awaitable."""
if inspect.iscoroutinefunction(function):
return function
if inspect.isgeneratorfunction(function):
raise TypeError(
"cannot use generator function with duet; please convert to "
f"async function instead: {function.__name__}"
)
@functools.wraps(function)
async def wrapped(*args, **kw):
return await awaitable(function(*args, **kw))
return wrapped
async def pmap_async(
func: Callable[[T], Awaitable[U]],
iterable: AnyIterable[T],
limit: Optional[int] = None,
) -> List[U]:
"""Apply an async function to every item in iterable.
Args:
func: Async function called for each element in iterable.
iterable: Iterated over to produce values that are fed to func.
limit: The maximum number of function calls to make concurrently.
Returns:
List of results of all function calls.
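
    For example (an illustrative sketch; `fetch_page` stands in for any async
    function of one argument):

        pages = await pmap_async(fetch_page, urls, limit=10)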
"""
async with new_scope() as scope:
return [x async for x in pmap_aiter(scope, func, iterable, limit)]
pmap = sync(pmap_async)
async def pstarmap_async(
func: Callable[..., Awaitable[U]],
iterable: AnyIterable[Any],
limit: Optional[int] = None,
) -> List[U]:
"""Apply an async function to every tuple of args in iterable.
Args:
func: Async function called with each tuple of args in iterable.
iterable: Iterated over to produce arg tuples that are fed to func.
limit: The maximum number of function calls to make concurrently.
Returns:
List of results of all function calls.
"""
return await pmap_async(lambda args: func(*args), iterable, limit)
pstarmap = sync(pstarmap_async)
async def pmap_aiter(
scope: "Scope",
func: Callable[[T], Awaitable[U]],
iterable: AnyIterable[T],
limit: Optional[int] = None,
) -> AsyncIterator[U]:
"""Apply an async function to every item in iterable.
Args:
scope: Scope in which the returned async iterator must be used.
func: Async function called for each element in iterable.
iterable: Iterated over to produce values that are fed to func.
limit: The maximum number of function calls to make concurrently.
Returns:
Asynchronous iterator that yields results in order as they become
available.
"""
collector = AsyncCollector[Tuple[int, U]]()
async def task(i, arg, slot):
try:
value = await func(arg)
collector.add((i, value))
finally:
slot.release()
async def generate():
try:
limiter = Limiter(limit)
async with new_scope() as gen_scope:
async for i, arg in aenumerate(iterable):
slot = await limiter.acquire()
gen_scope.spawn(task, i, arg, slot)
except Exception as e:
collector.error(e)
else:
collector.done()
scope.spawn(generate)
buffer: Dict[int, U] = {}
next_idx = 0
async for i, value in collector:
buffer[i] = value
while next_idx in buffer:
yield buffer.pop(next_idx)
next_idx += 1
while buffer:
yield buffer.pop(next_idx)
next_idx += 1
def pstarmap_aiter(
scope: "Scope",
func: Callable[..., Awaitable[U]],
iterable: AnyIterable[Any],
limit: Optional[int] = None,
) -> AsyncIterator[U]:
"""Apply an async function to every tuple of args in iterable.
Args:
scope: Scope in which the returned async iterator must be used.
func: Async function called with each tuple of args in iterable.
iterable: Iterated over to produce arg tuples that are fed to func.
limit: The maximum number of function calls to make concurrently.
Returns:
Asynchronous iterator that yields results in order as they become
available.
"""
return pmap_aiter(scope, lambda args: func(*args), iterable, limit)
async def sleep(time: float) -> None:
"""Sleeps for the given length of time in seconds."""
try:
async with timeout_scope(time):
await AwaitableFuture()
except TimeoutError:
pass
@asynccontextmanager
async def deadline_scope(deadline: float) -> AsyncIterator[None]:
"""Enter a scope that will exit when the deadline elapses.
Args:
deadline: Absolute time in epoch seconds when the scope should exit.
"""
async with new_scope(deadline=deadline):
yield
@asynccontextmanager
async def timeout_scope(timeout: float) -> AsyncIterator[None]:
"""Enter a scope that will exit when the timeout elapses.
Args:
timeout: Time in seconds from now when the scope should exit.
"""
async with new_scope(timeout=timeout):
yield
@asynccontextmanager
async def new_scope(
*, deadline: Optional[float] = None, timeout: Optional[float] = None
) -> AsyncIterator["Scope"]:
"""Creates a scope in which asynchronous tasks can be launched.
This is inspired by the concept of "nurseries" in trio:
https://trio.readthedocs.io/en/latest/reference-core.html#nurseries-and-spawning
We define the lifetime of a scope using an `async with` statement. Inside
this block we can then spawn new asynchronous tasks which will run in the
background, and the block will only exit when all spawned tasks are done.
If an error is raised by the code in the block itself or by any of the
spawned tasks, all other background tasks will be interrupted and the block
will raise an error.
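
    For example (an illustrative sketch; `do_work` stands in for any async
    function):

        async with new_scope() as scope:
            for arg in args:
                scope.spawn(do_work, arg)
        # The async with block exits only once every spawned task has finished.
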
Args:
deadline: Absolute time in epoch seconds when the scope should exit.
timeout: Time in seconds from now when the scope should exit. If both
deadline and timeout are given, the actual deadline will be
whichever one will elapse first.
"""
main_task = impl.current_task()
scheduler = main_task.scheduler
tasks: Set[impl.Task] = set()
async def finish_tasks():
while True:
await impl.any_ready(tasks)
tasks.intersection_update(scheduler.active_tasks)
if not tasks:
break
if timeout is not None:
if deadline is None:
deadline = scheduler.time() + timeout
else:
deadline = min(deadline, scheduler.time() + timeout)
if deadline is not None:
main_task.push_deadline(deadline)
try:
yield Scope(main_task, scheduler, tasks)
await finish_tasks()
except (impl.Interrupt, Exception) as exc:
# Interrupt remaining tasks.
for task in tasks:
if not task.done:
task.interrupt(main_task, RuntimeError("scope exited"))
# Finish remaining tasks while ignoring further interrupts.
main_task.interruptible = False
await finish_tasks()
main_task.interruptible = True
# If interrupted, raise the underlying error but suppress the context
# (the Interrupt itself) when displaying the traceback.
if isinstance(exc, impl.Interrupt):
exc = exc.error
exc.__suppress_context__ = True
raise exc
finally:
if deadline is not None:
main_task.pop_deadline()
class Scope:
"""Bounds the lifetime of async tasks spawned in the background."""
def __init__(
self, main_task: impl.Task, scheduler: impl.Scheduler, tasks: Set[impl.Task]
) -> None:
self._main_task = main_task
self._scheduler = scheduler
self._tasks = tasks
def cancel(self) -> None:
self._main_task.interrupt(self._main_task, CancelledError())
def spawn(self, func: Callable[..., Awaitable[Any]], *args, **kwds) -> None:
"""Starts a background task that will run the given function."""
task = self._scheduler.spawn(self._run(func, *args, **kwds), main_task=self._main_task)
self._tasks.add(task)
async def _run(self, func: Callable[..., Awaitable[Any]], *args, **kwds) -> None:
task = impl.current_task()
try:
await func(*args, **kwds)
finally:
self._tasks.discard(task)
class Limiter:
"""Limits concurrent access to critical resources or code blocks.
A Limiter is created with a fixed capacity (or None to indicate no limit),
and can then be used with async with blocks to limit access, e.g.:
limiter = Limiter(10)
...
async with limiter:
# At most 10 async calls can be in this section at once.
...
In certain situations, it may not be possible to use async with blocks to
demarcate the critical section. In that case, one can instead call acquire
to get a "slot" that must be released later when done using the resource:
limiter = Limiter(10)
...
slot = await limiter.acquire()
...
slot.release()
"""
def __init__(self, capacity: Optional[int]) -> None:
self._capacity = capacity
self._count = 0
self._waiters: Deque[AwaitableFuture[None]] = collections.deque()
self._available_waiters: List[AwaitableFuture[None]] = []
def is_available(self) -> bool:
"""Returns True if the limiter is available, False otherwise."""
return self._capacity is None or self._count < self._capacity
async def __aenter__(self) -> None:
if not self.is_available():
f = AwaitableFuture[None]()
self._waiters.append(f)
await f
self._count += 1
async def acquire(self) -> "Slot":
await self.__aenter__()
return Slot(self._release)
async def __aexit__(self, exc_type, exc, tb) -> None:
self._release()
def _release(self):
self._count -= 1
if self._waiters:
f = self._waiters.popleft()
f.try_set_result(None)
if self._available_waiters:
for f in self._available_waiters:
f.try_set_result(None)
self._available_waiters = []
async def available(self) -> None:
"""Wait until this limiter is available (i.e. not full to capacity).
Note that this always yields control to the scheduler, even if the
limiter is currently available, to ensure that throttled iterators do
not race ahead of downstream work.
"""
f = AwaitableFuture[None]()
if self.is_available():
f.set_result(None)
else:
self._available_waiters.append(f)
await f
async def throttle(self, iterable: AnyIterable[T]) -> AsyncIterator[T]:
async for value in aiter(iterable):
await self.available()
yield value
@property
def capacity(self) -> Optional[int]:
return self._capacity
@capacity.setter
def capacity(self, capacity: int) -> None:
self._capacity = capacity
class Slot:
def __init__(self, release_func):
self.release_func = release_func
self.called = False
def release(self):
if self.called:
raise Exception("Already released.")
self.called = True
self.release_func()
class LimitedScope(abc.ABC):
"""Combined Scope (for running async iters) and Limiter (for throttling).
Provides convenience methods for running coroutines in parallel within this
scope while throttling to prevent iterators from running too far ahead.
"""
@property
@abc.abstractmethod
def scope(self) -> Scope:
pass
@property
@abc.abstractmethod
def limiter(self) -> Limiter:
pass
def spawn(self, func: Callable[..., Awaitable[Any]], *args, **kwds) -> None:
"""Starts a background task that will run the given function."""
self.scope.spawn(func, *args, **kwds)
async def pmap_async(
self,
func: Callable[[T], Awaitable[U]],
iterable: AnyIterable[T],
) -> List[U]:
return [x async for x in self.pmap_aiter(func, iterable)]
def pmap_aiter(
self,
func: Callable[[T], Awaitable[U]],
iterable: AnyIterable[T],
) -> AsyncIterator[U]:
return pmap_aiter(self.scope, func, self.limiter.throttle(iterable))
async def pstarmap_async(
self,
func: Callable[..., Awaitable[U]],
iterable: AnyIterable[Any],
) -> List[U]:
return [x async for x in self.pstarmap_aiter(func, iterable)]
def pstarmap_aiter(
self,
func: Callable[..., Awaitable[U]],
iterable: AnyIterable[Any],
) -> AsyncIterator[U]:
return pstarmap_aiter(self.scope, func, self.limiter.throttle(iterable))
|
"""
Nisarg Shah
1001553132
"""
import sys
import math
import numpy as np
from statistics import stdev as stdev
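# Gaussian (normal) probability density function; used below as the per-attribute
# likelihood P(x | class) in the naive Bayes classifier.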
def gaussian(x, mean=0.0, sigma=1.0):
temp = float((x-mean)/sigma)
e_factor = np.exp(-(np.power(temp,2) / 2))
deno = sigma*(np.sqrt(2*np.pi))
return e_factor / deno
def naive_bayes(train_file, test_file):
#print(train_file, test_file)
    try:
        import pandas as pd
        train_data = pd.read_csv(train_file, header=None)
        test_data = pd.read_csv(test_file, header=None)
        # Training features/labels come from the training file,
        # test features/labels from the test file.
        X_train = train_data.iloc[:, :-1].values.astype(float)
        y_train = train_data.iloc[:, -1].values.astype(int)
        X_test = test_data.iloc[:, :-1].values.astype(float)
        y_test = test_data.iloc[:, -1].values.astype(int)
        #print("From Pandas")
    except Exception:
        train_data = np.genfromtxt(train_file)
        test_data = np.genfromtxt(test_file)
        X_train = train_data[:, :-1].astype(float)
        y_train = train_data[:, -1].astype(int)
        X_test = test_data[:, :-1].astype(float)
        y_test = test_data[:, -1].astype(int)
    # Separating training examples and labels.
#print(X_train)
class_means = []
class_std = []
#print(X_train[0][1])
indexes = []
y_train = np.asarray(y_train)
for i in range(1,11):
x = np.where(y_train == i)
#print(x)
x = np.asarray(x)
temp = []
#print(x[0])
for j in range(0, x.shape[1]):
temp.append(X_train[x[0][j],:])
#print(temp)
temp = np.asarray(temp)
for j in range(0,8):
temp2 = temp[:,j]
temp2 = np.asarray(temp2)
mean = temp2.mean()
#std = temp2.std()
std = stdev(temp2)
if std<=0.01:
std = 0.01
class_means.append(mean)
class_std.append(std)
#print("mean %.2f std %.2f" %(mean, std))
a = 0
for i in range(1,11):
for j in range(1,9):
print("Class %d, attribute %d, mean = %.2f, std = %.2f" % (i, j, class_means[a], class_std[a]))
a+=1
#Finding the prior Probability for each class.
num_classes = len(np.unique(y_train))
#print(num_classes)
#print(y_train.min())
min_class = y_train.min()
prior_prob = []
training_examples = (X_train.shape[0])
for i in range(min_class,num_classes+1):
ind = np.where(y_train == i)
ind = np.asarray(ind)
p_C = ind.shape[1]/training_examples
prior_prob.append(p_C)
#print(" Class ",i, "Prior Probability " ,p_C)
prior_prob = np.asarray(prior_prob)
final_prob = []
class_final = []
a = 0
q=0
p_x_given_C = 1
###Classification Stage:
for i in range(0,len(y_test)):
temp_prob = []
for j in range(0, num_classes):
for k in range(0,X_train.shape[1]):
attr = X_test[i][k]
p_x_given_C *= gaussian(attr, class_means[a], class_std[a])
#print(p_x_given_C)
a+=1
prob = p_x_given_C*prior_prob[j]
#print(p_x_given_C)
#print(prob)
temp_prob.append(prob)
p_x_given_C = 1
a=0
q=0
#print(X_train.shape[1])
temp_prob[:] = [x/np.sum(temp_prob) for x in temp_prob]
final_prob.append(max(temp_prob))
#print(final_prob)
class_final.append(temp_prob.index(max(temp_prob)))
temp_prob.clear()
acc = []
for i in range(0,len(class_final)):
if class_final[i]+1 == y_test[i]:
acc.append(1)
else:
acc.append(0)
print("ID=%5d, predicted=%3d, probability = %.4f, true=%3d, accuracy=%4.2f\n"%(i+1, class_final[i]+1, final_prob[i], y_test[i], acc[i]))
P=1
acc = np.asarray(acc)
print("Accuracy : %.4f"%(np.sum(acc)/len(acc)) )
if __name__ == "__main__":
naive_bayes(sys.argv[1],sys.argv[2])
|
# Helper functions used by other files
from io import BytesIO
import aiohttp
import re
import os
def convert_to_url(url):
""" Makes a search string url-friendly """
# Use regex to remove anything that isn't a number or letter
url = re.sub(r"[^\w\s]", '', url)
# Substitute spaces with a '+' symbol
url = re.sub(r"\s+", '+', url)
return url
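# Worked example: convert_to_url("hello, world!") returns "hello+world".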
async def get_img(ctx):
"""Returns the most recent attachment posted to the channel"""
# regex to check if there is an image url within the message
url_regex = r'(\b(?:(?:https?)|(?:ftp)|(?:file)):\/\/[-A-Z0-9+&@#\/%?=~_|!:,.;]*(?:(?:\.jpg)|(?:\.jpeg)|(?:\.png)|(?:\.gif)))'
log = ctx.message.channel.history(limit=50) # check last 50 messages
    async for i in log:
        if i.attachments:  # if the message has an attachment
            url = i.attachments[0].url  # attachment url
        elif re.search(url_regex, i.clean_content, flags=re.IGNORECASE):  # look for url using regex
            url = re.search(url_regex, i.clean_content, flags=re.IGNORECASE).group(0)
        else:
            continue  # no image in this message; keep looking
        try:
            async with aiohttp.ClientSession() as sess:
                async with sess.get(url) as r:  # access the url
                    if r.status == 200:
                        return BytesIO(await r.read())
        except Exception:
            pass  # skip urls that can't be fetched and keep searching
def abs_path(path):
"""Returns absolute path of file given"""
script_dir = os.path.dirname(__file__) # absolute path of helpers.py
rel_path = path # relative path
return os.path.join(script_dir, rel_path) # absolute path of file
def islambda(v):
""" Checks if v is a lambda function """
LAMBDA = lambda:0
return isinstance(v, type(LAMBDA)) and v.__name__ == LAMBDA.__name__
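# Worked example: islambda(lambda: 0) is True, while islambda(len) and
# islambda(convert_to_url) are both False (named functions, not lambdas).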
|
"""."""
from flask import Blueprint, current_app, request, jsonify
from werkzeug.exceptions import BadRequest, Unauthorized
import jwt
from arxiv import status
import arxiv.users.domain
from arxiv.base import logging
from .services import sessions
logger = logging.getLogger(__name__)
blueprint = Blueprint('authenticator', __name__, url_prefix='')
@blueprint.route('/auth', methods=['GET'])
def authorize():
"""Authorize the request."""
try:
cookie_name = current_app.config['AUTH_SESSION_COOKIE_NAME']
except KeyError as e:
raise RuntimeError('Configuration error: missing parameter') from e
# An authorization token may reside in either the Authorization header
# or in a cookie (set at login).
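    # e.g. an "Authorization: Bearer <token>" header; only the part after the
    # scheme word is used (see _authorize_from_header below).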
auth_header = request.headers.get('Authorization')
auth_cookie = request.cookies.get(cookie_name)
if auth_header: # Try the header first.
try:
auth_token = auth_header.split()[1]
except IndexError:
logger.error('Authorization header malformed')
raise BadRequest('Authorization header is malformed')
logger.debug('Got auth token: %s', auth_token)
claims = _authorize_from_header(auth_token)
elif auth_cookie: # Try the cookie second.
logger.debug('Got auth cookie: %s', auth_cookie)
claims = _authorize_from_cookie(auth_cookie)
else:
logger.error('Authorization token not found')
return jsonify({}), status.HTTP_200_OK, {}
jwt_secret = current_app.config['JWT_SECRET']
headers = {'Token': jwt.encode(claims, jwt_secret)}
return jsonify({}), status.HTTP_200_OK, headers
def _authorize_from_cookie(auth_cookie: str) -> dict:
"""Authorize the request based on an auth cookie."""
try:
session = sessions.load(auth_cookie)
except (sessions.exceptions.InvalidToken,
sessions.exceptions.ExpiredToken,
sessions.exceptions.UnknownSession):
logger.error('Invalid user session token')
raise Unauthorized('Not a valid user session token')
claims = arxiv.users.domain.to_dict(session)
return claims
def _authorize_from_header(auth_token: str) -> dict:
"""Authorize the request based on an auth token."""
try:
session = sessions.load_by_id(auth_token)
except (sessions.exceptions.InvalidToken,
sessions.exceptions.ExpiredToken,
sessions.exceptions.UnknownSession):
logger.error('Invalid auth token')
raise Unauthorized('Not a valid auth token')
claims = arxiv.users.domain.to_dict(session)
return claims
|
# red-r linux setup. Several things we need to do here.
# imports
import sys, os, subprocess
# install the required files
os.system("apt-get install python-qt4")
os.system("apt-get install python-docutils")
os.system("apt-get install python-numpy")
os.system("apt-get install python-qwt5-qt4")
os.system('apt-get install python-rpy python-dev python-numarray-ext python-numeric-ext python-matplotlib python-qt4-dev libqwt5-qt4-dev pyqt4-dev-tools sip4 python-qwt5-qt4')
# install the rpy3 and conversion libraries
os.system("python rpy3-setup/setup.py build")
import platform
if platform.architecture()[0] == "64bit":
os.system("cp -r rpy3-setup/build/lib.linux*/rpy3 linux64/rpy3")
else:
os.system("cp -r rpy3-setup/build/lib.linux*/rpy3 linux32/rpy3")
os.system("python redrrpy-setup/setup.py build")
import platform
if platform.architecture()[0] == "64bit":
os.system("cp -r redrrpy-setup/build/lib.linux*/_conversion.so linux64/redrrpy/_conversion.so")
else:
os.system("cp -r redrrpy-setup/build/lib.linux*/_conversion.so linux32/redrrpy/_conversion.so")
# output the shell script file
with open("/usr/bin/RedR", 'w') as f:
f.write("""
#!/bin/bash
# Shell wrapper for R executable.
python %s/canvas/red-RCanvas.pyw""" % os.path.abspath(os.path.split(sys.argv[0])[0]))
f.close()
os.system('chmod 755 /usr/bin/RedR')
|
"""Tests for ops.
"""
import unittest
import numpy as np
import tensorflow as tf
from tf_utils import utils
from tf_utils import ops
class TestOps(unittest.TestCase):
def test_concat_with_shape(self):
data = np.array([[0.5, 0.5, 0.0, 0.0, 0.6, 0.0],
[0.0, 0.6, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.8, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.9, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.2, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.7, 0.7, 0.7]],
dtype=np.float32)
with utils.device_cpu():
with tf.Session():
data_0 = tf.constant(data)
data_1 = tf.constant(data)
concat_0 = ops.concat_with_shape(
0, [data_0, data_1])
concat_1 = ops.concat_with_shape(
1, [data_0, data_1])
concat_0_np = concat_0.eval()
concat_1_np = concat_1.eval()
self.assertTrue(np.array_equal(concat_0_np.shape,
[14, 6]))
self.assertTrue(np.array_equal(concat_1_np.shape,
[7, 12]))
def test_mask_one_row(self):
data = np.array([[0.1, 0.1, 0.1],
[0.1, 0.05, 0.0],
[0.0, 0.0, 0.0],
[0.1, 0.2, 0.3]], dtype=np.float32)
ref = np.array([[0.1, 0.0, 0.0],
[0.1, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.3]], dtype=np.float32)
with utils.device_cpu():
with tf.Session():
res = ops.mask_argmax_row(tf.constant(data))
self.assertTrue(np.array_equal(res.eval(), ref))
if __name__ == '__main__':
unittest.main()
|
import tensorflow as tf
hparams = tf.contrib.training.HParams(
# Audio:
num_mels=80,
n_fft=1024,
sample_rate=22050,
win_length=1024,
hop_length=256,
preemphasis=0.97,
min_level_db=-100,
ref_level_db=20,
# train
lr=6e-4, #1e-3
train_steps=1000000,
epochs=100,
save_model_every=20000,
gen_test_wave_every=20000,
gen_file='./data/mel_spect/LJ001-0001.mel',
logdir_root='./logdir',
tfrecords_dir="./data/tfrecords/",
train_files = 'ljs_train',
# Evaluation tfrecords filename
eval_file = 'ljs_eval.tfrecords',
# Test tfrecords filename
test_file = 'ljs_test.tfrecords',
    decay_steps=5000,  # 8000
    sigma=1.0,  # 0.707; paper uses 1.0
# network
sample_size=16000,
batch_size=12,
upsampling_rate=256, # same as hop_length
n_flows=12,
n_group=8,
n_early_every=4,
n_early_size=2,
# local condition conv1d
lc_conv1d=False, #True
lc_conv1d_layers=2,
lc_conv1d_filter_size=5,
lc_conv1d_filter_num=80,
# local condition encoding
lc_encode=False,
lc_encode_layers=2,
lc_encode_size=128,
# upsampling by transposed conv
transposed_upsampling=True,
transposed_conv_layers=2,
    transposed_conv_layer1_stride=16,
    transposed_conv_layer2_stride=16,
    # Filter width is larger than the stride so the upsampling can use neighboring
    # local-condition frames for context.
    transposed_conv_layer1_filter_width=16 * 2,  # 1024, 16*5
    transposed_conv_layer2_filter_width=16 * 2,
transposed_conv_channels=80,
# wavenet
n_layers=8,
residual_channels=256,
skip_channels=256,
kernel_size=3,
)
|
import requests
import sys
import json
import os
import time
import logging
import tabulate
import yaml
import pandas as pd
from pandas import ExcelWriter
from logging.handlers import TimedRotatingFileHandler
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def get_logger(logfile, level):
'''
Create a logger
'''
if logfile is not None:
'''
Create the log directory if it doesn't exist
'''
fldr = os.path.dirname(logfile)
if not os.path.exists(fldr):
os.makedirs(fldr)
logger = logging.getLogger()
logger.setLevel(level)
log_format = '%(asctime)s | %(levelname)-8s | %(funcName)-20s | %(lineno)-3d | %(message)s'
formatter = logging.Formatter(log_format)
file_handler = TimedRotatingFileHandler(logfile, when='midnight', backupCount=7)
file_handler.setFormatter(formatter)
file_handler.setLevel(level)
logger.addHandler(file_handler)
return logger
return None
class Authentication:
@staticmethod
def get_jsessionid(vmanage_host, vmanage_port, username, password):
api = "/j_security_check"
base_url = "https://%s:%s"%(vmanage_host, vmanage_port)
url = base_url + api
payload = {'j_username' : username, 'j_password' : password}
response = requests.post(url=url, data=payload, verify=False)
try:
cookies = response.headers["Set-Cookie"]
jsessionid = cookies.split(";")
return(jsessionid[0])
except:
if logger is not None:
logger.error("No valid JSESSION ID returned\n")
exit()
@staticmethod
def get_token(vmanage_host, vmanage_port, jsessionid):
headers = {'Cookie': jsessionid}
base_url = "https://%s:%s"%(vmanage_host, vmanage_port)
api = "/dataservice/client/token"
url = base_url + api
response = requests.get(url=url, headers=headers, verify=False)
if response.status_code == 200:
return(response.text)
else:
return None
if __name__ == '__main__':
try:
log_level = logging.DEBUG
logger = get_logger("log/app_route_report.txt", log_level)
try:
start_date = input("Please enter start date(YYYY-MM-DD): ")
time.strptime(start_date, '%Y-%m-%d')
except ValueError:
raise ValueError("Incorrect start data format, please enter in YYYY-MM-DD")
try:
end_date = input("Please enter end date(YYYY-MM-DD): ")
time.strptime(end_date, '%Y-%m-%d')
except ValueError:
raise ValueError("Incorrect end data format, please enter in YYYY-MM-DD")
if logger is not None:
logger.info("Loading vManage login details from YAML\n")
with open("vmanage_login.yaml") as f:
config = yaml.safe_load(f.read())
vmanage_host = config["vmanage_host"]
vmanage_port = config["vmanage_port"]
username = config["vmanage_username"]
password = config["vmanage_password"]
Auth = Authentication()
jsessionid = Auth.get_jsessionid(vmanage_host,vmanage_port,username,password)
token = Auth.get_token(vmanage_host,vmanage_port,jsessionid)
if token is not None:
headers = {'Content-Type': "application/json",'Cookie': jsessionid, 'X-XSRF-TOKEN': token}
else:
headers = {'Content-Type': "application/json",'Cookie': jsessionid}
base_url = "https://%s:%s/dataservice"%(vmanage_host,vmanage_port)
# Get Device Inventory details
api_url = "/device"
url = base_url + api_url
response = requests.get(url=url, headers=headers, verify=False)
device_inv = dict()
if response.status_code == 200:
temp = response.json()["data"]
for item in temp:
if item["personality"] == "vedge":
device_inv[item["system-ip"]] = [{'hostname' : item["host-name"]} , {'siteid' : item["site-id"]}]
else:
if logger is not None:
logger.error("Failed to retrieve device inventory\n")
# Get app route statistics for tunnels between Hub routers and Spoke routers.
# open excel file
filename = 'Tunnel Statistics %s.xlsx'%time.strftime("%Y-%m-%d")
writer = ExcelWriter(filename)
for hub in config["hub_routers"]:
api_url = "/statistics/approute/fec/aggregation"
payload = {
"query": {
"condition": "AND",
"rules": [
{
"value": [
start_date+"T00:00:00 UTC",
end_date+"T23:59:59 UTC"
],
"field": "entry_time",
"type": "date",
"operator": "between"
},
{
"value": [
hub["system_ip"]
],
"field": "local_system_ip",
"type": "string",
"operator": "in"
}
]
},
"aggregation": {
"field": [
{
"property": "name",
"sequence": 1,
"size": 6000
},
{
"property": "proto",
"sequence": 2
},
{
"property": "local_system_ip",
"sequence": 3
},
{
"property": "remote_system_ip",
"sequence": 4
}
],
"histogram": {
"property": "entry_time",
"type": "hour",
"interval": 24,
"order": "asc"
},
"metrics": [
{
"property": "latency",
"type": "avg"
},
{
"property": "jitter",
"type": "avg"
},
{
"property": "loss_percentage",
"type": "avg"
},
{
"property": "vqoe_score",
"type": "avg"
}
]
}
}
url = base_url + api_url
response = requests.post(url=url, headers=headers, data=json.dumps(payload), verify=False)
if response.status_code == 200:
app_route_stats = response.json()["data"]
app_route_stats_headers = ["Date", "Hub", "Hub Siteid", "Spoke", "Spoke Siteid", "Tunnel name", "vQoE score", "Latency", "Loss percentage", "Jitter"]
table = list()
date_list = list()
hub_list = list()
hub_siteid_list = list()
spoke_list = list()
spoke_siteid_list = list()
tunnel_name_list = list()
vqoe_list = list()
latency_list = list()
loss_list = list()
jitter_list = list()
print("\nAverage App route statistics between %s and spokes for %s and %s\n"%(device_inv[hub["system_ip"]][0]['hostname'],start_date,end_date))
for item in app_route_stats:
tr = [time.strftime('%m/%d/%Y', time.gmtime(item['entry_time']/1000.)), device_inv[item['local_system_ip']][0]['hostname'], device_inv[item['local_system_ip']][1]['siteid'], device_inv[item['remote_system_ip']][0]['hostname'], device_inv[item['remote_system_ip']][1]['siteid'], item['name'], item['vqoe_score'], item['latency'], item['loss_percentage'], item['jitter']]
table.append(tr)
date_list.append(time.strftime('%m/%d/%Y', time.gmtime(item['entry_time']/1000.)))
hub_list.append(device_inv[item['local_system_ip']][0]['hostname'])
hub_siteid_list.append(device_inv[item['local_system_ip']][1]['siteid'])
spoke_list.append(device_inv[item['remote_system_ip']][0]['hostname'])
spoke_siteid_list.append(device_inv[item['remote_system_ip']][1]['siteid'])
tunnel_name_list.append(item['name'])
vqoe_list.append(item['vqoe_score'])
latency_list.append(item['latency'])
loss_list.append(item['loss_percentage'])
jitter_list.append(item['jitter'])
try:
#print(tabulate.tabulate(table, app_route_stats_headers, tablefmt="fancy_grid"))
excel_content = dict()
excel_content["Date"] = date_list
excel_content["Hub"] = hub_list
excel_content["Hub Siteid"] = hub_siteid_list
excel_content["Spoke"] = spoke_list
excel_content["Spoke Siteid"] = spoke_siteid_list
excel_content["Tunnel name"] = tunnel_name_list
excel_content["vQoE score"] = vqoe_list
excel_content["Latency"] = latency_list
excel_content["Loss percentage"] = loss_list
excel_content["Jitter"] = jitter_list
df = pd.DataFrame(excel_content)
df.to_excel(writer, device_inv[hub["system_ip"]][0]['hostname'] ,index=False)
except UnicodeEncodeError:
print(tabulate.tabulate(table, app_route_stats_headers, tablefmt="grid"))
else:
if logger is not None:
logger.error("Failed to retrieve app route statistics %s\n"%response.text)
writer.save()
print("\nCreated report %s"%filename)
except Exception as e:
print('Exception line number: {}'.format(sys.exc_info()[-1].tb_lineno), type(e).__name__, e)
|
#!/usr/bin/env python3
"""Module containing the MakeNdx class and the command line interface."""
import os
import argparse
from pathlib import Path
from biobb_common.generic.biobb_object import BiobbObject
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_common.tools.file_utils import launchlogger
from biobb_md.gromacs.common import get_gromacs_version
from biobb_md.gromacs.common import GromacsVersionError
class MakeNdx(BiobbObject):
"""
| biobb_md MakeNdx
| Wrapper of the `GROMACS make_ndx <http://manual.gromacs.org/current/onlinehelp/gmx-make_ndx.html>`_ module.
| The GROMACS make_ndx module, generates an index file using the atoms of the selection.
Args:
input_structure_path (str): Path to the input GRO/PDB/TPR file. File type: input. `Sample file <https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/data/gromacs/make_ndx.tpr>`_. Accepted formats: gro (edam:format_2033), pdb (edam:format_1476), tpr (edam:format_2333).
output_ndx_path (str): Path to the output index NDX file. File type: output. `Sample file <https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/reference/gromacs/ref_make_ndx.ndx>`_. Accepted formats: ndx (edam:format_2033).
input_ndx_path (str) (Optional): Path to the input index NDX file. File type: input. Accepted formats: ndx (edam:format_2033).
properties (dict - Python dictionary object containing the tool parameters, not input/output files):
* **selection** (*str*) - ("a CA C N O") Heavy atoms. Atom selection string.
* **gmx_lib** (*str*) - (None) Path set GROMACS GMXLIB environment variable.
* **gmx_path** (*str*) - ("gmx") Path to the GROMACS executable binary.
* **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.
* **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.
* **container_path** (*str*) - (None) Path to the binary executable of your container.
* **container_image** (*str*) - ("gromacs/gromacs:latest") Container Image identifier.
* **container_volume_path** (*str*) - ("/data") Path to an internal directory in the container.
* **container_working_dir** (*str*) - (None) Path to the internal CWD in the container.
* **container_user_id** (*str*) - (None) User number id to be mapped inside the container.
* **container_shell_path** (*str*) - ("/bin/bash") Path to the binary executable of the container shell.
Examples:
This is a use example of how to use the building block from Python::
from biobb_md.gromacs.make_ndx import make_ndx
prop = { 'selection': 'a CA C N O' }
make_ndx(input_structure_path='/path/to/myStructure.gro',
output_ndx_path='/path/to/newIndex.ndx',
properties=prop)
Info:
* wrapped_software:
* name: GROMACS MakeNdx
* version: >5.1
* license: LGPL 2.1
* ontology:
* name: EDAM
* schema: http://edamontology.org/EDAM.owl
"""
def __init__(self, input_structure_path: str, output_ndx_path: str, input_ndx_path: str = None,
properties: dict = None, **kwargs) -> None:
properties = properties or {}
# Call parent class constructor
super().__init__(properties)
# Input/Output files
self.io_dict = {
"in": {"input_structure_path": input_structure_path, "input_ndx_path": input_ndx_path},
"out": {"output_ndx_path": output_ndx_path}
}
# Properties specific for BB
self.selection = properties.get('selection', "a CA C N O")
# Properties common in all GROMACS BB
self.gmx_lib = properties.get('gmx_lib', None)
self.gmx_path = properties.get('gmx_path', 'gmx')
self.gmx_nobackup = properties.get('gmx_nobackup', True)
self.gmx_nocopyright = properties.get('gmx_nocopyright', True)
if self.gmx_nobackup:
self.gmx_path += ' -nobackup'
if self.gmx_nocopyright:
self.gmx_path += ' -nocopyright'
if not self.container_path:
self.gmx_version = get_gromacs_version(self.gmx_path)
# Check the properties
fu.check_properties(self, properties)
@launchlogger
def launch(self) -> int:
"""Execute the :class:`MakeNdx <gromacs.make_ndx.MakeNdx>` object."""
# Setup Biobb
if self.check_restart(): return 0
self.stage_files()
# Create command line
self.cmd = ['echo', '-e', '\'' + self.selection + '\\nq' + '\'', '|',
self.gmx_path, 'make_ndx',
'-f', self.stage_io_dict["in"]["input_structure_path"],
'-o', self.stage_io_dict["out"]["output_ndx_path"]
]
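        # Roughly the shell pipeline being built (the optional -n flag is appended below):
        #   echo -e '<selection>\nq' | gmx make_ndx -f <structure> -o <index>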
if self.stage_io_dict["in"].get("input_ndx_path")\
and Path(self.stage_io_dict["in"].get("input_ndx_path")).exists():
self.cmd.append('-n')
self.cmd.append(self.stage_io_dict["in"].get("input_ndx_path"))
if self.gmx_lib:
self.environment = os.environ.copy()
self.environment['GMXLIB'] = self.gmx_lib
# Check GROMACS version
if not self.container_path:
if self.gmx_version < 512:
                raise GromacsVersionError("GROMACS version should be 5.1.2 or newer; %d detected" % self.gmx_version)
fu.log("GROMACS %s %d version detected" % (self.__class__.__name__, self.gmx_version), self.out_log)
# create_cmd_line and execute_command
self.run_biobb()
# Retrieve results
self.copy_to_host()
# Remove temporal files
self.tmp_files.append(self.stage_io_dict.get("unique_dir"))
self.remove_tmp_files()
return self.return_code
def make_ndx(input_structure_path: str, output_ndx_path: str,
input_ndx_path: str = None, properties: dict = None, **kwargs) -> int:
"""Create :class:`MakeNdx <gromacs.make_ndx.MakeNdx>` class and
execute the :meth:`launch() <gromacs.make_ndx.MakeNdx.launch>` method."""
return MakeNdx(input_structure_path=input_structure_path,
output_ndx_path=output_ndx_path,
input_ndx_path=input_ndx_path,
properties=properties, **kwargs).launch()
def main():
"""Command line execution of this building block. Please check the command line documentation."""
parser = argparse.ArgumentParser(description="Wrapper for the GROMACS make_ndx module.",
formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
parser.add_argument('-c', '--config', required=False, help="This file can be a YAML file, JSON file or JSON string")
# Specific args of each building block
required_args = parser.add_argument_group('required arguments')
required_args.add_argument('--input_structure_path', required=True)
required_args.add_argument('--output_ndx_path', required=True)
parser.add_argument('--input_ndx_path', required=False)
args = parser.parse_args()
config = args.config if args.config else None
properties = settings.ConfReader(config=config).get_prop_dic()
# Specific call of each building block
make_ndx(input_structure_path=args.input_structure_path,
output_ndx_path=args.output_ndx_path,
input_ndx_path=args.input_ndx_path,
properties=properties)
if __name__ == '__main__':
main()
|
"""Replacement for Django's migrate command."""
from __future__ import unicode_literals
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import CommandError
from django.utils.translation import ugettext as _
try:
from django.core.management.commands.migrate import Command as BaseCommand
has_migrate = True
except ImportError:
from django_evolution.compat.commands import BaseCommand
has_migrate = False
class Command(BaseCommand):
"""Command for working with Django migrations.
This wraps the original ``migrate`` command. If Django Evolution is
enabled, this will call ``evolve`` with the necessary parameters for the
``migrate`` call. If disabled, this will call Django's ``migrate``.
There are some differences in our ``migrate``:
* ``--fake`` is not supported, and will show an error if used.
* ``--run-syncdb`` and ``--fake-initial`` are always implied, and cannot
be turned off.
* ``initial_data`` fixtures are not loaded (they were removed in
Django 1.9 anyway).
* ``--no-initial-data`` isn't directly handled, but since initial data
isn't supported, that doesn't impact anything.
"""
def handle(self, *args, **options):
"""Handle the command.
This will validate the arguments and run through the evolution
process.
Args:
*args (list of unicode):
Positional arguments passed on the command line.
**options (dict):
Options parsed by the argument parser.
Raises:
django.core.management.base.CommandError:
Arguments were invalid or something went wrong. Details are
in the message.
"""
if not has_migrate:
raise CommandError(
_('migrate is not available on this version of Django. '
'Use `syncdb` instead.'))
if not getattr(settings, 'DJANGO_EVOLUTION_ENABLED', True):
# Run the original migrate command.
return super(Command, self).handle(*args, **options)
if options.get('migration_name'):
raise CommandError(
_('The migrate command cannot apply a specific migration '
'name when Django Evolution is in use. Set '
'`DJANGO_EVOLUTION_ENABLED = False` in your settings.py '
'to use the original migrate command.'))
if options.get('fake'):
raise CommandError(
_('The migrate command cannot use --fake when Django '
'Evolution is in use. Set '
'`DJANGO_EVOLUTION_ENABLED = False` in your settings.py '
'to use the original migrate command.'))
app_labels = []
if options.get('app_label'):
app_labels.append(options.get('app_label'))
call_command('evolve',
*app_labels,
verbosity=options.get('verbosity'),
interactive=options.get('interactive'),
database=options.get('database'),
execute=True)
|
#!/usr/bin/env python3
import nelly.main
if '__main__' == __name__:
nelly.main.main()
|
from records_mover.records.records_format import DelimitedRecordsFormat
import unittest
import json
class TestDelimitedRecordsFormat(unittest.TestCase):
def test_dumb(self):
records_format = DelimitedRecordsFormat(variant='dumb')
# Should match up with
# https://github.com/bluelabsio/records-mover/blob/master/docs/RECORDS_SPEC.md#dumb-variant
expected_hints = {
'compression': 'GZIP',
'dateformat': 'YYYY-MM-DD',
'datetimeformat': 'YYYY-MM-DD HH:MI:SS',
'datetimeformattz': 'YYYY-MM-DD HH:MI:SSOF',
'doublequote': False,
'encoding': 'UTF8',
'escape': None,
'field-delimiter': ',',
'quotechar': '"',
'quoting': None,
'record-terminator': '\n',
'timeonlyformat': 'HH24:MI:SS',
'header-row': False,
}
self.assertEqual(expected_hints, records_format.hints)
def test_csv(self):
records_format = DelimitedRecordsFormat(variant='csv')
# Should match up with
# https://github.com/bluelabsio/records-mover/blob/master/docs/RECORDS_SPEC.md#csv-variant
expected_hints = {
'compression': 'GZIP',
'dateformat': 'MM/DD/YY',
'datetimeformat': 'MM/DD/YY HH24:MI',
'datetimeformattz': 'MM/DD/YY HH24:MI',
'doublequote': True,
'encoding': 'UTF8',
'escape': None,
'field-delimiter': ',',
'quotechar': '"',
'quoting': 'minimal',
'record-terminator': '\n',
'timeonlyformat': 'HH24:MI:SS',
'header-row': True,
}
self.assertEqual(expected_hints, records_format.hints)
def test_with_altered_hints(self):
records_format = DelimitedRecordsFormat(variant='csv').alter_hints({'quotechar': 'A'})
# Should match up with
# https://github.com/bluelabsio/records-mover/blob/master/docs/RECORDS_SPEC.md#csv-variant
expected_hints = {
'compression': 'GZIP',
'dateformat': 'MM/DD/YY',
'datetimeformat': 'MM/DD/YY HH24:MI',
'datetimeformattz': 'MM/DD/YY HH24:MI',
'doublequote': True,
'encoding': 'UTF8',
'escape': None,
'field-delimiter': ',',
'quotechar': 'A',
'quoting': 'minimal',
'record-terminator': '\n',
'timeonlyformat': 'HH24:MI:SS',
'header-row': True,
}
self.assertEqual(expected_hints, records_format.hints)
self.assertEqual({'quotechar': 'A'}, records_format.custom_hints)
def test_eq(self):
records_format_1 = DelimitedRecordsFormat()
records_format_2 = DelimitedRecordsFormat()
self.assertTrue(records_format_1 == records_format_2)
def test_eq_error(self):
records_format_1 = DelimitedRecordsFormat()
records_format_2 = "wrong type"
self.assertTrue(records_format_1 != records_format_2)
def test_unsupported_variant(self):
with self.assertRaises(NotImplementedError):
DelimitedRecordsFormat(variant='fake_thing_i_just_made_up')
def test_json(self):
records_format = DelimitedRecordsFormat()
self.assertEqual({
'hints': {
'compression': 'GZIP',
'dateformat': 'YYYY-MM-DD',
'datetimeformat': 'YYYY-MM-DD HH24:MI:SS',
'datetimeformattz': 'YYYY-MM-DD HH:MI:SSOF',
'doublequote': False,
'encoding': 'UTF8',
'escape': '\\',
'field-delimiter': ',',
'header-row': False,
'quotechar': '"',
'quoting': None,
'record-terminator': '\n',
'timeonlyformat': 'HH24:MI:SS'},
'type': 'delimited',
'variant': 'bluelabs'
}, json.loads(records_format.json()))
def test_repr(self):
records_format = DelimitedRecordsFormat()
self.assertEqual('DelimitedRecordsFormat(bluelabs)', repr(records_format))
def test_generate_filename_gzip(self):
records_format = DelimitedRecordsFormat(hints={'compression': 'GZIP'})
self.assertEqual('foo.csv.gz', records_format.generate_filename('foo'))
def test_generate_filename_bzip(self):
records_format = DelimitedRecordsFormat(hints={'compression': 'BZIP'})
self.assertEqual('foo.csv.bz2', records_format.generate_filename('foo'))
def test_generate_filename_no_compression(self):
records_format = DelimitedRecordsFormat(hints={'compression': None})
self.assertEqual('foo.csv', records_format.generate_filename('foo'))
def test_alter_variant(self):
records_format = DelimitedRecordsFormat(variant='csv', hints={'compression': 'BZIP'})
new_records_format = records_format.alter_variant('bigquery')
self.assertEqual(records_format.variant, 'csv')
self.assertEqual(new_records_format.variant, 'bigquery')
|
# coding=utf-8
# Copyright 2021 The Reach ML Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the tf agent."""
import enum
from ibc.ibc.agents import ibc_agent
from ibc.ibc.agents import mdn_agent
from ibc.ibc.agents import mse_agent
import tensorflow as tf
class LossType(enum.Enum):
EBM = 'ebm'
MSE = 'mse'
MDN = 'mdn'
class WarmupSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
"""Implements learning rate warmup."""
def __init__(self, lr, d_model=32, warmup_steps=4000):
super(WarmupSchedule, self).__init__()
self.d_model = d_model
self.d_model = tf.cast(self.d_model, tf.float32)
self.warmup_steps = warmup_steps
self.lr = lr
def __call__(self, step):
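        # Transformer-style ("Noam") warmup, scaled by the base lr:
        #   lr * d_model**-0.5 * min(step**-0.5, step * warmup_steps**-1.5)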
arg1 = tf.math.rsqrt(step)
arg2 = step * (self.warmup_steps ** -1.5)
return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2) * self.lr
def get_agent(loss_type,
time_step_tensor_spec,
action_tensor_spec,
action_sampling_spec,
obs_norm_layer,
act_norm_layer,
act_denorm_layer,
learning_rate,
use_warmup,
cloning_network,
train_step,
decay_steps):
"""Creates tfagent."""
if use_warmup:
learning_rate_schedule = WarmupSchedule(lr=learning_rate)
else:
learning_rate_schedule = (
tf.keras.optimizers.schedules.ExponentialDecay(
learning_rate, decay_steps=decay_steps, decay_rate=0.99))
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate_schedule)
if loss_type == LossType.EBM.value:
agent_class = ibc_agent.ImplicitBCAgent
elif loss_type == LossType.MSE.value:
agent_class = mse_agent.MseBehavioralCloningAgent
elif loss_type == LossType.MDN.value:
agent_class = mdn_agent.MdnBehavioralCloningAgent
else:
raise ValueError("Unsupported loss type, can't retrieve an agent.")
agent = agent_class(
time_step_spec=time_step_tensor_spec,
action_spec=action_tensor_spec,
action_sampling_spec=action_sampling_spec,
obs_norm_layer=obs_norm_layer,
act_norm_layer=act_norm_layer,
act_denorm_layer=act_denorm_layer,
cloning_network=cloning_network,
optimizer=optimizer,
train_step_counter=train_step)
agent.initialize()
return agent
|
import sys
sys.path.append('..')
from utils import iplib
if __name__ == "__main__":
if len(sys.argv) < 2:
print(f"Usage: {sys.argv[0]} <IP address>")
exit(-1)
try:
ip = iplib.IPAddress(sys.argv[1])
except:
print(f"{sys.argv[1]} is not a valid IPv4 address")
exit(-1)
print(f"{sys.argv[1]} is from class {ip.get_class()}")
|
from common_fixtures import * # NOQA
from test_services \
import service_with_healthcheck_enabled
from test_machine \
import action_on_digital_ocean_machine, get_dropletid_for_ha_hosts
ha_droplets = []
if_test_host_down = pytest.mark.skipif(
    not os.environ.get('DIGITALOCEAN_KEY') or
    not os.environ.get('TEST_HOST_DOWN') or
    not os.environ.get('HOST_DISCONN_ACTIVE_TIMEOUT') or
    not os.environ.get('HOST_ACTIVE_DISCONN_TIMEOUT'),
    reason='HOST DOWN PARAMETERS not set')
HOST_DISCONN_ACTIVE_TIMEOUT = os.environ.get('HOST_DISCONN_ACTIVE_TIMEOUT',
900)
HOST_ACTIVE_DISCONN_TIMEOUT = os.environ.get('HOST_ACTIVE_DISCONN_TIMEOUT',
900)
@pytest.fixture(scope='session', autouse=True)
def get_host_droplets(ha_hosts, socat_containers):
ha_droplets.append(get_dropletid_for_ha_hosts())
@pytest.fixture
def check_host_state_power_on(client):
print "Power on hosts that are in disconnected or reconnecting state"
print ha_droplets
inactive_hosts = client.list_host(
kind='docker', removed_null=True, agentState="disconnected")
print "Disconnected hosts:"
print inactive_hosts
reconn_hosts = client.list_host(
kind='docker', removed_null=True, agentState="reconnecting")
print "Reconnecting hosts:"
print reconn_hosts
inactive_hosts_dropletids = []
inactive_hosts_list = []
# Get droplet Id and hosts from disconnected hosts
for host in inactive_hosts:
host_name = host.hostname
print host_name
droplet_id = ha_droplets[0][host_name]
inactive_hosts_dropletids.append(droplet_id)
inactive_hosts_list.append(host)
# Get droplet Id and hosts from reconnecting hosts
# and append to the inactive host/droplet lists
for host in reconn_hosts:
host_name = host.hostname
print host_name
droplet_id = ha_droplets[0][host_name]
inactive_hosts_dropletids.append(droplet_id)
inactive_hosts_list.append(host)
print "List of all disconnected/reconnecting hosts"
print inactive_hosts_list
print inactive_hosts_dropletids
# Power on the droplets
for dropletid in inactive_hosts_dropletids:
print "Power on droplet " + str(droplet_id)
action_on_digital_ocean_machine(dropletid, "power_on")
# Wait for the host agent state to become active
for host in inactive_hosts_list:
print "In host wait method"
wait_for_host_agent_state(client, host, "active", 600)
@if_test_host_down
def test_service_with_healthcheck_1_host_down(
client, ha_hosts, socat_containers,
check_host_state_power_on):
host_down_with_healthcheck_services(
client, host_down_count=1)
@if_test_host_down
def test_service_with_healthcheck_2_host_down(
client, ha_hosts, socat_containers,
check_host_state_power_on):
host_down_with_healthcheck_services(
client, host_down_count=2)
@if_test_host_down
def test_service_with_healthcheck_3_host_down(
client, ha_hosts, socat_containers,
check_host_state_power_on):
host_down_with_healthcheck_services(
client, host_down_count=3)
@if_test_host_down
def test_service_with_healthcheck_and_retainIp_2_host_down(
client, ha_hosts, socat_containers,
check_host_state_power_on):
host_down_with_healthcheck_services(
client, host_down_count=2, retainIp=True)
@if_test_host_down
def test_lbservice_with_healthcheck_1_host_down(
client, ha_hosts, socat_containers,
check_host_state_power_on):
lb_port = "7770"
host_down_with_lb_services(
client, lb_port, host_down_count=1)
@if_test_host_down
def test_lbservice_with_healthcheck_2_host_down(
client, ha_hosts, socat_containers,
check_host_state_power_on):
lb_port = "7771"
host_down_with_lb_services(
client, lb_port, host_down_count=2)
@if_test_host_down
def test_global_lbservice_with_healthcheck_1_host_down(
client, ha_hosts, socat_containers,
check_host_state_power_on):
lb_port = "7772"
host_down_with_lb_services(
client, lb_port, host_down_count=1, globalf=True)
@if_test_host_down
def test_global_lbservice_with_healthcheck_2_host_down(
client, ha_hosts, socat_containers,
check_host_state_power_on):
lb_port = "7773"
host_down_with_lb_services(
client, lb_port, host_down_count=2, globalf=True)
@if_test_host_down
def test_service_with_host_down(
client, ha_hosts, socat_containers,
check_host_state_power_on):
host_down_with_services(
client, host_down_count=2)
@if_test_host_down
def test_global_service_with_host_down(
client, ha_hosts, socat_containers,
check_host_state_power_on):
host_down_with_services(
client, host_down_count=2, globalf=True)
@if_test_host_down
def test_global_service_with_reconnecting_host(
client, ha_hosts, socat_containers,
check_host_state_power_on):
global_service_with_reconn_disconn_host(client, state="reconnecting")
@if_test_host_down
def test_global_service_with_disconnected_host(
client, ha_hosts, socat_containers,
check_host_state_power_on):
global_service_with_reconn_disconn_host(client, state="disconnected")
def global_service_with_reconn_disconn_host(client, state):
# Pick one of the host and power down hosts
host_down = ha_host_list[0]
host_name = ha_host_list[0].hostname
print "power down- " + host_name
action_on_digital_ocean_machine(ha_droplets[0][host_name], "power_off")
wait_for_host_agent_state(client, host_down, state,
HOST_ACTIVE_DISCONN_TIMEOUT)
# Create service
launch_config = {"imageUuid": HEALTH_CHECK_IMAGE_UUID}
launch_config["labels"] = {"io.rancher.scheduler.global": "true"}
service, env = create_env_and_svc(client, launch_config)
service = service.activate()
service = client.wait_success(service, 300)
assert service.state == "active"
container_list = get_service_container_list(client, service)
assert len(container_list) == get_service_instance_count(client, service)
# Power on the host
action_on_digital_ocean_machine(ha_droplets[0][host_name], "power_on")
wait_for_host_agent_state(client, host_down, "active",
HOST_DISCONN_ACTIVE_TIMEOUT)
service = wait_success(client, service, 300)
container_list = get_service_container_list(client, service)
assert len(container_list) == get_service_instance_count(client, service)
instance_list = get_containers_on_host_for_service(
client, host_down, service)
assert len(instance_list) == 1
@if_test_host_down
def test_global_service_with_inactive_host(
client, ha_hosts, socat_containers,
check_host_state_power_on):
# Pick one of the host and deactivate this host
host_down = ha_host_list[0]
host_name = ha_host_list[0].hostname
print "Deactivate " + host_name
host_down.deactivate()
host_down = wait_for_condition(client,
host_down,
lambda x: x.state == "inactive",
lambda x: 'Host state is ' + x.state,
timeout=300)
# Create service
launch_config = {"imageUuid": HEALTH_CHECK_IMAGE_UUID}
launch_config["labels"] = {"io.rancher.scheduler.global": "true"}
service, env = create_env_and_svc(client, launch_config)
service = service.activate()
service = wait_success(client, service, 300)
assert service.state == "active"
container_list = get_service_container_list(client, service)
assert len(container_list) == get_service_instance_count(client, service)
# Activate the host that is in deactivated state
print "Activate " + host_name
host_down.activate()
host_down = wait_for_condition(client,
host_down,
lambda x: x.state == "active",
lambda x: 'Host state is ' + x.state,
timeout=300)
service = wait_success(client, service, 300)
container_list = get_service_container_list(client, service)
assert len(container_list) == get_service_instance_count(client, service)
instance_list = get_containers_on_host_for_service(
client, host_down, service)
assert len(instance_list) == 1
def host_down_with_lb_services(client, lb_port, host_down_count,
scale=2, lb_scale=2, globalf=False):
# Wait for hosts in "reconnecting" state to get to "active" state
check_hosts_state(client)
# Create environment with lb_service and 2 healthcheck enabled
# service targets
env, lb_service, service1, service2 = \
env_with_lb_service_with_health_check_enabled_targets(
client, lb_port, scale, lb_scale, globalf)
    # Pick hosts (and collect instances that will go unhealthy) that need
# to be powered down
down_hosts = []
down_instances = []
for i in range(0, len(ha_host_list)):
host = ha_host_list[i]
instance_list = \
get_containers_on_host_for_service(client, host, lb_service)
if len(instance_list) > 0:
down_instances.extend(instance_list)
down_hosts.append(host)
if len(down_hosts) == host_down_count:
break
# Power Down hosts where lb service instances are running
for host in down_hosts:
host_name = host.hostname
print "power down- " + host_name
print ha_droplets[0]
action_on_digital_ocean_machine(ha_droplets[0][host_name], "power_off")
print "Waiting for the hosts to go to disconnected state"
for host in down_hosts:
wait_for_host_agent_state(client, host, "disconnected",
HOST_ACTIVE_DISCONN_TIMEOUT)
# Check for service reconcile
check_for_service_reconcile(
client, lb_service, down_instances,
instance_list, globalf)
validate_lb_service(client, lb_service,
lb_port, [service1, service2])
# Power on hosts that were powered off
for host in down_hosts:
host_name = host.hostname
print "power on- " + host_name
action_on_digital_ocean_machine(ha_droplets[0][host_name], "power_on")
print "Waiting for the hosts to go to active state"
for host in down_hosts:
wait_for_host_agent_state(client, host, "active",
HOST_DISCONN_ACTIVE_TIMEOUT)
    # if service is global, validate that new instances of the service get
    # created on the hosts that get powered on
if (globalf):
check_hosts_state(client)
wait_for_scale_to_adjust(client, service1, timeout=300)
wait_for_scale_to_adjust(client, service2, timeout=300)
wait_for_scale_to_adjust(client, lb_service, timeout=300)
validate_lb_service(client, lb_service,
lb_port, [service1, service2])
delete_all(client, [env])
def host_down_with_healthcheck_services(client, host_down_count,
retainIp=False):
# Wait for hosts in "reconnecting" state to get to "active" state
check_hosts_state(client)
# Create service that is healthcheck enabled
scale = 10
env, service = service_with_healthcheck_enabled(
client, scale, retainIp=retainIp)
    # Pick hosts (and collect instances that will go unhealthy) that need
# to be powered down
down_hosts = []
down_instances = []
for i in range(0, len(ha_host_list)):
host = ha_host_list[i]
instance_list = \
get_containers_on_host_for_service(client, host, service)
if len(instance_list) > 0:
down_instances.extend(instance_list)
down_hosts.append(host)
if len(down_hosts) == host_down_count:
break
# Power Down hosts where service instances are running
for host in down_hosts:
host_name = host.hostname
print "power down- " + host_name
print ha_droplets[0]
action_on_digital_ocean_machine(ha_droplets[0][host_name], "power_off")
print "Waiting for the hosts to go to disconnected state"
for host in down_hosts:
wait_for_host_agent_state(client, host, "disconnected",
HOST_ACTIVE_DISCONN_TIMEOUT)
# Check for service reconcile
check_for_service_reconcile(
client, service, down_instances, instance_list)
    # If retainIp is turned on, make sure that the IP addresses assigned to
    # the reconciled instances are the same
if (retainIp):
for con in down_instances:
container_name = con.name
containers = client.list_container(name=container_name,
removed_null=True)
assert len(containers) == 1
container = containers[0]
assert container.primaryIpAddress == con.primaryIpAddress
assert container.externalId != con.externalId
    delete_all(client, [env])
    # Power on hosts that were powered off
for host in down_hosts:
host_name = host.hostname
print "power on- " + host_name
action_on_digital_ocean_machine(ha_droplets[0][host_name], "power_on")
print "Waiting for the hosts to go to active state"
for host in down_hosts:
wait_for_host_agent_state(client, host, "active",
HOST_DISCONN_ACTIVE_TIMEOUT)
def host_down_with_services(client, host_down_count,
globalf=False):
# Wait for hosts in "reconnecting" state to get to "active" state
check_hosts_state(client)
# Create service
launch_config = {"imageUuid": HEALTH_CHECK_IMAGE_UUID}
if globalf:
launch_config["labels"] = {"io.rancher.scheduler.global": "true"}
scale = 0
else:
scale = 10
service, env = create_env_and_svc(client, launch_config, scale)
service = service.activate()
service = client.wait_success(service, 300)
assert service.state == "active"
container_list = get_service_container_list(client, service)
assert len(container_list) == get_service_instance_count(client, service)
# Pick hosts (and collect instances that will go unhealthy) that need
# to be powered down
down_hosts = []
down_instances = []
for i in range(0, len(ha_host_list)):
host = ha_host_list[i]
instance_list = \
get_containers_on_host_for_service(client, host, service)
if len(instance_list) > 0:
down_instances.extend(instance_list)
down_hosts.append(host)
if len(down_hosts) == host_down_count:
break
# Power Down hosts where service instances are running
for host in down_hosts:
host_name = host.hostname
print "power down- " + host_name
print ha_droplets[0]
action_on_digital_ocean_machine(ha_droplets[0][host_name], "power_off")
print "Waiting for the hosts to go to disconnected state"
for host in down_hosts:
wait_for_host_agent_state(client, host, "disconnected",
HOST_DISCONN_ACTIVE_TIMEOUT)
    # There will be no service reconcile since the instances will remain
    # in the "running" state in rancher-server
for con in down_instances:
assert con.state == "running"
service = client.reload(service)
assert service.state == "active"
    # Power on hosts that were powered off.
# "stopped" state of the containers on the host will get synced and
# service reconcile will trigger containers to be started.
for host in down_hosts:
host_name = host.hostname
print "power on- " + host_name
action_on_digital_ocean_machine(ha_droplets[0][host_name], "power_on")
print "Waiting for the hosts to go to active state"
for host in down_hosts:
wait_for_host_agent_state(client, host, "active",
HOST_DISCONN_ACTIVE_TIMEOUT)
wait_for_condition(
client, service,
lambda x: x.state == 'active',
lambda x: 'State is: ' + x.state)
for con in down_instances:
assert con.state == "running"
delete_all(client, [env])
def get_containers_on_host_for_service(client, host, service):
instance_list = []
hosts = client.list_host(
kind='docker', removed_null=True, state='active', uuid=host.uuid,
include="instances")
assert len(hosts) == 1
for instance in hosts[0].instances:
containers = client.list_container(
state='running', uuid=instance.uuid, include="services")
assert len(containers) <= 1
if (len(containers) == 1 and
containers[0].createIndex is not None and
containers[0].services[0].id == service.id):
instance_list.append(instance)
return instance_list
def check_for_service_reconcile(client, service, unhealthy_con_list,
instance_list, globalf=False):
# Validate that unhealthy instances in the service get deleted
# This code segment is commented as unhealthy state is
# transient and hard to catch
# for con in unhealthy_con_list:
# wait_for_condition(
# client, con,
# lambda x: x.healthState == 'unhealthy',
# lambda x: 'State is: ' + x.healthState, timeout=180)
# con = client.reload(con)
# assert con.healthState == "unhealthy"
for con in unhealthy_con_list:
wait_for_condition(
client, con,
lambda x: x.state in ('removed', 'purged'),
            lambda x: 'State is: ' + x.state, timeout=120)
wait_for_scale_to_adjust(client, service, timeout=300)
con = client.reload(con)
assert con.state in ('removed', 'purged')
# Validate all instances in the service are healthy
container_list = get_service_container_list(client, service)
if globalf is False:
assert len(container_list) == service.scale
for con in container_list:
wait_for_condition(
client, con,
lambda x: x.healthState == 'healthy',
lambda x: 'State is: ' + x.healthState, timeout=120)
# Validate all existing healthy instances in the service were not deleted
# and recreated
for unhealthy_con in unhealthy_con_list:
        for con in list(instance_list):
if (unhealthy_con.name == con.name):
instance_list.remove(con)
for healthy_con in instance_list:
healthy_con = client.reload(healthy_con)
assert healthy_con.state == "running"
assert healthy_con.healthState == "healthy"
service = client.reload(service)
assert service.state == "active"
assert service.healthState == "healthy"
def check_hosts_state(client, timeout=300):
print "Check if host state is active"
start = time.time()
disconn_host = 1
while disconn_host != 0:
time.sleep(.5)
hosts = client.list_host(
kind='docker', removed_null=True, agentState="disconnected")
disconn_host = len(hosts)
if time.time() - start > timeout:
raise Exception(
'Timed out waiting for all hosts to be active in the setup')
    # Give some time for hosts that just got to "Active" state to settle down
    time.sleep(30)
    print("Host is Active")
def env_with_lb_service_with_health_check_enabled_targets(client,
lb_port,
scale=2, lb_scale=2,
globalf=False):
    # Create Environment with 2 health check enabled services and 1 LB service
health_check = {"name": "check1", "responseTimeout": 2000,
"interval": 2000, "healthyThreshold": 2,
"unhealthyThreshold": 3,
"requestLine": "GET /name.html HTTP/1.0",
"port": 80}
launch_config = {"imageUuid": HEALTH_CHECK_IMAGE_UUID,
"healthCheck": health_check
}
lb_launch_config = {"ports": [lb_port],
"imageUuid": get_haproxy_image()}
if (globalf):
launch_config["labels"] = {"io.rancher.scheduler.global": "true"}
lb_launch_config["labels"] = {"io.rancher.scheduler.global": "true"}
scale = None
lb_scale = None
service1, env = create_env_and_svc(
client, launch_config, scale)
service1 = activate_svc(client, service1)
container_list = get_service_container_list(client, service1)
assert len(container_list) == get_service_instance_count(client, service1)
for con in container_list:
wait_for_condition(
client, con,
lambda x: x.healthState == 'healthy',
lambda x: 'State is: ' + x.healthState)
service2 = create_svc(client, env, launch_config, scale)
service2 = activate_svc(client, service2)
container_list = get_service_container_list(client, service2)
assert len(container_list) == get_service_instance_count(client, service2)
for con in container_list:
wait_for_condition(
client, con,
lambda x: x.healthState == 'healthy',
lambda x: 'State is: ' + x.healthState)
port_rule1 = {"serviceId": service1.id,
"sourcePort": lb_port,
"targetPort": "80",
"protocol": "http"
}
port_rule2 = {"serviceId": service2.id,
"sourcePort": lb_port,
"targetPort": "80",
"protocol": "http"
}
lb_Config = {"portRules": [port_rule1, port_rule2]}
lb_service = client.create_loadBalancerService(
name="lb-1",
stackId=env.id,
launchConfig=lb_launch_config,
scale=lb_scale,
lbConfig=lb_Config)
lb_service = client.wait_success(lb_service)
assert lb_service.state == "inactive"
lb_service = activate_svc(client, lb_service)
service_link1 = {"serviceId": service1.id}
service_link2 = {"serviceId": service2.id}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2])
validate_lb_service(client, lb_service,
lb_port, [service1, service2])
return env, lb_service, service1, service2
|
# Generated by Django 3.0.2 on 2020-01-11 13:34
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Accruals",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"cutoff_date",
models.DateField(unique=True, verbose_name="cutoff date"),
),
(
"accruals",
models.DecimalField(
decimal_places=2,
max_digits=10,
validators=[django.core.validators.MinValueValidator(0)],
verbose_name="accruals",
),
),
],
options={
"verbose_name": "accruals",
"verbose_name_plural": "accruals",
"ordering": ["-cutoff_date"],
},
),
]
|
import sys
from PyQt5 import QtWidgets
from ui import UI
app = QtWidgets.QApplication(sys.argv)
ui = UI(app)
sys.exit(app.exec_())
|
import argparse
import logging
from alfred.utils.config import *
from alfred.utils.directory_tree import *
from alfred.utils.misc import create_logger, select_storage_dirs
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--from_file', type=str, default=None,
help="Path containing all the storage_names")
parser.add_argument('--storage_name', type=str, default=None)
parser.add_argument("--root_dir", default=None, type=str)
return parser.parse_args()
def _anonymize_config(from_file, storage_name, root_dir):
logger = create_logger(name="ANONYMIZE CONFIG", loglevel=logging.INFO)
logger.info("\nANONYMIZING Config")
# Select storage_dirs to run over
storage_dirs = select_storage_dirs(from_file, storage_name, root_dir)
# Sanity-check that storages exist
storage_dirs = [storage_dir for storage_dir in storage_dirs if sanity_check_exists(storage_dir, logger)]
for storage_to_copy in storage_dirs:
logger.info(str(storage_to_copy))
seeds_to_copy = get_all_seeds(storage_to_copy)
# find the path to all the configs files
        for seed_dir in seeds_to_copy:
            config_path = seed_dir / 'config.json'
config = load_dict_from_json(str(config_path))
if 'experiment_name' in config:
logger.info(f"ANONYMIZE -- Removing experiment_name from {str(config_path)}")
                del config['experiment_name']
else:
logger.info(f"PASS -- {str(config_path)} has no experiment_name. ")
save_dict_to_json(config, filename=str(config_path))
if __name__ == "__main__":
args = get_args()
print(args.__dict__)
_anonymize_config(from_file=args.from_file,
storage_name=args.storage_name,
root_dir=args.root_dir)
|
#!/usr/bin/env python3
# wb2sc_file_converter.py
#
# Copyright 2019 E. Decker
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple tool converting files created by wortverbund_builder to files that
sign_compare can work with."""
import csv
import os
import tkinter as tk
class WortverbundSelecter(tk.Frame):
"""GUI-frame to select a wortverbund of a project and to convert it."""
def __init__(self, master, project):
tk.Frame.__init__(self)
tk.Button(self, font='Arial 16', text='Back', width=7, command=self.__del__).pack()
self.label = tk.Label(self, font='Arial 16', text='Select a wortverbund to convert to a sc-file: ')
self.label.pack()
self.wortverbund_listbox = tk.Listbox(self, font='Arial 16', height=18, width=26)
self.wortverbund_listbox.pack()
self.project = project
if os.listdir('wb_files/'+self.project):
for wortverbund_file in os.listdir('wb_files/'+self.project):
self.wortverbund_listbox.insert('end', wortverbund_file[:-4])
self.convert_button = tk.Button(self, font='Arial 16', text='Convert', width=7, command=self.convert_wortverbund)
self.convert_button.pack()
else:
self.label.forget()
self.wortverbund_listbox.forget()
tk.Label(self, font='Arial 16', text='There is no wortverbund in the project.').pack()
def __del__(self):
self.forget()
ROOT_FRAME.pack()
def convert_wortverbund(self):
"""Converts the selected wortverbund_builder file to a sign_compare file
by reading the features saved in the input file (wortverbund_builder
file) and writing them into the output file (sign_compare file)."""
self.wortverbund_listbox.forget()
self.convert_button.forget()
try:
with open('wb_files/'+self.project+'/'+self.wortverbund_listbox.get('active')+'.csv', 'r') as csv_file:
content_of_csv_file = csv.reader(csv_file, delimiter=';')
self.feature_string = ''
for row in content_of_csv_file:
if row[0]:
self.feature_string += row[0]+';'
if self.feature_string:
# If there is no sign_compare file (or even no directory) with
# the same name as the selected wortverbund_builder file:
# creates a new sign_compare file.
if not os.path.exists('sc_files'):
os.makedirs('sc_files')
if not os.path.exists('sc_files/'+self.wortverbund_listbox.get('active')+'.txt'):
self.save_converted_file_0(0)
# If a sign_compare file with the same name as the selected
# wortverbund_builder file already exists: 3 new options.
else:
self.label['text'] = 'A sign \"'+self.wortverbund_listbox.get('active')+'\" already exists!\nWhat do you want to do?'
self.convert_button = tk.Button(self, font='Arial 16', text='Append existing file', width=40, command=self.save_by_appending)
self.convert_button.pack()
self.replace_button = tk.Button(self, font='Arial 16', text='Replace existing file (the old file will be lost)', width=40, command=self.save_by_replacing)
self.replace_button.pack()
self.rename_button = tk.Button(self, font='Arial 16', text='Rename existing file and save the newly converted', width=40, command=self.rename_and_save)
self.rename_button.pack()
else:
self.label['text'] = '\"'+self.wortverbund_listbox.get('active')+'\" was not converted because there are no features in it!'
except IOError:
self.label['text'] = 'Sorry, \"'+self.wortverbund_listbox.get('active')+'\" couldn\'t be converted!'
def save_by_appending(self):
"""Saves the features of the input file (wortverbund_builder file) in an
already existing sign_compare file by adding them to it."""
self.save_converted_file_0(0)
def save_by_replacing(self):
"""Saves the features of the input file (wortverbund_builder file) in a
"new" sign_compare file replacing the one that already existed -
this means that the old sign_compare file gets lost."""
self.save_converted_file_0(1)
def rename_and_save(self):
"""Allows to rename an already existing sign_compare file and to save
the features of the input file (wortverbund_builder file) in a
new sign_compare file - i.e. creating a new file and keeping the old
one."""
self.convert_button.forget()
self.replace_button.forget()
self.rename_button.forget()
self.label['text'] = 'Enter new name for the existing sign \"'+self.wortverbund_listbox.get('active')+'\": '
self.entry = tk.Entry(self, font='Arial 16', width=16)
self.entry.pack()
self.convert_button = tk.Button(self, font='Arial 16', text='Rename and save', width=15, command=self.save_converted_file_1)
self.convert_button.pack()
def save_converted_file_0(self, case):
self.convert_button.forget()
try:
self.replace_button.forget()
self.rename_button.forget()
except AttributeError:
pass
if case == 0: # coming from "self.save_by_appending"
sign_compare_file = open('sc_files/'+self.wortverbund_listbox.get('active')+'.txt', 'a')
else: # coming from "self.save_by_replacing"
sign_compare_file = open('sc_files/'+self.wortverbund_listbox.get('active')+'.txt', 'w')
sign_compare_file.write(self.feature_string)
sign_compare_file.close()
self.label['text'] = '\"'+self.wortverbund_listbox.get('active')+'\" converted!'
def save_converted_file_1(self): # coming from "self.rename_and_save"
self.convert_button.forget()
self.entry.forget()
try:
os.rename('sc_files/'+self.wortverbund_listbox.get('active')+'.txt', 'sc_files/'+self.entry.get()+'.txt')
with open('sc_files/'+self.wortverbund_listbox.get('active')+'.txt', 'w') as sign_compare_file:
sign_compare_file.write(self.feature_string)
self.label['text'] = '\"'+self.wortverbund_listbox.get('active')+'\" converted and already existing file renamed \"'+self.entry.get()+'\"!'
        except OSError:
self.label['text'] = 'Your new file name was not accepted!'
def select_project():
ROOT_FRAME.forget()
WortverbundSelecter(ROOT, project_listbox.get('active')).pack()
ROOT = tk.Tk()
ROOT.title('wb2sc_file_converter')
# Main frame to select a wortverbund_builder project.
ROOT_FRAME = tk.Frame(ROOT)
project_listbox = tk.Listbox(ROOT_FRAME, font='Arial 16', height=18, width=26)
project_listbox.pack()
try:
no_projects = True
for project in os.listdir('wb_files'):
if not '.' in project:
project_listbox.insert('end', project)
no_projects = False
if no_projects:
raise IOError
else:
tk.Button(ROOT_FRAME, font='Arial 16', text='Select project', width=26, command=select_project).pack()
except IOError:
project_listbox.forget()
tk.Label(ROOT_FRAME, font='Arial 16', text='\nThere are no wortverbund_builder projects!\n').pack()
ROOT_FRAME.pack()
ROOT.mainloop()
|
def countSegments(s):
    s = s.strip()
    # an empty (or all-whitespace) string has no segments
    if not s:
        return 0
    count = 1
    for i in range(len(s) - 1):
        if s[i] == " " and s[i + 1] != " ":
            count += 1
    return count
s = ", , , , a, eaefa"
print(countSegments(s))
|
import time
class SessionHelper:
def __init__(self, app):
self.app = app
def Marker_login(self, username, password):
wd = self.app.wd
self.app.open_marker_home_page()
wd.find_element_by_css_selector("button.mainbtn").click()
wd.find_element_by_name("username").click()
wd.find_element_by_name("username").clear()
wd.find_element_by_name("username").click()
wd.find_element_by_name("username").send_keys(username)
time.sleep(5)
wd.find_element_by_name("password").click()
wd.find_element_by_name("password").clear()
wd.find_element_by_name("username").click()
wd.find_element_by_name("password").send_keys(password)
time.sleep(5)
wd.find_element_by_css_selector("button.navbar-btn.mainbtn").click()
def Marker_logout(self):
wd = self.app.wd
wd.find_element_by_css_selector("div.headernav__uname").click()
wd.find_element_by_link_text("Выйти из системы").click()
def ensure_Marker_logout(self):
wd = self.app.wd
if self.is_logged_in_marker():
self.Marker_logout()
def is_logged_in_marker(self):
wd = self.app.wd
return len(wd.find_elements_by_css_selector("img.headernav__icon.notifyico")) > 0
def is_logged_in_as_marker(self, username):
wd = self.app.wd
return self.get_logged_user_marker() == username
def get_logged_user_marker(self):
wd = self.app.wd
wd.find_element_by_css_selector("div.headernav__uname").click()
wd.find_element_by_link_text("Личный кабинет").click()
fulltext = wd.find_element_by_xpath("//ng-component/main/ng-component/div/div[2]/div/div[1]/div[1]/div/div[2]/span").text
return wd.find_element_by_xpath("//ng-component/main/ng-component/div/div[2]/div/div[1]/div[1]/div/div[2]/span").text[11:len(fulltext)]
def ensure_login_marker(self, username, password):
wd = self.app.wd
if self.is_logged_in_marker():
if self.is_logged_in_as_marker(username):
return
else:
self.Marker_logout()
self.Marker_login(username, password)
def open_marker_page(self, page):
wd = self.app.wd
baseUrlMarker = self.app.baseUrlMarker
wd.get(baseUrlMarker + page)
self.app.wait_page_load2(60)
def sm_login(self, username, password):
wd = self.app.wd
self.app.open_sm_home_page()
wd.find_element_by_css_selector("a.button.login").click()
#if not self.is_logged_in_sm():
wd.find_element_by_name("UserName").click()
wd.find_element_by_name("UserName").clear()
wd.find_element_by_name("UserName").click()
wd.find_element_by_name("UserName").send_keys(username)
time.sleep(5)
wd.find_element_by_name("Password").click()
wd.find_element_by_name("Password").clear()
wd.find_element_by_name("UserName").click()
wd.find_element_by_name("Password").send_keys(password)
time.sleep(5)
wd.find_element_by_xpath("//form[@id='form-login']//button[.='Войти']").click()
def sm_admin_login(self, username, password):
wd = self.app.wd
self.app.open_sm_admin_home_page()
wd.find_element_by_css_selector("a.button.login").click()
#if not self.is_logged_in_sm():
wd.find_element_by_name("UserName").click()
wd.find_element_by_name("UserName").clear()
wd.find_element_by_name("UserName").click()
wd.find_element_by_name("UserName").send_keys(username)
time.sleep(5)
wd.find_element_by_name("Password").click()
wd.find_element_by_name("Password").clear()
wd.find_element_by_name("UserName").click()
wd.find_element_by_name("Password").send_keys(password)
time.sleep(5)
wd.find_element_by_xpath("//form[@id='form-login']//button[.='Войти']").click()
def sm_logout(self):
wd = self.app.wd
baseUrlSM = self.app.baseUrlSM
smlogout = self.app.smlogout
#wd.maximize_window()
wd.get(baseUrlSM + smlogout)
def sm_admin_logout(self):
wd = self.app.wd
baseAdminUrlMarker = self.app.baseAdminUrlMarker
smlogout = self.app.smlogout
#wd.maximize_window()
wd.get(baseAdminUrlMarker + smlogout)
def ensure_logout_sm(self):
wd = self.app.wd
if self.is_logged_in_sm():
self.sm_logout()
def is_logged_in_sm(self):
wd = self.app.wd
return len(wd.find_elements_by_css_selector("span.hdr_user-menu_name")) > 0
def is_logged_in_as_sm(self, username):
wd = self.app.wd
return self.get_logged_user_sm() == username
def get_logged_user_sm(self):
wd = self.app.wd
self.app.wait_smBlock(60)
text1 = wd.find_element_by_css_selector("span.hdr_user-menu_name").get_attribute("textContent")
return text1
def ensure_login_sm(self, username, password):
wd = self.app.wd
self.app.wait_smBlock(60)
if self.is_logged_in_sm():
if self.is_logged_in_as_sm(username):
return
else:
self.sm_logout()
self.sm_login(username, password)
def ensure_admin_login_sm(self, username, password):
wd = self.app.wd
self.app.wait_smBlock(60)
if self.is_logged_in_sm():
if self.is_logged_in_as_sm(username):
return
else:
self.sm_admin_logout()
self.sm_admin_login(username, password)
def open_SM_page(self, page):
wd = self.app.wd
baseUrlSM = self.app.baseUrlSM
wd.get(baseUrlSM + page)
self.app.wait_smBlock(5)
def open_admin_SM_page(self, page):
wd = self.app.wd
baseAdminUrlMarker = self.app.baseAdminUrlMarker
wd.get(baseAdminUrlMarker + page)
self.app.wait_smBlock(5)
def open_href_page(self, page):
wd = self.app.wd
wd.get(page)
self.app.wait_smBlock(5)
    def is_marker(self):
        try:
            return self.app.wd.current_url.startswith(self.app.baseUrlMarker)
        except Exception:
            return False
    def is_sm_blocked(self):
        try:
            text = self.app.wd.find_element_by_id("smBlock").value_of_css_property("display")
            return text == 'block'
        except Exception:
            return False
    def is_sm_artef_blocked(self):
        try:
            text = self.app.wd.find_element_by_css_selector("div.dlg-content_loader.dlg-content_loader--center").value_of_css_property("display")
            return text == 'block'
        except Exception:
            return False
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from cuml.test.utils import unit_param, \
quality_param, \
stress_param
from cuml.neighbors import KNeighborsRegressor as lKNNReg
from cuml.dask.neighbors import KNeighborsRegressor as dKNNReg
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
import dask.array as da
import dask.dataframe as dd
from cuml.dask.common.dask_arr_utils import to_dask_cudf
from cudf.core.dataframe import DataFrame
import numpy as np
import cudf
def generate_dask_array(np_array, n_parts):
n_samples = np_array.shape[0]
n_samples_per_part = int(n_samples / n_parts)
chunks = [n_samples_per_part] * n_parts
chunks[-1] += n_samples % n_samples_per_part
chunks = tuple(chunks)
return da.from_array(np_array, chunks=(chunks, -1))
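# generate_dask_array splits the rows into n_parts chunks and folds the remainder into
# the last chunk; for example, a hypothetical array with 10 rows and n_parts=3 ends up
# with row chunks of (3, 3, 4).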
@pytest.fixture(
scope="module",
params=[
unit_param({'n_samples': 3000, 'n_features': 30,
'n_classes': 5, 'n_targets': 2}),
quality_param({'n_samples': 8000, 'n_features': 35,
'n_classes': 12, 'n_targets': 3}),
stress_param({'n_samples': 20000, 'n_features': 40,
'n_classes': 12, 'n_targets': 4})
])
def dataset(request):
X, y = make_multilabel_classification(
n_samples=int(request.param['n_samples'] * 1.2),
n_features=request.param['n_features'],
n_classes=request.param['n_classes'],
n_labels=request.param['n_classes'],
length=request.param['n_targets'])
new_x = []
new_y = []
for i in range(y.shape[0]):
a = np.argwhere(y[i] == 1)[:, 0]
if len(a) >= request.param['n_targets']:
new_x.append(i)
np.random.shuffle(a)
a = a[:request.param['n_targets']]
new_y.append(a)
if len(new_x) >= request.param['n_samples']:
break
X = X[new_x]
noise = np.random.normal(0, 5., X.shape)
X += noise
y = np.array(new_y, dtype=np.float32)
return train_test_split(X, y, test_size=0.3)
def exact_match(l_outputs, d_outputs):
# Check shapes
assert l_outputs.shape == d_outputs.shape
    # Predictions should match for at least 95% of the queries
correct_queries = (l_outputs == d_outputs).all(axis=1)
assert np.mean(correct_queries) > 0.95
@pytest.mark.parametrize("datatype", ['dask_array', 'dask_cudf'])
@pytest.mark.parametrize("parameters", [(1, 3, 256),
(8, 8, 256),
(9, 3, 128)])
def test_predict_and_score(dataset, datatype, parameters, client):
n_neighbors, n_parts, batch_size = parameters
X_train, X_test, y_train, y_test = dataset
l_model = lKNNReg(n_neighbors=n_neighbors)
l_model.fit(X_train, y_train)
l_outputs = l_model.predict(X_test)
handmade_local_score = r2_score(y_test, l_outputs)
handmade_local_score = round(float(handmade_local_score), 3)
X_train = generate_dask_array(X_train, n_parts)
X_test = generate_dask_array(X_test, n_parts)
y_train = generate_dask_array(y_train, n_parts)
y_test = generate_dask_array(y_test, n_parts)
if datatype == 'dask_cudf':
X_train = to_dask_cudf(X_train, client)
X_test = to_dask_cudf(X_test, client)
y_train = to_dask_cudf(y_train, client)
y_test = to_dask_cudf(y_test, client)
d_model = dKNNReg(client=client, n_neighbors=n_neighbors,
batch_size=batch_size)
d_model.fit(X_train, y_train)
d_outputs = d_model.predict(X_test, convert_dtype=True)
d_outputs = d_outputs.compute()
d_outputs = d_outputs.as_matrix() \
if isinstance(d_outputs, DataFrame) \
else d_outputs
exact_match(l_outputs, d_outputs)
distributed_score = d_model.score(X_test, y_test)
distributed_score = round(float(distributed_score), 3)
assert distributed_score == pytest.approx(handmade_local_score, abs=1e-2)
@pytest.mark.parametrize('input_type', ['array', 'dataframe'])
def test_predict_1D_labels(input_type, client):
# Testing that nothing crashes with 1D labels
X, y = make_regression(n_samples=10000)
if input_type == 'array':
dX = da.from_array(X)
dy = da.from_array(y)
elif input_type == 'dataframe':
X = cudf.DataFrame(X)
y = cudf.Series(y)
dX = dd.from_pandas(X, npartitions=1)
dy = dd.from_pandas(y, npartitions=1)
clf = dKNNReg()
clf.fit(dX, dy)
clf.predict(dX)
|
import numpy as np
import pytest
from highway_env.road.lane import StraightLane, CircularLane, PolyLane
from highway_env.road.road import Road, RoadNetwork
from highway_env.vehicle.controller import ControlledVehicle
@pytest.fixture
def net() -> RoadNetwork:
# Diamond
net = RoadNetwork()
net.add_lane(0, 1, StraightLane([0, 0], [10, 0]))
net.add_lane(1, 2, StraightLane([10, 0], [5, 5]))
net.add_lane(2, 0, StraightLane([5, 5], [0, 0]))
net.add_lane(1, 3, StraightLane([10, 0], [5, -5]))
net.add_lane(3, 0, StraightLane([5, -5], [0, 0]))
print(net.graph)
return net
def test_network(net):
# Road
road = Road(network=net)
v = ControlledVehicle(road, [5, 0], heading=0, target_speed=2)
road.vehicles.append(v)
assert v.lane_index == (0, 1, 0)
# Lane changes
dt = 1/15
lane_index = v.target_lane_index
lane_changes = 0
for _ in range(int(20/dt)):
road.act()
road.step(dt)
if lane_index != v.target_lane_index:
lane_index = v.target_lane_index
lane_changes += 1
assert lane_changes >= 3
def test_network_to_from_config(net):
config_dict = net.to_config()
net_2 = RoadNetwork.from_config(config_dict)
assert len(net.graph) == len(net_2.graph)
def test_polylane():
lane = CircularLane(
center=[0, 0],
radius=10,
start_phase=0,
end_phase=3.14,
)
num_samples = int(lane.length / 5)
sampled_centreline = [
lane.position(longitudinal=lon, lateral=0)
for lon in np.linspace(0, lane.length, num_samples)
]
sampled_left_boundary = [
lane.position(longitudinal=lon, lateral=0.5 * lane.width_at(longitudinal=lon))
for lon in np.linspace(0, lane.length, num_samples)
]
sampled_right_boundary = [
lane.position(longitudinal=lon, lateral=-0.5 * lane.width_at(longitudinal=lon))
for lon in np.linspace(0, lane.length, num_samples)
]
polylane = PolyLane(
lane_points=sampled_centreline,
left_boundary_points=sampled_left_boundary,
right_boundary_points=sampled_right_boundary,
)
# sample boundaries from both lanes and assert equal
num_samples = int(lane.length / 3)
# original lane
sampled_centreline = [
lane.position(longitudinal=lon, lateral=0)
for lon in np.linspace(0, lane.length, num_samples)
]
sampled_left_boundary = [
lane.position(longitudinal=lon, lateral=0.5 * lane.width_at(longitudinal=lon))
for lon in np.linspace(0, lane.length, num_samples)
]
sampled_right_boundary = [
lane.position(longitudinal=lon, lateral=-0.5 * lane.width_at(longitudinal=lon))
for lon in np.linspace(0, lane.length, num_samples)
]
# polylane
polylane_sampled_centreline = [
polylane.position(longitudinal=lon, lateral=0)
for lon in np.linspace(0, polylane.length, num_samples)
]
polylane_sampled_left_boundary = [
polylane.position(
longitudinal=lon, lateral=0.5 * polylane.width_at(longitudinal=lon)
)
for lon in np.linspace(0, polylane.length, num_samples)
]
polylane_sampled_right_boundary = [
polylane.position(
longitudinal=lon, lateral=-0.5 * polylane.width_at(longitudinal=lon)
)
for lon in np.linspace(0, polylane.length, num_samples)
]
# assert equal (very coarse because of coarse sampling)
assert all(
np.linalg.norm(
np.array(sampled_centreline) - np.array(polylane_sampled_centreline), axis=1
)
< 0.7
)
assert all(
np.linalg.norm(
np.array(sampled_left_boundary) - np.array(polylane_sampled_left_boundary),
axis=1,
)
< 0.7
)
assert all(
np.linalg.norm(
np.array(sampled_right_boundary)
- np.array(polylane_sampled_right_boundary),
axis=1,
)
< 0.7
)
|
from OpenGLCffi.GL import params
@params(api='gl', prms=['pname', 'param'])
def glConservativeRasterParameteriNV(pname, param):
pass
|
num = int(input('Enter an integer: '))
choose = int(input('Which base do you want to convert it to?'
                   '\n[1] for binary'
                   '\n[2] for octal'
                   '\n[3] for hexadecimal '
                   '\nEnter your choice: '))
if choose == 1:
    print(f'\033[37;1m{num}\033[m converted to binary is \033[31;1m{num:b}')
elif choose == 2:
    print(f'\033[37;1m{num}\033[m converted to octal is \033[32;1m{num:o}')
elif choose == 3:
    hexa = f'{num:x}'
    print(f'\033[37;1m{num}\033[m converted to hexadecimal is \033[33;1m{hexa.upper()}')
else:
    print('Please enter a number\033[36;1m corresponding\033[m to one of the bases')
|
import spacy
import pandas as pd
import wikipediaapi
import csv
from IPython.display import display
from tabulate import tabulate
wiki_wiki = wikipediaapi.Wikipedia('en')
while True:
try:
chemical = input("Write the name of entity: ")
page_py = wiki_wiki.page(chemical)
        summary_text = page_py.summary
        nlp = spacy.load('en_core_web_sm')
        sent_list = [sent.text for sent in nlp(summary_text).sents]
cumul_sent_list = [sent_list[0], ' '.join(sent_list[:2]), ' '.join(sent_list)]
df = pd.DataFrame({'Entity': chemical, 'Description': cumul_sent_list})
df["Sentences"] = pd.Series([f"Sentence1-{i+1}" for i in range(len(cumul_sent_list))])
df = df.pivot(index="Entity", columns="Sentences", values="Description")
#for col1,col2 in zip(text1, text2):
filename = 'out.csv'
df.to_csv(filename, mode='a', header=False)
print(tabulate(df, headers = 'keys', tablefmt = 'psql'))
    except Exception as e:
        # Only wikipediaapi is imported (not the `wikipedia` package), so catch a
        # generic exception for missing pages; `e.message` also does not exist in Python 3.
        print(e)
if chemical == "":
break
else:
print('Continue to the next entities...')
|
#!/usr/bin/python
#https://practice.geeksforgeeks.org/problems/common-subsequence/0
def sol(a, b):
"""
Follows the standard DP approach of finding common subsequence.
The common length can be >=1 so if we find a common character in both
the strings, it does the job
"""
m = len(a)
n = len(b)
dp = [[0 for _ in range(n+1)] for _ in range(m+1)]
for i in range(1, m+1):
for j in range(1, n+1):
if a[i-1] == b[j-1]:
dp[i][j] = 1 + dp[i-1][j-1]
else:
dp[i][j] = max(dp[i-1][j], dp[i][j-1])
if dp[i][j] >= 1:
return 1
return 0
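# Minimal usage sketch: sol returns 1 when the two strings share at least one
# character, and 0 otherwise.
if __name__ == "__main__":
    assert sol("ABCD", "BZD") == 1  # 'B' (and 'D') appear in both strings
    assert sol("ABC", "XYZ") == 0   # no character in common
    print("common subsequence checks passed")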
|
from charm import SplunkCharm
from ops.testing import Harness
import pytest
@pytest.fixture
def harness():
_harness = Harness(SplunkCharm)
_harness.set_model_name("testing")
_harness.begin()
yield _harness
_harness.cleanup()
|
from django.contrib import admin
from paper.models import PaperInfo
@admin.register(PaperInfo)
class PaperInfoAdmin(admin.ModelAdmin):
    # Columns displayed in the change list
list_display = (
'paper_id', 'title', 'degree', 'subject', 'score', 'owner_id', 'creator_id', 'is_public',
'create_time', 'update_time'
)
    # Fields that can be searched
search_fields = (
'paper_id', 'title', 'owner_id', 'creator_id'
)
    # Fields that link to the edit page
list_display_links = (
'title',
)
    # List filters
list_filter = (
'degree', 'is_public'
)
    # Number of items per page
list_per_page = 20
|
mixed_list = ['cat', 5, 'flower', 10]
string_list = []
num_list = []
for item in mixed_list:
    if isinstance(item, str):
string_list.append(item)
else:
num_list.append(item)
print(string_list)
print(num_list)
|
from credentials import INSTANCE_CONNECTION_NAME
def main():
    sql = '"{}"=tcp:{}'.format(INSTANCE_CONNECTION_NAME, 3306)
    # use a context manager so the file handle is always closed
    with open("Makefile", 'a') as f:
        f.write(sql)
    print("Makefile updated!")
if __name__ == "__main__":
main()
|
"""
This script allows the user to create train, validation and test datasets by:
* Dropping unnecessary columns
* Removing Duplicates
* Creating time window pairs
* Splitting with Session UID to prevent any data leakages
This file can be imported as a module to use the following functions:
* prepare_datasets()
* get_dfs()
Usage:
from src.data_prep import prepare_datasets, get_dfs
train_df, val_df, test_df = prepare_datasets(data_path, single_val_cols, multi_val_cols)
"""
import pandas as pd
import numpy as np
from tqdm import tqdm
import random
import os.path as op
import os
# to create subsequence data samples
time_windows = {
"5": [(0, 5), (5, 10), (10, 15)],
"10": [(0, 10), (5, 15)],
"15": [(0, 15), (15, 30)],
"30": [(0, 30), (30, 60), (60, 90), (90, 120)],
"60": [(0, 60), (60, 120)]
}
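# Each key of time_windows is a forecast horizon in minutes; each (start, target) pair is
# consumed by create_dataset below, which uses the columns suffixed with `start` as input
# features and the weather/rain columns suffixed with `target` as prediction targets
# (e.g. for "5", the pair (0, 5) predicts the +5 min weather from the +0 min samples).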
def clean_dataframe_dct(data, single_val_cols, multi_val_cols):
"""
Removes rows that has no forecast samples
Selects rows that contain relevant forecast sample for each (session_uid, timestamp) tuple
Parameters
----------
data : DataFrame
The dataframe which is created from weather.csv file
single_val_cols : list
Columns which has only single values. Example: "M_TOTAL_LAPS"
multi_val_cols : list
Columns which might have multiple values. Example: "M_WEATHER_FORECAST_SAMPLES_M_WEATHER",
Returns
-------
dct: Dictionary
that maps (session_uid, timestamp) tuples to tables that has corresponding forecast sample rows
"""
dct = {}
columns = ["M_SESSION_UID","TIMESTAMP"] + single_val_cols + multi_val_cols
data = data[data["M_NUM_WEATHER_FORECAST_SAMPLES"]>0]
for (sid, ts), data_sid_ts in tqdm(data.groupby(["M_SESSION_UID", "TIMESTAMP"])):
num_samples = list(data_sid_ts["M_NUM_WEATHER_FORECAST_SAMPLES"])[0]
sess_col = "M_WEATHER_FORECAST_SAMPLES_M_SESSION_TYPE"
num_nans = data_sid_ts[sess_col].isna().sum()
for sess_type, data_sid_ts_sess in data_sid_ts.iloc[num_nans:num_nans+num_samples].groupby(sess_col):
dct[(sid, ts, sess_type)] = data_sid_ts_sess[columns]
return dct
def create_processed_frame(dct, single_val_cols, multi_val_cols):
"""
Creates a table where each row corresponds to a single (session_uid, timestamp) tuple and its all possible future forecasts
Puts NaN values for forecasts that are not given
Parameters
----------
dct : Dictionary
Gets the dataframe Dictionary
single_val_cols : list
Columns which has only single values. Example: "M_TOTAL_LAPS"
multi_val_cols : list
Columns which might have multiple values. Example: "M_WEATHER_FORECAST_SAMPLES_M_WEATHER",
Returns
-------
df: DataFrame
Generated a tabular form.
"""
times = ["0", "5", "10", "15", "30", "45", "60", "90", "120"]
multi_val_cols_timed = [f"{el}_{time}" for time in times for el in multi_val_cols]
rows = []
for table in tqdm(dct.values()):
nans = [np.nan]*(len(times)-len(table))
single_vals = list(table[["M_SESSION_UID", "TIMESTAMP"] + single_val_cols].iloc[0])
multi_vals = np.array([list(table[col])+nans for col in multi_val_cols]).T.flatten()
row = single_vals + list(multi_vals)
rows.append(row)
columns = ["M_SESSION_UID", "TIMESTAMP"] + \
single_val_cols + multi_val_cols_timed
df = pd.DataFrame(columns = columns, data=rows)
return df
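# The wide frame built above names the forecast columns by appending the horizon in
# minutes, e.g. "M_WEATHER_FORECAST_SAMPLES_M_WEATHER_0" ... "_120"; horizons that are
# missing from a sample are left as NaN.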
#adds flag information columns to given processed dset
def add_flag_info(original_dset, processed_dset):
ls = []
for i in range(len(processed_dset)):
sess_uid, ts = processed_dset[["M_SESSION_UID", "TIMESTAMP"]].iloc[i]
flags = set(original_dset[(original_dset["M_SESSION_UID"] == sess_uid) & (
original_dset["TIMESTAMP"] == ts)]["M_ZONE_FLAG"].dropna())
ls.append([1 if f in flags else 0 for f in [1, 2, 3, 4]])
processed_dset[["IS_GREEN_FLAG_UP", "IS_BLUE_FLAG_UP",
"IS_YELLOW_FLAG_UP", "IS_RED_FLAG_UP"]] = ls
return processed_dset
# calls clean_dataframe_dct, create_processed_frame for the weather.csv
# and then splits the cleaned df into train, val, test partition considering session uids
def prepare_datasets(dataset_path, single_val_cols, multi_val_cols, train_ratio = 0.7, val_ratio = 0.2, use_flag_info=True):
"""
Main function which calls clean_dataframe_dct and create_processed_frame functions
Splits them into Train, Validation Test set by session uids
Parameters
----------
dataset_path : str
Path for preprocessed_data
single_val_cols : list
Columns which has only single values. Example: "M_TOTAL_LAPS"
multi_val_cols : list
Columns which might have multiple values. Example: "M_WEATHER_FORECAST_SAMPLES_M_WEATHER",
train_ratio,val_ratio,test_ratio :
Ratio of session_uids for the given dataset
Returns
-------
train_df: DataFrame
val_df: Dataframe
test_df: Dataframe
Note: Splitting by session_uids do not guarantee that data will split exactly as the given ratio
since each session_uid have different amount of rows.
"""
data = pd.read_csv(dataset_path)
if "Unnamed: 58" in data.columns:
data = data.drop(["Unnamed: 58"],axis=1)
print("Creating (session_uid, timestamp) pairs:")
cleaned_dct = clean_dataframe_dct(data, single_val_cols, multi_val_cols)
print("Converting into dataframe:")
processed_df = create_processed_frame(cleaned_dct, single_val_cols, multi_val_cols)
# drops duplicates ignoring NA
temp_na_token = -999
processed_df[processed_df.isna()] = temp_na_token
ignored_cols = ["M_SESSION_UID", "TIMESTAMP"]
processed_df = processed_df.drop_duplicates(
subset=[col for col in processed_df.columns if col not in ignored_cols])
processed_df[processed_df==temp_na_token] = pd.NA
session_uids = list(set(processed_df["M_SESSION_UID"]))
random.shuffle(session_uids)
train_uids, val_uids, test_uids = np.split(session_uids, [int(len(session_uids)*train_ratio),
int(len(session_uids)*(train_ratio+val_ratio))])
train_df = processed_df[[uid in train_uids for uid in processed_df["M_SESSION_UID"]]]
val_df = processed_df[[
uid in val_uids for uid in processed_df["M_SESSION_UID"]]]
test_df = processed_df[[uid in test_uids for uid in processed_df["M_SESSION_UID"]]]
if use_flag_info:
train_df = add_flag_info(data, train_df)
val_df = add_flag_info(data, val_df)
test_df = add_flag_info(data, test_df)
train_df = train_df.drop(["M_SESSION_UID", "TIMESTAMP"], axis=1)
val_df = val_df.drop(["M_SESSION_UID", "TIMESTAMP"], axis=1)
test_df = test_df.drop(["M_SESSION_UID", "TIMESTAMP"], axis=1)
train_df.to_csv(op.join("data","train.csv"), index=False)
val_df.to_csv(op.join("data","val.csv"), index=False)
test_df.to_csv(op.join("data","test.csv"), index=False)
return train_df, val_df, test_df
# for given time offset creates a table that has all relevant input features and outputs
def create_dataset(dset_dct, time_offset, single_val_cols, multi_val_cols, drop_duplicates=False):
flag_cols = [col for col in dset_dct["train"].columns if "FLAG" in col]
columns = single_val_cols + multi_val_cols + flag_cols + ["TARGET_WEATHER", "TARGET_RAIN_PERCENTAGE"]
windows = time_windows[time_offset]
processed_dset_dct = {}
os.makedirs(op.join("data", str(time_offset)), exist_ok=True)
for typ, dset in dset_dct.items():
tables = []
for w in windows:
y_cols = [f"M_WEATHER_FORECAST_SAMPLES_M_WEATHER_{w[1]}", f"M_RAIN_PERCENTAGE_{w[1]}"]
tmp_cols = single_val_cols + [f"{c}_{w[0]}" for c in multi_val_cols] + flag_cols + y_cols
dset_tmp = dset[tmp_cols]
dset_tmp = dset_tmp.dropna()
tables.append(dset_tmp.__array__())
rows = [row for table in tables for row in table]
df = pd.DataFrame(columns=columns, data=rows)
df["TARGET_WEATHER"] = df["TARGET_WEATHER"].astype("int64")
# drop duplicates only from train
if drop_duplicates and typ=="train":
df = df.drop_duplicates()
df.to_csv(op.join("data", str(time_offset), typ+".csv"), index=False)
processed_dset_dct[typ] = df
return processed_dset_dct
# calls create_dataset if the dataset is not saved otherwise reads it
def get_df(df_dct, time_offset, single_val_cols, multi_val_cols, force_recreate=False):
"""
Gets single dataframes for single Time Offset
Parameters
----------
df_dct : Dictionary
Dictionary which holds main train,val,test dataset.
single_val_cols : list
Columns which has only single values. Example: "M_TOTAL_LAPS"
multi_val_cols : list
Columns which might have multiple values. Example: "M_WEATHER_FORECAST_SAMPLES_M_WEATHER",
force_recreate : Optional, Default: False
Returns
-------
df_dct for a single offset
"""
if force_recreate or not op.exists(op.join("data", time_offset, "train.csv")) or \
not op.exists(op.join("data", time_offset, "val.csv")) or not op.exists(op.join("data", time_offset, "test.csv")):
df_t_min_dct = create_dataset(
df_dct, time_offset, single_val_cols, multi_val_cols)
else:
train_df = pd.read_csv(op.join("data", time_offset, "train.csv"))
val_df = pd.read_csv(op.join("data", time_offset, "val.csv"))
test_df = pd.read_csv(op.join("data", time_offset, "test.csv"))
df_t_min_dct = {"train": train_df, "val": val_df, "test": test_df}
return df_t_min_dct
# calls get_df for all possible time_offset values
def get_dfs(df_dct, single_val_cols, multi_val_cols):
"""
Main function to get all the dataframes
Parameters
----------
df_dct : Dictionary
        Dictionary which holds the main train, val, test datasets
    single_val_cols : list
        Columns which hold a single value. Example: "M_TOTAL_LAPS"
multi_val_cols : list
Columns which might have multiple values. Example: "M_WEATHER_FORECAST_SAMPLES_M_WEATHER",
Returns
-------
df_timed_dct: Dictionary of Time Offset Dictionary.
"""
df_timed_dct = {}
for time_offset in ["5","10","15","30","60"]:
print("Creating dataset for time_offset={}".format(time_offset))
df_timed_dct[time_offset] = get_df(
df_dct, time_offset, single_val_cols, multi_val_cols)
return df_timed_dct
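# Usage sketch (the csv path and column lists below are illustrative placeholders, not
# values taken from this module):
# from src.data_prep import prepare_datasets, get_dfs
# train_df, val_df, test_df = prepare_datasets("data/weather.csv", SINGLE_VAL_COLS, MULTI_VAL_COLS)
# df_dct = {"train": train_df, "val": val_df, "test": test_df}
# df_timed_dct = get_dfs(df_dct, SINGLE_VAL_COLS, MULTI_VAL_COLS)  # keys: "5", "10", "15", "30", "60"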
|
n = int(input())
for i in range(0, n):
if i == n - 1:
print("Ho!")
else:
print("Ho ", end="")
|
from .read_tles import (
read_tles,
satellite_ephem_to_str
)
from .generate_tles_from_scratch import (
generate_tles_from_scratch_manual,
generate_tles_from_scratch_with_sgp
)
|
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Unlimited developers
"""
Tests specific for the electrum call 'blockchain.scripthash.get_history'
"""
import asyncio
from test_framework.util import assert_equal
from test_framework.electrumutil import (
ElectrumTestFramework,
ElectrumConnection,
script_to_scripthash,
sync_electrum_height)
from test_framework.blocktools import create_transaction, pad_tx
from test_framework.script import CScript, OP_TRUE, OP_DROP, OP_NOP
GET_HISTORY = "blockchain.scripthash.get_history"
class ElectrumScripthashGetHistory(ElectrumTestFramework):
def run_test(self):
n = self.nodes[0]
self.bootstrap_p2p()
coinbases = self.mine_blocks(n, 100)
async def async_tests(loop):
cli = ElectrumConnection(loop)
await cli.connect()
await self.test_blockheight_confirmed(n, cli, coinbases.pop(0))
loop = asyncio.get_event_loop()
loop.run_until_complete(async_tests(loop))
async def test_blockheight_confirmed(self, n, cli, unspent):
# Just a unique anyone-can-spend scriptpubkey
scriptpubkey = CScript([OP_TRUE, OP_DROP, OP_NOP])
scripthash = script_to_scripthash(scriptpubkey)
        # There should be no history for the scripthash yet
assert_equal(0, len(await cli.call(GET_HISTORY, scripthash)))
# Send tx to scripthash and confirm it
tx = create_transaction(unspent,
n = 0, value = unspent.vout[0].nValue,
sig = CScript([OP_TRUE]), out = scriptpubkey)
pad_tx(tx)
self.mine_blocks(n, 1, txns = [tx])
sync_electrum_height(n)
# History should now have 1 entry at current tip height
res = await cli.call(GET_HISTORY, scripthash)
assert_equal(1, len(res))
assert_equal(n.getblockcount(), res[0]['height'])
assert_equal(tx.hash, res[0]['tx_hash'])
if __name__ == '__main__':
ElectrumScripthashGetHistory().main()
|
# -*- coding: utf-8 -*-
# Copyright 2018 the HERA Project
# Licensed under the MIT License
'''Tests for abscal.py'''
import nose.tools as nt
import os
import shutil
import json
import numpy as np
import aipy
import optparse
import sys
from pyuvdata import UVCal, UVData
from pyuvdata import utils as uvutils
import hera_cal as hc
from hera_cal.data import DATA_PATH
from collections import OrderedDict as odict
import copy
from hera_cal.datacontainer import DataContainer
import glob
from hera_cal.utils import split_pol
class Test_AbsCal_Funcs:
def setUp(self):
np.random.seed(0)
# load into pyuvdata object
self.data_file = os.path.join(DATA_PATH, "zen.2458043.12552.xx.HH.uvORA")
self.uvd = UVData()
self.uvd.read_miriad(self.data_file)
self.freq_array = np.unique(self.uvd.freq_array)
self.antpos, self.ants = self.uvd.get_ENU_antpos(center=True, pick_data_ants=True)
self.antpos = odict(zip(self.ants, self.antpos))
self.time_array = np.unique(self.uvd.time_array)
# configure data into dictionaries
data, flgs = hc.io.load_vis(self.uvd, pop_autos=True)
wgts = odict()
for k in flgs.keys():
wgts[k] = (~flgs[k]).astype(np.float)
wgts = hc.datacontainer.DataContainer(wgts)
# configure baselines
bls = odict([(x, self.antpos[x[0]] - self.antpos[x[1]]) for x in data.keys()])
# make mock data
abs_gain = 0.5
TT_phi = np.array([-0.004, 0.006, 0])
model = odict()
for i, k in enumerate(data.keys()):
model[k] = data[k] * np.exp(abs_gain + 1j * np.dot(TT_phi, bls[k]))
# assign data
self.data = data
self.bls = bls
self.model = model
self.wgts = wgts
def test_data_key_to_array_axis(self):
m, pk = hc.abscal.data_key_to_array_axis(self.model, 2)
nt.assert_equal(m[(24, 25)].shape, (60, 64, 1))
nt.assert_equal('XX' in pk, True)
# test w/ avg_dict
m, ad, pk = hc.abscal.data_key_to_array_axis(self.model, 2, avg_dict=self.bls)
nt.assert_equal(m[(24, 25)].shape, (60, 64, 1))
nt.assert_equal(ad[(24, 25)].shape, (3,))
nt.assert_equal('XX' in pk, True)
def test_array_axis_to_data_key(self):
m, pk = hc.abscal.data_key_to_array_axis(self.model, 2)
m2 = hc.abscal.array_axis_to_data_key(m, 2, ['XX'])
nt.assert_equal(m2[(24, 25, 'XX')].shape, (60, 64))
# copy dict
m, ad, pk = hc.abscal.data_key_to_array_axis(self.model, 2, avg_dict=self.bls)
m2, cd = hc.abscal.array_axis_to_data_key(m, 2, ['XX'], copy_dict=ad)
nt.assert_equal(m2[(24, 25, 'XX')].shape, (60, 64))
nt.assert_equal(cd[(24, 25, 'XX')].shape, (3,))
def test_interp2d(self):
# test interpolation w/ warning
m, mf = hc.abscal.interp2d_vis(self.data, self.time_array, self.freq_array,
self.time_array, self.freq_array, flags=self.wgts, medfilt_flagged=False)
nt.assert_equal(m[(24, 25, 'XX')].shape, (60, 64))
# downsampling w/ no flags
m, mf = hc.abscal.interp2d_vis(self.data, self.time_array, self.freq_array,
self.time_array[::2], self.freq_array[::2])
nt.assert_equal(m[(24, 25, 'XX')].shape, (30, 32))
# test flag propagation
m, mf = hc.abscal.interp2d_vis(self.data, self.time_array, self.freq_array,
self.time_array, self.freq_array, flags=self.wgts, medfilt_flagged=True)
nt.assert_true(mf[(24, 25, 'XX')][10, 0])
# test flag extrapolation
m, mf = hc.abscal.interp2d_vis(self.data, self.time_array, self.freq_array,
self.time_array + .0001, self.freq_array, flags=self.wgts, flag_extrapolate=True)
nt.assert_true(mf[(24, 25, 'XX')][-1].min())
def test_wiener(self):
# test smoothing
d = hc.abscal.wiener(self.data, window=(5, 15), noise=None, medfilt=True, medfilt_kernel=(1, 13))
nt.assert_equal(d[(24, 37, 'XX')].shape, (60, 64))
nt.assert_equal(d[(24, 37, 'XX')].dtype, np.complex)
# test w/ noise
d = hc.abscal.wiener(self.data, window=(5, 15), noise=0.1, medfilt=True, medfilt_kernel=(1, 13))
nt.assert_equal(d[(24, 37, 'XX')].shape, (60, 64))
# test w/o medfilt
d = hc.abscal.wiener(self.data, window=(5, 15), medfilt=False)
nt.assert_equal(d[(24, 37, 'XX')].shape, (60, 64))
# test as array
d = hc.abscal.wiener(self.data[(24, 37, 'XX')], window=(5, 15), medfilt=False, array=True)
nt.assert_equal(d.shape, (60, 64))
nt.assert_equal(d.dtype, np.complex)
def test_Baseline(self):
# test basic execution
keys = self.data.keys()
k1 = (24, 25, 'XX') # 14.6 m E-W
i1 = keys.index(k1)
k2 = (24, 37, 'XX') # different
i2 = keys.index(k2)
k3 = (52, 53, 'XX') # 14.6 m E-W
i3 = keys.index(k3)
bls = map(lambda k: hc.abscal.Baseline(self.antpos[k[1]] - self.antpos[k[0]], tol=2.0), keys)
bls_conj = map(lambda k: hc.abscal.Baseline(self.antpos[k[0]] - self.antpos[k[1]], tol=2.0), keys)
nt.assert_equal(bls[i1], bls[i1])
nt.assert_false(bls[i1] == bls[i2])
nt.assert_equal(bls[i1] == bls_conj[i1], 'conjugated')
# test different yet redundant baselines still agree
nt.assert_equal(bls[i1], bls[i3])
# test tolerance works as expected
bls = map(lambda k: hc.abscal.Baseline(self.antpos[k[1]] - self.antpos[k[0]], tol=1e-4), keys)
nt.assert_not_equal(bls[i1], bls[i3])
def test_match_red_baselines(self):
model = copy.deepcopy(self.data)
model = DataContainer(odict([((k[0] + 1, k[1] + 1, k[2]), model[k]) for i, k in enumerate(model.keys())]))
del model[(25, 54, 'XX')]
model_antpos = odict([(k + 1, self.antpos[k]) for i, k in enumerate(self.antpos.keys())])
new_model = hc.abscal.match_red_baselines(model, model_antpos, self.data, self.antpos, tol=2.0, verbose=False)
nt.assert_equal(len(new_model.keys()), 8)
nt.assert_true((24, 37, 'XX') in new_model)
nt.assert_false((24, 53, 'XX') in new_model)
def test_mirror_data_to_red_bls(self):
# make fake data
reds = hc.redcal.get_reds(self.antpos, pols=['XX'])
data = DataContainer(odict(map(lambda k: (k[0], self.data[k[0]]), reds[:5])))
        # test execution
d = hc.abscal.mirror_data_to_red_bls(data, self.antpos)
nt.assert_equal(len(d.keys()), 16)
nt.assert_true((24, 25, 'XX') in d)
# test correct value is propagated
nt.assert_almost_equal(data[(24, 25, 'XX')][30, 30], d[(38, 39, 'XX')][30, 30])
# test reweighting
w = hc.abscal.mirror_data_to_red_bls(self.wgts, self.antpos, weights=True)
nt.assert_equal(w[(24, 25, 'XX')].dtype, np.float)
nt.assert_almost_equal(w[(24, 25, 'XX')].max(), 16.0)
def test_echo(self):
hc.abscal.echo('hi', verbose=True)
hc.abscal.echo('hi', type=1, verbose=True)
def test_flatten(self):
l = hc.abscal.flatten([['hi']])
nt.assert_equal(np.array(l).ndim, 1)
def test_avg_data_across_red_bls(self):
# test basic execution
wgts = copy.deepcopy(self.wgts)
wgts[(24, 25, 'XX')][45, 45] = 0.0
data, flags, antpos, ants, freqs, times, lsts, pols = hc.io.load_vis(self.data_file, return_meta=True)
rd, rf, rk = hc.abscal.avg_data_across_red_bls(data, antpos, wgts=wgts, tol=2.0, broadcast_wgts=False)
nt.assert_equal(rd[(24, 25, 'XX')].shape, (60, 64))
nt.assert_true(rf[(24, 25, 'XX')][45, 45] > 0.0)
# test various kwargs
wgts[(24, 25, 'XX')][45, 45] = 0.0
rd, rf, rk = hc.abscal.avg_data_across_red_bls(data, antpos, tol=2.0, wgts=wgts, broadcast_wgts=True)
nt.assert_equal(len(rd.keys()), 9)
nt.assert_equal(len(rf.keys()), 9)
nt.assert_almost_equal(rf[(24, 25, 'XX')][45, 45], 0.0)
# test averaging worked
rd, rf, rk = hc.abscal.avg_data_across_red_bls(data, antpos, tol=2.0, broadcast_wgts=False)
v = np.mean([data[(52, 53, 'XX')], data[(37, 38, 'XX')], data[(24, 25, 'XX')], data[(38, 39, 'XX')]], axis=0)
nt.assert_true(np.isclose(rd[(24, 25, 'XX')], v).min())
# test mirror_red_data
rd, rf, rk = hc.abscal.avg_data_across_red_bls(data, antpos, wgts=self.wgts, tol=2.0, mirror_red_data=True)
nt.assert_equal(len(rd.keys()), 21)
nt.assert_equal(len(rf.keys()), 21)
def test_avg_file_across_red_bls(self):
rd, rf, rk = hc.abscal.avg_file_across_red_bls(self.data_file, write_miriad=False, output_data=True)
if os.path.exists('ex'):
shutil.rmtree('ex')
hc.abscal.avg_file_across_red_bls(self.data_file, outdir='.', output_fname='ex', write_miriad=True, output_data=False)
nt.assert_true(os.path.exists('ex'))
if os.path.exists('ex'):
shutil.rmtree('ex')
def test_match_times(self):
dfiles = map(lambda f: os.path.join(DATA_PATH, f), ['zen.2458043.12552.xx.HH.uvORA',
'zen.2458043.13298.xx.HH.uvORA'])
mfiles = map(lambda f: os.path.join(DATA_PATH, f), ['zen.2458042.12552.xx.HH.uvXA',
'zen.2458042.13298.xx.HH.uvXA'])
# test basic execution
relevant_mfiles = hc.abscal.match_times(dfiles[0], mfiles)
nt.assert_equal(len(relevant_mfiles), 2)
# test basic execution
relevant_mfiles = hc.abscal.match_times(dfiles[1], mfiles)
nt.assert_equal(len(relevant_mfiles), 1)
# test exception
mfiles = sorted(glob.glob(os.path.join(DATA_PATH, 'zen.2458045.*.xx.HH.uvXRAA')))
relevant_mfiles = hc.abscal.match_times(dfiles[0], mfiles)
nt.assert_equal(len(relevant_mfiles), 0)
def test_rephase_vis(self):
dfile = os.path.join(DATA_PATH, 'zen.2458043.12552.xx.HH.uvORA')
mfiles = map(lambda f: os.path.join(DATA_PATH, f), ['zen.2458042.12552.xx.HH.uvXA'])
m, mf, mantp, mant, mfr, mt, ml, mp = hc.io.load_vis(mfiles, return_meta=True)
d, df, dantp, dant, dfr, dt, dl, dp = hc.io.load_vis(dfile, return_meta=True)
bls = odict(map(lambda k: (k, dantp[k[0]] - dantp[k[1]]), d.keys()))
# basic execution
new_m, new_f = hc.abscal.rephase_vis(m, ml, dl, bls, dfr)
k = new_m.keys()[0]
nt.assert_equal(new_m[k].shape, d[k].shape)
nt.assert_true(new_f[k][-1].min())
nt.assert_false(new_f[k][0].max())
def test_cut_bl(self):
Nbls = len(self.data)
_data = hc.abscal.cut_bls(self.data, self.bls, 20.0)
nt.assert_true(Nbls, 21)
nt.assert_true(len(_data), 12)
class Test_AbsCal:
def setUp(self):
np.random.seed(0)
# load into pyuvdata object
self.data_fname = os.path.join(DATA_PATH, "zen.2458043.12552.xx.HH.uvORA")
self.model_fname = os.path.join(DATA_PATH, "zen.2458042.12552.xx.HH.uvXA")
self.AC = hc.abscal.AbsCal(self.data_fname, self.model_fname, refant=24)
# make custom gain keys
d, fl, ap, a, f, t, l, p = hc.io.load_vis(self.data_fname, return_meta=True, pick_data_ants=False)
self.freq_array = f
self.antpos = ap
gain_pols = np.unique(map(split_pol, p))
self.ap = ap
self.gk = hc.abscal.flatten(map(lambda p: map(lambda k: (k, p), a), gain_pols))
self.freqs = f
def test_init(self):
# init with no meta
AC = hc.abscal.AbsCal(self.AC.model, self.AC.data)
nt.assert_almost_equal(AC.bls, None)
# init with meta
AC = hc.abscal.AbsCal(self.AC.model, self.AC.data, antpos=self.AC.antpos, freqs=self.AC.freqs)
nt.assert_almost_equal(AC.bls[(24, 25, 'XX')][0], -14.607842046642745)
# init with meta
AC = hc.abscal.AbsCal(self.AC.model, self.AC.data)
# test feeding file and refant and bl_cut and bl_taper
AC = hc.abscal.AbsCal(self.model_fname, self.data_fname, refant=24, antpos=self.AC.antpos,
bl_cut=26.0, bl_taper_fwhm=15.0)
# test ref ant
nt.assert_equal(AC.refant, 24)
nt.assert_almost_equal(np.linalg.norm(AC.antpos[24]), 0.0)
# test bl cut
nt.assert_false((np.array(map(lambda k: np.linalg.norm(AC.bls[k]), AC.bls.keys())) > 26.0).any())
# test bl taper
nt.assert_true(np.median(AC.wgts[(24, 25, 'XX')]) > np.median(AC.wgts[(24, 39, 'XX')]))
def test_abs_amp_logcal(self):
# test execution and variable assignments
self.AC.abs_amp_logcal(verbose=False)
nt.assert_equal(self.AC.abs_eta[(24, 'jxx')].shape, (60, 64))
nt.assert_equal(self.AC.abs_eta_gain[(24, 'jxx')].shape, (60, 64))
nt.assert_equal(self.AC.abs_eta_arr.shape, (7, 60, 64, 1))
nt.assert_equal(self.AC.abs_eta_gain_arr.shape, (7, 60, 64, 1))
# test Nones
AC = hc.abscal.AbsCal(self.AC.model, self.AC.data)
nt.assert_equal(AC.abs_eta, None)
nt.assert_equal(AC.abs_eta_arr, None)
nt.assert_equal(AC.abs_eta_gain, None)
nt.assert_equal(AC.abs_eta_gain_arr, None)
# test propagation to gain_arr
AC.abs_amp_logcal(verbose=False)
AC._abs_eta_arr *= 0
nt.assert_almost_equal(np.abs(AC.abs_eta_gain_arr[0, 0, 0, 0]), 1.0)
# test custom gain
g = self.AC.custom_abs_eta_gain(self.gk)
nt.assert_equal(len(g), 47)
# test w/ no wgts
AC.wgts = None
AC.abs_amp_logcal(verbose=False)
def test_TT_phs_logcal(self):
# test execution
self.AC.TT_phs_logcal(verbose=False)
nt.assert_equal(self.AC.TT_Phi_arr.shape, (7, 2, 60, 64, 1))
nt.assert_equal(self.AC.TT_Phi_gain_arr.shape, (7, 60, 64, 1))
nt.assert_equal(self.AC.abs_psi_arr.shape, (7, 60, 64, 1))
nt.assert_equal(self.AC.abs_psi_gain_arr.shape, (7, 60, 64, 1))
nt.assert_equal(self.AC.abs_psi[(24, 'jxx')].shape, (60, 64))
nt.assert_equal(self.AC.abs_psi_gain[(24, 'jxx')].shape, (60, 64))
nt.assert_equal(self.AC.TT_Phi[(24, 'jxx')].shape, (2, 60, 64))
nt.assert_equal(self.AC.TT_Phi_gain[(24, 'jxx')].shape, (60, 64))
nt.assert_true(np.isclose(np.angle(self.AC.TT_Phi_gain[(24, 'jxx')]), 0.0).all())
# test merge pols
self.AC.TT_phs_logcal(verbose=False, four_pol=True)
nt.assert_equal(self.AC.TT_Phi_arr.shape, (7, 2, 60, 64, 1))
nt.assert_equal(self.AC.abs_psi_arr.shape, (7, 60, 64, 1))
# test Nones
AC = hc.abscal.AbsCal(self.AC.model, self.AC.data, antpos=self.antpos)
nt.assert_equal(AC.abs_psi_arr, None)
nt.assert_equal(AC.abs_psi_gain_arr, None)
nt.assert_equal(AC.TT_Phi_arr, None)
nt.assert_equal(AC.TT_Phi_gain_arr, None)
nt.assert_equal(AC.abs_psi, None)
nt.assert_equal(AC.abs_psi_gain, None)
nt.assert_equal(AC.TT_Phi, None)
nt.assert_equal(AC.TT_Phi_gain, None)
# test custom gain
g = self.AC.custom_TT_Phi_gain(self.gk, self.ap)
nt.assert_equal(len(g), 47)
g = self.AC.custom_abs_psi_gain(self.gk)
nt.assert_equal(g[(0, 'jxx')].shape, (60, 64))
# test w/ no wgts
AC.wgts = None
AC.TT_phs_logcal(verbose=False)
def test_amp_logcal(self):
self.AC.amp_logcal(verbose=False)
nt.assert_equal(self.AC.ant_eta[(24, 'jxx')].shape, (60, 64))
nt.assert_equal(self.AC.ant_eta_gain[(24, 'jxx')].shape, (60, 64))
nt.assert_equal(self.AC.ant_eta_arr.shape, (7, 60, 64, 1))
nt.assert_equal(self.AC.ant_eta_arr.dtype, np.float)
nt.assert_equal(self.AC.ant_eta_gain_arr.shape, (7, 60, 64, 1))
nt.assert_equal(self.AC.ant_eta_gain_arr.dtype, np.complex)
# test Nones
AC = hc.abscal.AbsCal(self.AC.model, self.AC.data)
nt.assert_equal(AC.ant_eta, None)
nt.assert_equal(AC.ant_eta_gain, None)
nt.assert_equal(AC.ant_eta_arr, None)
nt.assert_equal(AC.ant_eta_gain_arr, None)
# test w/ no wgts
AC.wgts = None
AC.amp_logcal(verbose=False)
def test_phs_logcal(self):
self.AC.phs_logcal(verbose=False)
nt.assert_equal(self.AC.ant_phi[(24, 'jxx')].shape, (60, 64))
nt.assert_equal(self.AC.ant_phi_gain[(24, 'jxx')].shape, (60, 64))
nt.assert_equal(self.AC.ant_phi_arr.shape, (7, 60, 64, 1))
nt.assert_equal(self.AC.ant_phi_arr.dtype, np.float)
nt.assert_equal(self.AC.ant_phi_gain_arr.shape, (7, 60, 64, 1))
nt.assert_equal(self.AC.ant_phi_gain_arr.dtype, np.complex)
nt.assert_true(np.isclose(np.angle(self.AC.ant_phi_gain[(24, 'jxx')]), 0.0).all())
self.AC.phs_logcal(verbose=False, avg=True)
AC = hc.abscal.AbsCal(self.AC.model, self.AC.data)
nt.assert_equal(AC.ant_phi, None)
nt.assert_equal(AC.ant_phi_gain, None)
nt.assert_equal(AC.ant_phi_arr, None)
nt.assert_equal(AC.ant_phi_gain_arr, None)
# test w/ no wgts
AC.wgts = None
AC.phs_logcal(verbose=False)
def test_delay_lincal(self):
# test w/o offsets
self.AC.delay_lincal(verbose=False, kernel=(1, 3), medfilt=False, solve_offsets=False)
nt.assert_equal(self.AC.ant_dly[(24, 'jxx')].shape, (60, 1))
nt.assert_equal(self.AC.ant_dly_gain[(24, 'jxx')].shape, (60, 64))
nt.assert_equal(self.AC.ant_dly_arr.shape, (7, 60, 1, 1))
nt.assert_equal(self.AC.ant_dly_gain_arr.shape, (7, 60, 64, 1))
# test w/ offsets
self.AC.delay_lincal(verbose=False, kernel=(1, 3), medfilt=False, solve_offsets=True)
nt.assert_equal(self.AC.ant_dly_phi[(24, 'jxx')].shape, (60, 1))
nt.assert_equal(self.AC.ant_dly_phi_gain[(24, 'jxx')].shape, (60, 64))
nt.assert_equal(self.AC.ant_dly_phi_arr.shape, (7, 60, 1, 1))
nt.assert_equal(self.AC.ant_dly_phi_gain_arr.shape, (7, 60, 64, 1))
nt.assert_equal(self.AC.ant_dly_arr.shape, (7, 60, 1, 1))
nt.assert_equal(self.AC.ant_dly_arr.dtype, np.float)
nt.assert_equal(self.AC.ant_dly_gain_arr.shape, (7, 60, 64, 1))
nt.assert_equal(self.AC.ant_dly_gain_arr.dtype, np.complex)
nt.assert_true(np.isclose(np.angle(self.AC.ant_dly_gain[(24, 'jxx')]), 0.0).all())
nt.assert_true(np.isclose(np.angle(self.AC.ant_dly_phi_gain[(24, 'jxx')]), 0.0).all())
# test exception
AC = hc.abscal.AbsCal(self.AC.model, self.AC.data)
nt.assert_raises(AttributeError, AC.delay_lincal)
# test Nones
AC = hc.abscal.AbsCal(self.AC.model, self.AC.data, freqs=self.freq_array)
nt.assert_equal(AC.ant_dly, None)
nt.assert_equal(AC.ant_dly_gain, None)
nt.assert_equal(AC.ant_dly_arr, None)
nt.assert_equal(AC.ant_dly_gain_arr, None)
nt.assert_equal(AC.ant_dly_phi, None)
nt.assert_equal(AC.ant_dly_phi_gain, None)
nt.assert_equal(AC.ant_dly_phi_arr, None)
nt.assert_equal(AC.ant_dly_phi_gain_arr, None)
# test flags handling
AC = hc.abscal.AbsCal(self.AC.model, self.AC.data, freqs=self.freqs)
AC.wgts[(24, 25, 'XX')] *= 0
AC.delay_lincal(verbose=False)
# test medfilt
self.AC.delay_lincal(verbose=False, medfilt=False)
self.AC.delay_lincal(verbose=False, time_avg=True)
# test w/ no wgts
AC.wgts = None
AC.delay_lincal(verbose=False)
def test_delay_slope_lincal(self):
# test w/o offsets
self.AC.delay_slope_lincal(verbose=False, kernel=(1, 3), medfilt=False)
nt.assert_equal(self.AC.dly_slope[(24, 'jxx')].shape, (2, 60, 1))
nt.assert_equal(self.AC.dly_slope_gain[(24, 'jxx')].shape, (60, 64))
nt.assert_equal(self.AC.dly_slope_arr.shape, (7, 2, 60, 1, 1))
nt.assert_equal(self.AC.dly_slope_gain_arr.shape, (7, 60, 64, 1))
nt.assert_equal(self.AC.dly_slope_ant_dly_arr.shape, (7, 60, 1, 1))
nt.assert_true(np.isclose(np.angle(self.AC.dly_slope_gain[(24, 'jxx')]), 0.0).all())
g = self.AC.custom_dly_slope_gain(self.gk, self.ap)
nt.assert_equal(g[(0, 'jxx')].shape, (60, 64))
# test exception
AC = hc.abscal.AbsCal(self.AC.model, self.AC.data)
nt.assert_raises(AttributeError, AC.delay_slope_lincal)
# test Nones
AC = hc.abscal.AbsCal(self.AC.model, self.AC.data, antpos=self.antpos, freqs=self.freq_array)
nt.assert_equal(AC.dly_slope, None)
nt.assert_equal(AC.dly_slope_gain, None)
nt.assert_equal(AC.dly_slope_arr, None)
nt.assert_equal(AC.dly_slope_gain_arr, None)
nt.assert_equal(AC.dly_slope_ant_dly_arr, None)
# test medfilt and time_avg
self.AC.delay_slope_lincal(verbose=False, medfilt=False)
self.AC.delay_slope_lincal(verbose=False, time_avg=True)
# test four pol
self.AC.delay_slope_lincal(verbose=False, four_pol=True)
nt.assert_equal(self.AC.dly_slope[(24, 'jxx')].shape, (2, 60, 1))
nt.assert_equal(self.AC.dly_slope_gain[(24, 'jxx')].shape, (60, 64))
nt.assert_equal(self.AC.dly_slope_arr.shape, (7, 2, 60, 1, 1))
nt.assert_equal(self.AC.dly_slope_gain_arr.shape, (7, 60, 64, 1))
# test flags handling
AC = hc.abscal.AbsCal(self.AC.model, self.AC.data, antpos=self.ap, freqs=self.freqs)
AC.wgts[(24, 25, 'XX')] *= 0
AC.delay_slope_lincal(verbose=False)
# test w/ no wgts
AC.wgts = None
AC.delay_slope_lincal(verbose=False)
def test_global_phase_slope_logcal(self):
# test w/o offsets
self.AC.global_phase_slope_logcal(verbose=False, edge_cut=31)
nt.assert_equal(self.AC.phs_slope[(24, 'jxx')].shape, (2, 60, 1))
nt.assert_equal(self.AC.phs_slope_gain[(24, 'jxx')].shape, (60, 64))
nt.assert_equal(self.AC.phs_slope_arr.shape, (7, 2, 60, 1, 1))
nt.assert_equal(self.AC.phs_slope_gain_arr.shape, (7, 60, 64, 1))
nt.assert_equal(self.AC.phs_slope_ant_phs_arr.shape, (7, 60, 1, 1))
nt.assert_true(np.isclose(np.angle(self.AC.phs_slope_gain[(24, 'jxx')]), 0.0).all())
g = self.AC.custom_phs_slope_gain(self.gk, self.ap)
print g.keys()
nt.assert_equal(g[(0, 'jxx')].shape, (60, 64))
# test Nones
AC = hc.abscal.AbsCal(self.AC.model, self.AC.data, antpos=self.antpos, freqs=self.freq_array)
nt.assert_equal(AC.phs_slope, None)
nt.assert_equal(AC.phs_slope_gain, None)
nt.assert_equal(AC.phs_slope_arr, None)
nt.assert_equal(AC.phs_slope_gain_arr, None)
nt.assert_equal(AC.phs_slope_ant_phs_arr, None)
AC = hc.abscal.AbsCal(self.AC.model, self.AC.data, antpos=self.ap, freqs=self.freqs)
AC.wgts[(24, 25, 'XX')] *= 0
AC.global_phase_slope_logcal(verbose=False)
# test w/ no wgts
AC.wgts = None
AC.global_phase_slope_logcal(verbose=False)
def test_merge_gains(self):
self.AC.abs_amp_logcal(verbose=False)
self.AC.TT_phs_logcal(verbose=False)
self.AC.delay_lincal(verbose=False)
self.AC.phs_logcal(verbose=False)
self.AC.amp_logcal(verbose=False)
gains = (self.AC.abs_eta_gain, self.AC.TT_Phi_gain, self.AC.abs_psi_gain,
self.AC.ant_dly_gain, self.AC.ant_eta_gain, self.AC.ant_phi_gain)
gains = hc.abscal.merge_gains(gains)
k = (53, 'jxx')
nt.assert_equal(gains[k].shape, (60, 64))
nt.assert_equal(gains[k].dtype, np.complex)
nt.assert_almost_equal(np.abs(gains[k][0, 0]), np.abs(self.AC.abs_eta_gain[k] * self.AC.ant_eta_gain[k])[0, 0])
nt.assert_almost_equal(np.angle(gains[k][0, 0]), np.angle(self.AC.TT_Phi_gain[k] * self.AC.abs_psi_gain[k] *
self.AC.ant_dly_gain[k] * self.AC.ant_phi_gain[k])[0, 0])
def test_apply_gains(self):
# test basic execution
self.AC.abs_amp_logcal(verbose=False)
self.AC.TT_phs_logcal(verbose=False)
self.AC.delay_lincal(verbose=False)
self.AC.phs_logcal(verbose=False)
self.AC.amp_logcal(verbose=False)
gains = (self.AC.abs_eta_gain, self.AC.TT_Phi_gain, self.AC.abs_psi_gain,
self.AC.ant_dly_gain, self.AC.ant_eta_gain, self.AC.ant_phi_gain)
corr_data = hc.abscal.apply_gains(self.AC.data, gains, gain_convention='multiply')
nt.assert_equal(corr_data[(24, 25, 'XX')].shape, (60, 64))
nt.assert_equal(corr_data[(24, 25, 'XX')].dtype, np.complex)
nt.assert_almost_equal(corr_data[(24, 25, 'XX')][0, 0], (self.AC.data[(24, 25, 'XX')] *
self.AC.abs_eta_gain[(24, 'jxx')] * self.AC.abs_eta_gain[(25, 'jxx')] * self.AC.ant_eta_gain[(24, 'jxx')] *
self.AC.ant_eta_gain[(25, 'jxx')])[0, 0])
corr_data = hc.abscal.apply_gains(self.AC.data, gains, gain_convention='divide')
nt.assert_equal(corr_data[(24, 25, 'XX')].shape, (60, 64))
nt.assert_equal(corr_data[(24, 25, 'XX')].dtype, np.complex)
nt.assert_almost_equal(corr_data[(24, 25, 'XX')][0, 0], (self.AC.data[(24, 25, 'XX')] /
self.AC.abs_eta_gain[(24, 'jxx')] / self.AC.abs_eta_gain[(25, 'jxx')] / self.AC.ant_eta_gain[(24, 'jxx')] /
self.AC.ant_eta_gain[(25, 'jxx')])[0, 0])
# test for missing data
gains = copy.deepcopy(self.AC.abs_eta_gain)
del gains[(24, 'jxx')]
corr_data = hc.abscal.apply_gains(self.AC.data, gains)
nt.assert_true((24, 25, 'XX') not in corr_data)
def test_fill_dict_nans(self):
data = copy.deepcopy(self.AC.data)
wgts = copy.deepcopy(self.AC.wgts)
data[(25, 38, 'XX')][15, 20] *= np.nan
data[(25, 38, 'XX')][20, 15] *= np.inf
hc.abscal.fill_dict_nans(data, wgts=wgts, nan_fill=-1, inf_fill=-2)
nt.assert_equal(data[(25, 38, 'XX')][15, 20].real, -1)
nt.assert_equal(data[(25, 38, 'XX')][20, 15].real, -2)
nt.assert_almost_equal(wgts[(25, 38, 'XX')][15, 20], 0)
nt.assert_almost_equal(wgts[(25, 38, 'XX')][20, 15], 0)
data = copy.deepcopy(self.AC.data)
wgts = copy.deepcopy(self.AC.wgts)
data[(25, 38, 'XX')][15, 20] *= np.nan
data[(25, 38, 'XX')][20, 15] *= np.inf
hc.abscal.fill_dict_nans(data[(25, 38, 'XX')], wgts=wgts[(25, 38, 'XX')], nan_fill=-1, inf_fill=-2, array=True)
nt.assert_equal(data[(25, 38, 'XX')][15, 20].real, -1)
nt.assert_equal(data[(25, 38, 'XX')][20, 15].real, -2)
nt.assert_almost_equal(wgts[(25, 38, 'XX')][15, 20], 0)
nt.assert_almost_equal(wgts[(25, 38, 'XX')][20, 15], 0)
def test_fft_dly(self):
# test basic execution
k = (24, 25, 'XX')
vis = self.AC.model[k] / self.AC.data[k]
hc.abscal.fill_dict_nans(vis, nan_fill=0.0, inf_fill=0.0, array=True)
df = np.median(np.diff(self.AC.freqs))
# basic execution
dly, offset = hc.abscal.fft_dly(vis, df, medfilt=False, solve_phase=False)
nt.assert_equal(dly.shape, (60, 1))
nt.assert_equal(offset, None)
# median filtering
dly, offset = hc.abscal.fft_dly(vis, df, medfilt=True, solve_phase=False)
nt.assert_equal(dly.shape, (60, 1))
nt.assert_equal(offset, None)
# solve phase
dly, offset = hc.abscal.fft_dly(vis, df, medfilt=True, solve_phase=True)
nt.assert_equal(dly.shape, (60, 1))
nt.assert_equal(offset.shape, (60, 1))
# test windows and edgecut
dly, offset = hc.abscal.fft_dly(vis, df, medfilt=False, solve_phase=False, edge_cut=2, window='hann')
dly, offset = hc.abscal.fft_dly(vis, df, medfilt=False, solve_phase=False, window='blackmanharris')
nt.assert_raises(ValueError, hc.abscal.fft_dly, vis, df, window='foo')
nt.assert_raises(AssertionError, hc.abscal.fft_dly, vis, df, edge_cut=1000)
# test mock data
tau = np.array([1.5e-8]).reshape(1, -1) # 15 nanoseconds
f = np.linspace(0, 100e6, 1024)
df = np.median(np.diff(f))
r = np.exp(1j * 2 * np.pi * f * tau)
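# r is a pure phase ramp exp(2j*pi*f*tau): its delay-domain peak sits at tau = 15 ns,
# so fft_dly should recover ~1.5e-8 s to within the delta asserted below.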
dly, offset = hc.abscal.fft_dly(r, df, medfilt=True, kernel=(1, 5), solve_phase=False)
nt.assert_almost_equal(float(dly), 1.5e-8, delta=1e-9)
def test_abscal_arg_parser(self):
a = hc.abscal.abscal_arg_parser()
def test_omni_abscal_arg_parser(self):
a = hc.abscal.omni_abscal_arg_parser()
def test_abscal_run(self):
data_files = os.path.join(DATA_PATH, "zen.2458043.12552.xx.HH.uvORA")
model_files = [os.path.join(DATA_PATH, "zen.2458042.12552.xx.HH.uvXA"),
os.path.join(DATA_PATH, "zen.2458042.13298.xx.HH.uvXA")]
# blank run
gains, flags = hc.abscal.abscal_run(data_files, model_files, gen_amp_cal=True, write_calfits=False, return_gains=True, verbose=False)
# assert shapes and types
nt.assert_equal(gains[(24, 'jxx')].dtype, np.complex)
nt.assert_equal(gains[(24, 'jxx')].shape, (60, 64))
# first freq bin should be flagged due to complete flagging in model and data
nt.assert_true(flags[(24, 'jxx')][:, 0].all())
# solar flag run
gains, flags = hc.abscal.abscal_run(data_files, model_files, solar_horizon=0.0, gen_amp_cal=True, write_calfits=False, return_gains=True, verbose=False)
# all data should be flagged
nt.assert_true(flags[(24, 'jxx')].all())
# write calfits
outdir = "./"
cf_name = "ex.calfits"
if os.path.exists(os.path.join(outdir, cf_name)):
os.remove(os.path.join(outdir, cf_name))
gains, flags = hc.abscal.abscal_run(data_files, model_files, gen_amp_cal=True, write_calfits=True, output_calfits_fname=cf_name, outdir=outdir,
return_gains=True, verbose=False)
nt.assert_true(os.path.exists(os.path.join(outdir, cf_name)))
if os.path.exists(os.path.join(outdir, cf_name)):
os.remove(os.path.join(outdir, cf_name))
# check match_red_bls and reweight
hc.abscal.abscal_run(data_files, model_files, gen_amp_cal=True, write_calfits=False, verbose=False,
match_red_bls=True, reweight=True)
# check all calibration routines
gains, flags = hc.abscal.abscal_run(data_files, model_files, write_calfits=False, verbose=False, return_gains=True, delay_slope_cal=True, phase_slope_cal=True,
delay_cal=True, avg_phs_cal=True, abs_amp_cal=True, TT_phs_cal=True, gen_amp_cal=False, gen_phs_cal=False)
nt.assert_equal(gains[(24, 'jxx')].dtype, np.complex)
nt.assert_equal(gains[(24, 'jxx')].shape, (60, 64))
if os.path.exists('./ex.calfits'):
os.remove('./ex.calfits')
# check exceptions
nt.assert_raises(ValueError, hc.abscal.abscal_run, data_files, model_files, all_antenna_gains=True, outdir='./',
output_calfits_fname='ex.calfits', abs_amp_cal=False, TT_phs_cal=False, delay_cal=True, verbose=False)
nt.assert_raises(ValueError, hc.abscal.abscal_run, data_files, model_files, all_antenna_gains=True, outdir='./',
output_calfits_fname='ex.calfits', abs_amp_cal=False, TT_phs_cal=False, gen_phs_cal=True, verbose=False)
nt.assert_raises(ValueError, hc.abscal.abscal_run, data_files, model_files, all_antenna_gains=True, outdir='./',
output_calfits_fname='ex.calfits', abs_amp_cal=False, TT_phs_cal=False, gen_amp_cal=True, verbose=False)
if os.path.exists('./ex.calfits'):
os.remove('./ex.calfits')
# check all antenna gains run
hc.abscal.abscal_run(data_files, model_files, abs_amp_cal=True, all_antenna_gains=True, write_calfits=False)
# test general bandpass solvers
hc.abscal.abscal_run(data_files, model_files, TT_phs_cal=False, abs_amp_cal=False, gen_amp_cal=True, gen_phs_cal=True, write_calfits=False)
# test exception
nt.assert_raises(ValueError, hc.abscal.abscal_run, data_files, model_files, verbose=False, overwrite=True)
# check blank & flagged calfits file written if no LST overlap
bad_model_files = sorted(glob.glob(os.path.join(DATA_PATH, "zen.2458044.*.xx.HH.uvXRAA")))
hc.abscal.abscal_run(data_files, bad_model_files, write_calfits=True, overwrite=True, outdir='./',
output_calfits_fname='ex.calfits', verbose=False)
uvc = UVCal()
uvc.read_calfits('./ex.calfits')
nt.assert_true(uvc.flag_array.min())
nt.assert_almost_equal(uvc.gain_array.max(), 1.0)
os.remove('./ex.calfits')
# test w/ calfits files
calfits_infile = os.path.join(DATA_PATH, 'zen.2458043.12552.HH.uvA.omni.calfits')
hc.abscal.abscal_run(data_files, model_files, calfits_infile=calfits_infile, delay_slope_cal=True, phase_slope_cal=True,
outdir='./', output_calfits_fname='ex.calfits', overwrite=True, verbose=False, refant=38)
uvc = UVCal()
uvc.read_calfits('./ex.calfits')
nt.assert_true(uvc.total_quality_array is not None)
nt.assert_almost_equal(uvc.quality_array[1, 0, 32, 0, 0], 6378.8367138680978, places=3)
nt.assert_true(uvc.flag_array[0].min())
nt.assert_true(len(uvc.history) > 1000)
# assert refant phase is zero
nt.assert_true(np.isclose(np.angle(uvc.gain_array[uvc.ant_array.tolist().index(38)]), 0.0).all())
os.remove('./ex.calfits')
def test_mock_data(self):
# load into pyuvdata object
data_file = os.path.join(DATA_PATH, "zen.2458043.12552.xx.HH.uvORA")
data, flgs, ap, a, f, t, l, p = hc.io.load_vis(data_file, return_meta=True)
wgts = odict()
for k in flgs.keys():
wgts[k] = (~flgs[k]).astype(np.float)
wgts = hc.datacontainer.DataContainer(wgts)
# make mock data
dly_slope = np.array([-1e-9, 2e-9, 0])
model = odict()
for i, k in enumerate(data.keys()):
bl = np.around(ap[k[0]] - ap[k[1]], 0)
model[k] = data[k] * np.exp(2j * np.pi * f * np.dot(dly_slope, bl))
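# Note: the injected slope of [-1e-9, 2e-9] is expected to be recovered with the opposite
# sign below, which follows from the baseline/gain sign convention used by abscal.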
model = DataContainer(model)
# setup AbsCal
AC = hc.abscal.AbsCal(model, data, antpos=ap, wgts=wgts, freqs=f)
# run delay_slope_cal
AC.delay_slope_lincal(time_avg=True, verbose=False)
# test recovery: accuracy only checked at 10% level
nt.assert_almost_equal(AC.dly_slope_arr[0, 0, 0, 0, 0], 1e-9, delta=1e-10)
nt.assert_almost_equal(AC.dly_slope_arr[0, 1, 0, 0, 0], -2e-9, delta=1e-10)
# make mock data
abs_gain = 0.02
TT_phi = np.array([1e-3, -1e-3, 0])
model = odict()
for i, k in enumerate(data.keys()):
bl = np.around(ap[k[0]] - ap[k[1]], 0)
model[k] = data[k] * np.exp(abs_gain + 1j * np.dot(TT_phi, bl))
model = DataContainer(model)
# setup AbsCal
AC = hc.abscal.AbsCal(model, data, antpos=ap, wgts=wgts, freqs=f)
# run abs_amp cal
AC.abs_amp_logcal(verbose=False)
# run TT_phs_logcal
AC.TT_phs_logcal(verbose=False)
nt.assert_almost_equal(np.median(AC.abs_eta_arr[0, :, :, 0][AC.wgts[(24, 25, 'XX')].astype(np.bool)]),
-0.01, delta=1e-3)
nt.assert_almost_equal(np.median(AC.TT_Phi_arr[0, 0, :, :, 0][AC.wgts[(24, 25, 'XX')].astype(np.bool)]),
-1e-3, delta=1e-4)
nt.assert_almost_equal(np.median(AC.TT_Phi_arr[0, 1, :, :, 0][AC.wgts[(24, 25, 'XX')].astype(np.bool)]),
1e-3, delta=1e-4)
|
from capstone.x86_const import *
JMPS = [
X86_INS_JA,
X86_INS_JAE,
X86_INS_JB,
X86_INS_JBE,
X86_INS_JCXZ,
X86_INS_JE,
X86_INS_JECXZ,
X86_INS_JG,
X86_INS_JGE,
X86_INS_JL,
X86_INS_JLE,
X86_INS_JMP,
X86_INS_JNE,
X86_INS_JNO,
X86_INS_JNP,
X86_INS_JNS,
X86_INS_JO,
X86_INS_JP,
X86_INS_JRCXZ,
X86_INS_JS,
X86_INS_CALL,
X86_INS_LOOP,
X86_INS_LOOPE,
X86_INS_LOOPNE,
]
class Base(object):
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, repr(str(self)))
class Op(Base):
ins = None
op = None
@classmethod
def fromop(cls, ins, op):
i = cls()
i.ins = ins
i.op = op
i.parse()
return i
def __ne__(self, other):
return not(self == other)
class LabelOp(Op):
def __init__(self, name=None, any=False):
self.name = name
self.any = any
def __eq__(self, other):
return isinstance(other, LabelOp) and (other.name == self.name or self.any or other.any)
def __str__(self):
return self.name
class Imm(Op):
def __init__(self, val=0, any=False):
self.val = val
self.any = any
def parse(self):
self.val = self.op.imm
def __eq__(self, other):
if isinstance(other, (int, long)) and (self.val == other):
return True
return isinstance(other, Imm) and (other.val == self.val or self.any or other.any)
def __cmp__(self, other):
if not isinstance(other, Imm):
raise TypeError
return cmp(self.val, other.val)
def __str__(self):
if self.val >= 0:
return '0x%x' % self.val
else:
return '-0x%x' % abs(self.val)
class Reg(Op):
def __init__(self, reg='', any=False):
self.reg = reg
self.any = any
def parse(self):
self.reg = self.ins.reg_name(self.op.reg)
def __eq__(self, other):
if isinstance(other, basestring) and self.reg == other:
return True
return isinstance(other, Reg) and (other.reg == self.reg or self.any or other.any)
def __str__(self):
return self.reg
class Mem(Op):
MEM_SIZE = {
1: 'byte ptr',
2: 'word ptr',
4: 'dword ptr',
8: 'qword ptr',
10: 'xword ptr',
}
def __init__(self, size=0, base=None, index=None, segment=None, scale=1, off=0, any=False):
self.size = size
self.base = base
self.index = index
self.segment = segment
self.scale = scale
self.off = off
self.any = any
def parse(self):
ins = self.ins
op = self.op.mem
self.size = self.op.size
# TODO: op.size = dword ptr?
if op.base:
self.base = ins.reg_name(op.base)
if op.index:
self.index = ins.reg_name(op.index)
if op.segment:
# segment looks like es:[%s]
self.segment = ins.reg_name(op.segment)
self.scale = op.scale
self.off = op.disp
def __eq__(self, other):
return isinstance(other, Mem) and ((
self.size, self.base, self.index, self.segment, self.scale, self.off,
) == (other.size, other.base, other.index, other.segment, other.scale, other.off) or self.any or other.any)
def __str__(self):
tmp = []
if self.base:
tmp.append(self.base)
if self.index:
if tmp: tmp.append('+')
tmp.append(self.index)
if self.scale != 1:
# you'd better have an index to multiply!
assert(self.index)
tmp += ['*', '%d' % self.scale]
if self.off:
if tmp:
if self.off > 0: tmp.append('+')
else: tmp.append('-')
tmp.append('%d' % abs(self.off))
bracket = '[%s]' % (' '.join(tmp))
if self.segment:
bracket = '%s:%s' % (self.segment, bracket)
final = '%s %s' % (self.MEM_SIZE[self.size], bracket)
return final
OPS = {
X86_OP_IMM: Imm,
X86_OP_REG: Reg,
X86_OP_MEM: Mem,
}
class Ins(Base):
addr = None
ins = None
@classmethod
def fromins(cls, ins):
ops = []
for op in ins.operands:
opcls = OPS.get(op.type)
if opcls:
ops.append(opcls.fromop(ins, op))
else:
print 'UNSUPPORTED OP', op, ins.op_str
assert(False)
c = cls(ins.mnemonic, *ops)
c.ins = ins
c.addr = ins.address
return c
@property
def dst(self):
return self.ops[0]
@property
def src(self):
return self.ops[1]
def __init__(self, mne, *ops, **kwargs):
self.mne = mne
self.ops = ops
self.label = None
self.any = kwargs.get('any', False)
def op_str(self):
return ', '.join(map(str, self.ops))
def __eq__(self, other):
if isinstance(other, basestring) and other == self.mne:
return True
return isinstance(other, Ins) and self.mne == other.mne and (tuple(other.ops) == tuple(self.ops) or self.any or other.any)
def __str__(self):
out = '%s %s' % (self.mne, self.op_str())
if self.label:
out = '%s: %s' % (self.label, out)
return out
class Label(Base):
mne = None
ops = ()
def __init__(self, name):
self.name = name
def __str__(self):
return '%s:' % self.name
class IR(list):
def asm(self):
return '\n'.join(map(str, self))
def irdis(dis):
if not dis:
return IR([])
dis_addr = dis[0].address
size = dis[-1].address + dis[-1].size - dis_addr
tmp = []
next_label = 1
labels = {}
for ins in dis:
if ins.id in JMPS:
dst = ins.operands[0]
if dst.type == X86_OP_IMM:
addr = dst.imm
if addr >= dis_addr and addr < dis_addr + size:
if addr not in labels:
labels[addr] = Label('L%d' % next_label)
next_label += 1
x = Ins(ins.mnemonic, LabelOp(labels[addr].name))
x.addr = ins.address
x.ins = ins
tmp.append(x)
continue
tmp.append(Ins.fromins(ins))
out = []
for i, ins in enumerate(tmp):
label = labels.get(ins.addr)
if label:
out.append(label)
out.append(ins)
ir = IR(out)
return ir
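# A hedged usage sketch (hypothetical instruction bytes, assuming `import capstone` and a
# disassembler created with detail enabled so that ins.operands is populated):
#   md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64)
#   md.detail = True
#   ir = irdis(list(md.disasm(code_bytes, base_addr)))
#   print ir.asm()
# In-range jump/call targets are rewritten to local labels (L1, L2, ...), so the output
# might look like: "xor eax, eax", "L1:", "inc eax", "jne L1".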
|
#
# Torque log parser and UR generator
#
# Module for the SGAS Batch system Reporting Tool (BaRT).
#
# Author: Henrik Thostrup Jensen <htj@ndgf.org>
# Author: Andreas Engelbredt Dalsgaard <andreas.dalsgaard@gmail.com>
# Author: Magnus Jonsson <magnus@hpc2n.umu.se>
# Copyright: Nordic Data Grid Facility (2009, 2010)
import os
import time
import logging
from bart import common
from bart.usagerecord import usagerecord
SECTION = 'torque'
STATEFILE = 'statefile'
DEFAULT_STATEFILE = SECTION + '.state'
SPOOL_DIR = 'spooldir'
DEFAULT_SPOOL_DIR = '/var/spool/torque'
CONFIG = {
STATEFILE: { 'required': False },
SPOOL_DIR: { 'required': False },
}
TORQUE_DATE_FORMAT = '%Y%m%d'
class TorqueLogParser:
"""
Parser for torque accounting log.
"""
def __init__(self, log_file):
self.log_file = log_file
self.file_ = None
def openFile(self):
self.file_ = open(self.log_file)
def splitLineEntry(self, line):
line_tokens = line.split(' ')
fields = {}
start_fields = line_tokens[1].split(';')
fields['entrytype'] = start_fields[1]
fields['jobid'] = start_fields[2]
fields['user'] = start_fields[3].split('=')[1]
for e in line_tokens:
e = e.strip()
r = e.split('=')
if len(r) >= 2:
fields[r[0]] = '='.join(r[1:len(r)])
return fields
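# A hedged example of the accounting line format this parser assumes (hypothetical values):
#   "04/01/2010 12:00:01;E;1234.pbs.example.org;user=alice queue=batch ctime=1270123456 ... Exit_status=0"
# splitLineEntry would then return a dict containing, among other keys:
#   {'entrytype': 'E', 'jobid': '1234.pbs.example.org', 'user': 'alice', 'queue': 'batch', ...}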
def getNextLogLine(self):
if self.file_ is None:
self.openFile()
while True:
line = self.file_.readline()
if line == '': #last line
return None
if line[20] == 'E':
return line
def getNextLogEntry(self):
line = self.getNextLogLine()
if line is None:
return None
return self.splitLineEntry(line)
def spoolToEntry(self, entry_id):
while True:
log_entry = self.getNextLogEntry()
if log_entry is None or log_entry['jobid'] == entry_id:
break
class Torque:
state = None
cfg = None
missing_user_mappings = None
def __init__(self, cfg):
self.cfg = cfg
def getStateFile(self):
"""
Return the name of the statefile
"""
return self.cfg.getConfigValue(SECTION, STATEFILE, DEFAULT_STATEFILE)
def getCoreCount(self,nodes):
"""
Find number of cores used by parsing the Resource_List.nodes value
{<node_count> | <hostname>} [:ppn=<ppn>][:<property>[:<property>]...] [+ ...]
http://www.clusterresources.com/torquedocs21/2.1jobsubmission.shtml#nodeExamples
"""
cores = 0
for node_req in nodes.split('+'):
listTmp = node_req.split(':')
if listTmp[0].isdigit():
first = int(listTmp[0])
else:
first = 1
cores += first
if len(listTmp) > 1:
for e in listTmp:
if len(e) > 3:
if e[0:3] == 'ppn':
cores -= first
cores += first*int(e.split('=')[1])
break
return cores
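# A hedged worked example of the arithmetic above (hypothetical node spec):
#   nodes = "3:ppn=2+compute-1-2:ppn=4"
#   first chunk: node count 3 with ppn=2 -> 3*2 = 6 cores
#   second chunk: a named host (counted as 1) with ppn=4 -> 4 cores
#   getCoreCount(nodes) -> 10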
def getSeconds(self,torque_timestamp):
"""
Convert time string in the form HH:MM:SS to seconds
"""
(hours, minutes, seconds) = torque_timestamp.split(':')
return int(hours)*3600 + int(minutes)*60 + int(seconds)
def createUsageRecord(self, log_entry, hostname, user_map, vo_map):
"""
Creates a Usage Record object given a Torque log entry.
"""
# extract data from the workload trace (log_entry)
job_id = log_entry['jobid']
user_name = log_entry['user']
queue = log_entry['queue']
account = log_entry.get('account')
submit_time = int(log_entry['ctime'])
start_time = int(log_entry['start'])
end_time = int(log_entry['end'])
utilized_cpu = self.getSeconds(log_entry['resources_used.cput'])
wall_time = self.getSeconds(log_entry['resources_used.walltime'])
hosts = list(set([hc.split('/')[0] for hc in log_entry['exec_host'].split('+')]))
# initial value
node_count = len(hosts)
if log_entry.has_key('Resource_List.ncpus'):
core_count = int(log_entry['Resource_List.ncpus'])
elif log_entry.has_key('Resource_List.nodes'):
core_count = self.getCoreCount(log_entry['Resource_List.nodes'])
# mppwidth is used on e.g. Cray machines instead of ncpus / nodes
elif log_entry.has_key('Resource_List.mppwidth') or log_entry.has_key('Resource_List.size'):
if log_entry.has_key('Resource_List.mppwidth'):
core_count = int(log_entry['Resource_List.mppwidth'])
# older versions on e.g. Cray machines use "size" as keyword for mppwidth or core_count
elif log_entry.has_key('Resource_List.size'):
core_count = int(log_entry['Resource_List.size'])
# get node count, mppnodect exist only in newer versions
if log_entry.has_key('Resource_List.mppnodect'):
node_count = int(log_entry['Resource_List.mppnodect'])
else:
logging.warning('Missing mppnodect for entry: %s (will guess from "core count"/mppnppn)' % job_id)
try:
node_count = core_count / int(log_entry['Resource_List.mppnppn'])
except:
logging.warning('Unable to calculate node count for entry: %s (will guess from host list)' % job_id)
# keep the default of len(hosts) given above
else:
logging.warning('Missing processor count for entry: %s (will guess from host list)' % job_id)
# assume the number of exec hosts is the core count (possibly not right)
core_count = len(hosts)
# clean data and create various composite entries from the work load trace
if job_id.isdigit() and hostname is not None:
job_identifier = job_id + '.' + hostname
else:
job_identifier = job_id
fqdn_job_id = hostname + ':' + job_identifier
if user_name not in user_map.getMapping():
self.missing_user_mappings[user_name] = True
vo_info = []
if account:
mapped_vo = vo_map.get(account)
else:
mapped_vo = vo_map.get(user_name)
if mapped_vo is not None:
voi = usagerecord.VOInformation(name=mapped_vo, type_='bart-vomap')
vo_info.append(voi)
## fill in usage record fields
ur = usagerecord.UsageRecord()
ur.record_id = fqdn_job_id
ur.local_job_id = job_identifier
ur.global_job_id = fqdn_job_id
ur.local_user_id = user_name
ur.global_user_name = user_map.get(user_name)
ur.machine_name = hostname
ur.queue = queue
ur.project_name = account
ur.processors = core_count
ur.node_count = node_count
ur.host = ','.join(hosts)
ur.submit_time = usagerecord.epoch2isoTime(submit_time)
ur.start_time = usagerecord.epoch2isoTime(start_time)
ur.end_time = usagerecord.epoch2isoTime(end_time)
ur.cpu_duration = utilized_cpu
ur.wall_duration = wall_time
ur.vo_info += vo_info
ur.exit_code = log_entry['Exit_status']
return ur
def generateUsageRecords(self,hostname, user_map, vo_map):
"""
Starts the UR generation process.
"""
torque_spool_dir = self.cfg.getConfigValue(SECTION, SPOOL_DIR, DEFAULT_SPOOL_DIR)
torque_accounting_dir = os.path.join(torque_spool_dir, 'server_priv', 'accounting')
torque_date_today = time.strftime(TORQUE_DATE_FORMAT, time.gmtime())
job_id = self.state_job_id
torque_date = self.state_log_file
self.missing_user_mappings = {}
while True:
log_file = os.path.join(torque_accounting_dir, torque_date)
tlp = TorqueLogParser(log_file)
if job_id is not None:
try:
tlp.spoolToEntry(job_id)
except IOError, e:
logging.error('Error spooling log file at %s for date %s to %s (%s)' % (log_file, torque_date, job_id, str(e)) )
job_id = None
continue
while True:
try:
log_entry = tlp.getNextLogEntry()
except IOError, e:
if torque_date == torque_date_today: # todays entry might not exist yet
break
logging.error('Error reading log file at %s for date %s (%s)' % (log_file, torque_date, str(e)))
break
if log_entry is None:
break # no more log entries
job_id = log_entry['jobid']
ur = self.createUsageRecord(log_entry, hostname, user_map, vo_map)
common.writeUr(ur,self.cfg)
self.state_job_id = job_id
self.state_log_file = torque_date
common.writeGeneratorState(self)
job_id = None
if torque_date == torque_date_today:
break
torque_date = common.getIncrementalDate(torque_date, TORQUE_DATE_FORMAT)
job_id = None
def parseGeneratorState(self,state):
"""
Get the state describing how far the UR generation has progressed in the log.
"""
job_id = None
log_file = None
if state is None or len(state) == 0:
# Empty state data happens sometimes, usually NFS is involved :-|
# Start from yesterday (24 hours back); this should be fine assuming (at least) daily invocation.
t_old = time.time() - (24 * 3600)
log_file = time.strftime(TORQUE_DATE_FORMAT, time.gmtime(t_old))
else:
job_id, log_file = state.split(' ', 2)
if job_id == '-':
job_id = None
self.state_job_id = job_id
self.state_log_file = log_file
def createGeneratorState(self):
"""
Create a string describing how far the UR generation has progressed.
"""
return '%s %s' % (self.state_job_id or '-', self.state_log_file)
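# A hedged example of the state string (hypothetical values): "1234.pbs.example.org 20100401",
# or "- 20100401" when no job id has been recorded for that log file yet.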
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
import copy
import json
from common.errors import CommonAPIError
from . import configs
class CCClient(object):
def __init__(self, component):
self.http_client = component.outgoing.http_client
self.bk_username = component.current_user.username
self.bk_language = component.request.bk_language
self.bk_supplier_account = component.request.kwargs.get('bk_supplier_account') or \
configs.DEFAULT_BK_SUPPLIER_ACCOUNT
def request(self, method, host, path, params=None, data=None, headers={}, **kwargs):
headers = copy.copy(headers)
headers.update({
'BK_USER': self.bk_username,
'HTTP_BLUEKING_LANGUAGE': self.bk_language,
'HTTP_BK_SUPPLIER_ACCOUNT': self.bk_supplier_account,
'HTTP_BLUEKING_SUPPLIER_ID': '0',
})
return self.http_client.request(
method, host, path, params=params, data=data,
headers=headers, allow_non_200=True,
response_encoding='utf-8', response_type='text', **kwargs)
def get(self, host, path, params=None, headers={}, **kwargs):
response = self.request('GET', host, path, params=params, headers=headers, **kwargs)
return self.format_response(response)
def post(self, host, path, data=None, headers={}, **kwargs):
response = self.request('POST', host, path, data=data, headers=headers, **kwargs)
return self.format_response(response)
def put(self, host, path, data=None, headers={}, **kwargs):
response = self.request('PUT', host, path, data=data, headers=headers, **kwargs)
return self.format_response(response)
def delete(self, host, path, data=None, headers={}, **kwargs):
response = self.request('DELETE', host, path, data=data, headers=headers, **kwargs)
return self.format_response(response)
def format_response(self, response):
try:
response = json.loads(response)
except Exception:
return {
'result': False,
'code': 1306000,
'message': 'Request interface error, the response content is not a json string: %s' % response,
}
bk_error_code = response.get('bk_error_code', response.get('code'))
if bk_error_code is None:
raise CommonAPIError(
'An error occurred while requesting CC interface, '
'the response content does not contain bk_error_code field.')
elif bk_error_code == 0:
return {
'result': True,
'code': 0,
'data': response.get('data'),
'message': response.get('bk_error_msg', response.get('message')) or '',
}
else:
return {
'result': False,
'code': bk_error_code,
'data': response.get('data'),
'message': response.get('bk_error_msg', response.get('message')) or '',
}
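# A hedged sketch of how format_response maps an upstream payload (hypothetical values):
#   '{"bk_error_code": 0, "data": {"count": 1}, "bk_error_msg": "success"}'
#     -> {'result': True, 'code': 0, 'data': {'count': 1}, 'message': 'success'}
#   '{"bk_error_code": 1199048, "bk_error_msg": "host not found"}'
#     -> {'result': False, 'code': 1199048, 'data': None, 'message': 'host not found'}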
|
import sys
from yachalk import chalk
class Output:
def __init__(self, lnd):
self.lnd = lnd
@staticmethod
def print_line(message, end='\n'):
sys.stdout.write(f"{message}{end}")
@staticmethod
def print_without_linebreak(message):
sys.stdout.write(message)
def print_route(self, route):
route_str = "\n".join(
self.get_channel_representation(h.chan_id, h.pub_key) + "\t" +
self.get_fee_information(h, route)
for h in route.hops
)
self.print_line(route_str)
def get_channel_representation(self, chan_id, pubkey_to, pubkey_from=None):
channel_id_formatted = format_channel_id(chan_id)
if pubkey_from:
alias_to_formatted = format_alias(self.lnd.get_node_alias(pubkey_to))
alias_from = format_alias(self.lnd.get_node_alias(pubkey_from))
return f"{channel_id_formatted} ({alias_from} to {alias_to_formatted})"
alias_to_formatted = format_alias(f"{self.lnd.get_node_alias(pubkey_to):32}")
return f"{channel_id_formatted} to {alias_to_formatted}"
def get_fee_information(self, next_hop, route):
hops = list(route.hops)
if hops[0] == next_hop:
ppm = self.lnd.get_ppm_to(next_hop.chan_id)
return f"(free, we usually charge {format_ppm(ppm)})"
hop = hops[hops.index(next_hop) - 1]
ppm = int(hop.fee_msat * 1_000_000 / hop.amt_to_forward_msat)
fee_formatted = "fee " + chalk.cyan(f"{hop.fee_msat:8,} mSAT")
ppm_formatted = format_ppm(ppm, 5)
return f"({fee_formatted}, {ppm_formatted})"
def format_alias(alias):
if not sys.stdout.encoding.lower().startswith('utf'):
alias = alias.encode('latin-1', 'ignore').decode()
return chalk.bold(alias)
def format_ppm(ppm, min_length=None):
if min_length:
return chalk.bold(f"{ppm:{min_length},}ppm")
return chalk.bold(f"{ppm:,}ppm")
def format_fee_msat(fee_msat, min_length=None):
if min_length:
return chalk.cyan(f"{fee_msat:{min_length},} mSAT")
return chalk.cyan(f"{fee_msat:,} mSAT")
def format_fee_msat_red(fee_msat, min_length=None):
if min_length:
return chalk.red(f"{fee_msat:{min_length},} mSAT")
return chalk.red(f"{fee_msat:,} mSAT")
def format_fee_sat(fee_sat):
return chalk.cyan(f"{fee_sat:,} sats")
def format_earning(msat, min_width=None):
if min_width:
return chalk.green(f"{msat:{min_width},} mSAT")
return chalk.green(f"{msat:,} mSAT")
def format_amount(amount, min_width=None):
if min_width:
return chalk.yellow(f"{amount:{min_width},}")
return chalk.yellow(f"{amount:,}")
def format_amount_green(amount, min_width=None):
return chalk.green(f"{amount:{min_width},}")
def format_boring_string(string):
return chalk.bg_black(chalk.gray(string))
def format_channel_id(channel_id):
return format_boring_string(channel_id)
def format_warning(warning):
return chalk.yellow(warning)
def format_error(error):
return chalk.red(error)
def print_bar(width, length):
result = chalk.bold("[")
if sys.stdout.encoding.lower().startswith('utf'):
for _ in range(0, length):
result += chalk.bold(u"\u2588")
for _ in range(length, width):
result += u"\u2591"
else:
for _ in range(0, length):
result += chalk.bold("X")
for _ in range(length, width):
result += u"."
result += chalk.bold("]")
return result
|
#!/usr/bin/python3
# Module for parsing arguments
import argparse
parser = argparse.ArgumentParser()
# Add an argument and save it in a variable named "src_dir"
# Also add description for -h (help) option
parser.add_argument("src_dir", help = "Directory to backup")
# Add an argument and save it in a variable named "dst_dir"
# Also add description for -h (help) option
parser.add_argument("dst_dir", help = "Location to store backup")
# Collect all of the arguments supplied on the command line
args = parser.parse_args()
print("Source:", args.src_dir)
print("Destination:", args.dst_dir)
|
# -*- coding: utf-8 -*-
"""
@Project: pytorch-train
@File : utils
@Author : TonyMao@AILab
@Date : 2019/11/12
@Desc : None
"""
import torch
import torchvision.transforms as transforms
from PIL import Image
def get_transform(opt):
transform_list = []
if opt.resize_or_crop == 'resize_and_crop':
osize = [opt.load_size, opt.load_size]
transform_list.append(transforms.Resize(osize, Image.BICUBIC))
transform_list.append(transforms.RandomCrop(opt.fine_size))
elif opt.resize_or_crop == 'crop':
transform_list.append(transforms.RandomCrop(opt.fine_size))
elif opt.resize_or_crop == 'scale_width':
transform_list.append(transforms.Lambda(
lambda img: __scale_width(img, opt.fine_size)))
elif opt.resize_or_crop == 'scale_width_and_crop':
transform_list.append(transforms.Lambda(
lambda img: __scale_width(img, opt.load_size)))
transform_list.append(transforms.RandomCrop(opt.fine_size))
# pass
elif opt.resize_or_crop == 'scale_and_crop':
transform_list.append(transforms.Lambda(
lambda img: __scale(img, opt.load_size)
))
transform_list.append(transforms.RandomCrop(opt.fine_size))
transform_list += [transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]
return transforms.Compose(transform_list)
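# A hedged usage sketch (assumes an options object exposing the attributes read above;
# the attribute values here are hypothetical):
#   from argparse import Namespace
#   opt = Namespace(resize_or_crop='resize_and_crop', load_size=286, fine_size=256)
#   transform = get_transform(opt)  # Resize(286) -> RandomCrop(256) -> ToTensor -> Normalize
#   tensor = transform(Image.open('example.jpg').convert('RGB'))  # shape: (3, 256, 256)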
def denorm(tensor, device, to_board=False):
std = torch.Tensor([0.229, 0.224, 0.225]).reshape(-1, 1, 1).to(device)
mean = torch.Tensor([0.485, 0.456, 0.406]).reshape(-1, 1, 1).to(device)
res = torch.clamp((tensor.to(device) * std + mean), 0, 1)
if to_board:
res = (res - 0.5) / 0.5
return res
def __scale_width(img, target_width):
ow, oh = img.size
if ow == target_width:
return img
w = target_width
h = int(target_width * oh / ow)
return img.resize((w, h), Image.BICUBIC)
def __scale_height(img, target_height):
ow, oh = img.size
if oh == target_height:
return img
w = int(target_height * ow / oh)
h = target_height
return img.resize((w, h), Image.BICUBIC)
def __scale(img, target_side):
ow, oh = img.size
if ow < oh:
return __scale_width(img, target_side)
else:
return __scale_height(img, target_side)
|
from django.shortcuts import render
from django.shortcuts import HttpResponse
import json
from django.core import serializers
from cloud.models import Data
# Create your views here.
def index(request):
return render(request, 'cloud/index.html', context={
'title': 'PRP-DGPS',
'content': 'Welcome PRP-DGPS Server :))'
})
# http://Active_Server_IP:Active_Server_Port/upload/data_type/data/time/
def upload(request, param1, param2, param3):
data = Data(data_type = param1, data = param2)
data.save()
return HttpResponse("Upload data_type = " + param1 + " data = " + param2 + " create = " + param3)
def list(request):
data_list = Data.objects.all()
data_list2 = []
for data in data_list:
data.create_time = data.create_time.strftime('%Y-%m-%d %H:%M:%S')
data_list2.append(data)
# print (data_list)
return render(request, 'cloud/list.html', context={
'title': 'PRP-DGPS Server',
'data_list': data_list2
})
def get(request):
data_list = Data.objects.all()
data_list_json = serializers.serialize("json", data_list)
return HttpResponse(data_list_json)
|
#import libraries
import picamera
from time import sleep
from PIL import Image
#set up the camera
camera = picamera.PiCamera()
try:
#capture at 1280x720 resolution
camera.resolution = (1280, 720)
camera.framerate = 60
camera.vflip = True
camera.hflip = True
camera.start_preview()
#allow camera to AWB
sleep(1)
camera.capture('1_unedited.jpg')
#load the image back into python
photo = Image.open('1_unedited.jpg')
pixels = photo.load()
#apply an edit to each pixel
try:
for i in range(photo.size[0]):
for j in range(photo.size[1]):
#separate the current pixel
pixel = pixels[i,j]
#separate the colours
r = pixel[0]
g = pixel[1]
b = pixel[2]
#Perform our edits
r = j
g = j
b = j
#update the pixel
pixel = (r, g, b)
#place the pixel back in the image
pixels[i,j] = pixel
finally:
photo.save('2_vertical.jpg', quality=90)
finally:
camera.close()
|
import requests
import json
import base64
from urllib.parse import quote
import os
# Baidu AI platform OCR (text recognition) API
API_Key = '7ozPdYCKWpXQhGLZingB9Cm8'
Secret_Key = '53GBqlVoF3PNdCNKT4h3G4YTnoAa0uhI'
# Obtain the access_token for the OCR service
def getAipAccessToken():
# client_id is the API Key (AK) from the console, client_secret is the Secret Key (SK)
host = 'https://aip.baidubce.com/oauth/2.0/token'
payload = {
'grant_type': 'client_credentials',
'client_id': API_Key,
'client_secret': Secret_Key}
response = requests.post(url=host,data=payload,verify=False)
if response.status_code == 200 :
response_dict = json.loads(response.text)
print(response_dict)
AccessToken = response_dict['access_token']
return AccessToken
return None
# Read a bank receipt image
def readReceipt(access_token,image_path):
headers = {
'Content-Type': "application/x-www-form-urlencoded",
'charset': "utf-8"
}
# iocr recognition API URL
recognise_api_url = "https://aip.baidubce.com/rest/2.0/solution/v1/iocr/recognise/finance"
access_token = access_token
templateSign = 'bank_receipt' # standard bank receipt template
detectorId = 0
#classifierId = "your_classifier_id"
# test data path
#image_path = './receipt/chinabank1.png'
try:
with open(image_path, 'rb') as f:
image_data = f.read()
image_b64 = base64.b64encode(image_data)
# request body for template recognition
recognise_bodys = 'access_token=' + access_token + '&templateSign=' + templateSign + \
'&image=' + quote(image_b64)
# request body for classifier recognition
# classifier_bodys = "access_token=" + access_token + "&classifierId=" + classifierId + "&image=" + quote(image_b64.encode("utf8"))
# request body for mixed (multi-ticket) recognition
# detector_bodys = "access_token=" + access_token + "&detectorId=" + str(detectorId) + "&image=" + quote(image_b64.encode("utf8"))
# send the template recognition request
response = requests.post(recognise_api_url, data=recognise_bodys, headers=headers, verify=False)
# send the classifier recognition request
# response = requests.post(recognise_api_url, data=classifier_bodys, headers=headers)
# send the mixed (multi-ticket) recognition request
# response = requests.post(recognise_api_url, data=detector_bodys, headers=headers)
if response.status_code == 200 :
response_dict = json.loads(response.text)
print(response_dict)
return response_dict
return None
except Exception as e:
print(e)
return None
# Walk the folder of bank receipt images
def getReceiptList(dirpath):
receiptList = []
for root, dirs, files in os.walk(dirpath):
print('Root: {0}, directories: {1}, file count: {2}'.format(root,dirs,len(files)))
files.sort()
for f in files:
receiptList.append(f)
return receiptList
if __name__ == "__main__":
dirpath = './receipt'
receiptList = getReceiptList(dirpath)
if len(receiptList) == 0:
print('The bank receipt image list is empty!')
exit()
access_token = None
access_token = getAipAccessToken()
if access_token == None:
print('Failed to obtain access_token!')
exit()
print('access_token: %s' % (access_token))
for receipt in receiptList:
image_path = dirpath + '/' + receipt
receipt_data = readReceipt(access_token,image_path)
if receipt_data == None:
print('Failed to read bank receipt data!')
exit()
|
# -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_object_or_404, redirect
from django.template import loader
from django.http import HttpResponse
from django.contrib import messages
from app.models import *
from app.models import idcard
import pdfkit
from qrcode import *
@login_required(login_url="/login/")
def index(request):
return render(request, "index.html")
@login_required(login_url="/login/")
def pages(request):
context = {}
# All resource paths end in .html.
# Pick out the html file name from the url. And load that template.
try:
load_template = request.path.split('/')[-1]
if request.method == "POST":
load_template(request)
template = loader.get_template('pages/' + load_template)
return HttpResponse(template.render(context, request))
except:
template = loader.get_template( 'pages/error-404.html' )
return HttpResponse(template.render(context, request))
def idcard_form(request):
if request.method == "POST":
name = request.POST.get('name')
employment_id = request.POST.get('employment_id')
customer_account_number= request.POST.get('customer_account_number')
circle= request.POST.get('circle')
company_name= request.POST.get('company_name')
department= request.POST.get('department')
certificate_number= request.POST.get('certificate_number')
date= request.POST.get('date')
idcard1 = idcard(name=name,employment_id=employment_id,customer_account_no=customer_account_number,circle=circle,company_name=company_name,department=department,certificate_no=certificate_number,date=date)
idcard1.save()
messages.success(request,"Your data has been submiteed successfully!!")
return render(request,'pages/idcard-form.html',context=None)
else:
return render(request,'pages/idcard-form.html',context=None)
def idcard_data(request):
report = Report.objects.all()
return render(request,'pages/idcard_data.html',{'report':report})
def idcard_import(request):
return render(request,'pages/idcard_import.html')
def certificate_form(request):
if request.method=="POST":
name=request.POST.get('name')
training_name=request.POST.get('training_name')
certificate_validity=request.POST.get('certificate_validity')
date=request.POST.get('date')
place=request.POST.get('place')
img=make(name)
img.save("static/qr.png")
certificate=Certificate(name=name,training_name=training_name,certificate_validity=certificate_validity,date=date,place=place)
certificate.save()
messages.success(request,"Your data has been submiteed successfully!!")
return render(request,'pages/certificate_form.html',context=None)
#return HttpResponse("Hello I am certificate")
def certificate_verification(request):
return render(request,'pages/certificate-verification.html')
def printreport(request):
report = Report.objects.all().order_by('-name')
return render(request,'pages/printreport.html',{'report':report})
def reportform(request):
if request.method=="POST":
name=request.POST.get('name')
gender=request.POST.get('gender')
address=request.POST.get('address')
dob=request.POST.get('dob')
email=request.POST.get('email')
q1=request.POST.get('q1')
q2=request.POST.get('q2')
q3=request.POST.get('q3')
q4=request.POST.get('q4')
q5=request.POST.get('q5')
q6=request.POST.get('q6')
q7=request.POST.get('q7')
q8=request.POST.get('q8')
q9=request.POST.get('q9')
q10=request.POST.get('q10')
q11=request.POST.get('q11')
image=request.FILES['image']
comment=request.POST.get('comment')
height=request.POST.get('height')
weight=request.POST.get('weight')
systolic=request.POST.get('systolic')
diastolic=request.POST.get('diastolic')
pulserate=request.POST.get('pulserate')
heartsound=request.POST.get('heartsound')
peripheralpulse=request.POST.get('peripheralpulse')
chestlung=request.POST.get('chestlung')
curvicalspine=request.POST.get('curvicalspine')
backmovements=request.POST.get('backmovements')
upperlimbs=request.POST.get('upperlimbs')
jointmovements=request.POST.get('jointmovements')
reflexes=request.POST.get('reflexes')
rombergs=request.POST.get('rombergs')
hearing=request.POST.get('hearing')
vision=request.POST.get('vision')
albumin=request.POST.get('albumin')
sugar=request.POST.get('sugar')
blood_group=request.POST.get('blood_group')
rh_factor=request.POST.get('rh_factor')
hb=request.POST.get('hb')
tlc=request.POST.get('tlc')
rbc=request.POST.get('rbc')
plateletscount=request.POST.get('plateletscount')
triglycerides=request.POST.get('triglycerides')
hdl=request.POST.get('hdl')
ldl=request.POST.get('ldl')
result=request.POST.get('result')
registration_no=request.POST.get('registration_no')
date_of_examination=request.POST.get('date_of_examination')
report=Report(name=name,gender=gender, address= address,dob=dob,email=email,q1=q1,q2=q2,q3=q3,q4=q4,q5=q5,q6=q6,q7=q7,q8=q8,q9=q9,q10=q10,q11=q11,image=image,comment=comment,height=height,weight=weight,systolic=systolic,diastolic=diastolic,pulserate=pulserate,heartsound=heartsound,peripheralpulse=peripheralpulse,chestlung=chestlung,curvicalspine=curvicalspine,backmovements=backmovements,upperlimbs=upperlimbs,jointmovements=jointmovements,reflexes=reflexes,rombergs=rombergs,hearing=hearing,vision=vision,albumin=albumin,sugar=sugar,blood_group=blood_group,rh_factor=rh_factor,hb=hb,tlc=tlc,rbc=rbc,plateletscount=plateletscount,triglycerides=triglycerides,hdl=hdl,ldl=ldl,result=result,registration_no=registration_no,date_of_examination=date_of_examination)
report.save()
messages.success(request,"Your data has been submiteed successfully!!")
return render(request,'pages/reportform.html',context=None)
def report(request,pk):
report = Report.objects.get(id=pk)
#report = Report.objects.all()
qr=make(report.name)
qr.save("media/report.png")
return render(request,'pages/report.html',{'report':report,'qr':qr})
|
# Generated by Django 3.0.6 on 2020-05-22 12:10
from django.db import migrations, models
def migrate_data(apps, schema_editor):
RRset = apps.get_model('desecapi', 'RRset')
RRset.objects.filter(touched__isnull=True).update(touched=models.F('created'))
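# Backfill any RRset whose `touched` value is still NULL with its creation time, so the
# field is populated before it is switched to auto_now below.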
class Migration(migrations.Migration):
dependencies = [
('desecapi', '0014_rrset_touched'),
]
operations = [
migrations.RunPython(migrate_data, migrations.RunPython.noop),
migrations.AlterField(
model_name='rrset',
name='touched',
field=models.DateTimeField(auto_now=True),
),
]
|
import numpy as np
import scipy.misc
from gym.spaces.box import Box
from scipy.misc import imresize
from cached_property import cached_property
# TODO: move this to folder with different files
class BaseTransformer(object):
"""
Base transformer interface, inherited objects should conform to this
"""
def transform(self, observation):
"""
observation to transform
"""
raise NotImplementedError
def transformed_observation_space(self, prev_observation_space):
"""
prev_observation_space and how it is modified
"""
return prev_observation_space
def reset(self):
"""
resets the transformer if there is an operation to be made
"""
return
class AppendPrevTimeStepTransformer(BaseTransformer):
"""
Keeps track of and appends the previous observation timestep.
"""
def __init__(self):
self.prev_timestep = None
def transform(self, observation):
if self.prev_timestep is None:
self.prev_timestep = np.zeros(observation.shape)
new_obs = np.concatenate([observation.reshape((1, -1)), self.prev_timestep.reshape((1, -1))], axis=1).reshape(-1)
self.prev_timestep = observation
return new_obs
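# Example of the behaviour above: for an observation of dimension n the transformed
# observation has dimension 2n; on the first call the second half is all zeros, and on
# later calls it holds the previous timestep's observation.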
def transformed_observation_space(self, prev_observation_space):
if type(prev_observation_space) is Box:
#TODO: should use tile?
copy = np.copy(prev_observation_space.low.reshape((1, -1)))
low = np.concatenate([copy, copy], axis=1)
copy = np.copy(prev_observation_space.high.reshape((1, -1)))
high = np.concatenate([copy, copy], axis=1)
return Box(low.reshape(-1), high.reshape(-1))
else:
raise NotImplementedError("Currently only support Box observation spaces for ResizeImageTransformer")
return prev_observation_space
def reset(self):
self.prev_timestep = None
class SimpleNormalizePixelIntensitiesTransformer(BaseTransformer):
"""
Normalizes pixel intensities simply by dividing by 255.
"""
def transform(self, observation):
return np.array(observation).astype(np.float32) / 255.0
def transformed_observation_space(self, wrapped_observation_space):
return wrapped_observation_space
class ResizeImageTransformer(BaseTransformer):
"""
Rescale a given image by a percentage
"""
def __init__(self, fraction_of_current_size):
self.fraction_of_current_size = fraction_of_current_size
def transform(self, observation):
return scipy.misc.imresize(observation, self.fraction_of_current_size)
def transformed_observation_space(self, wrapped_observation_space):
if type(wrapped_observation_space) is Box:
return Box(scipy.misc.imresize(wrapped_observation_space.low, self.fraction_of_current_size), scipy.misc.imresize(wrapped_observation_space.high, self.fraction_of_current_size))
else:
raise NotImplementedError("Currently only support Box observation spaces for ResizeImageTransformer")
class RandomSensorMaskTransformer(BaseTransformer):
"""
Randomly occlude a given percentage of sensors on every observation.
Randomly occludes different ones every time
"""
def __init__(self, env, percent_of_sensors_to_occlude=.15):
"""
Knock out random sensors
"""
self.percent_of_sensors_to_occlude = percent_of_sensors_to_occlude
self.obs_dims = env.observation_space.flat_dim
def occlude(self, obs):
        sensor_idx = np.random.randint(0, self.obs_dims, size=int(self.obs_dims * self.percent_of_sensors_to_occlude))
obs[sensor_idx] = 0
return obs
def transform(self, observation):
return self.occlude(observation)
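# --- Usage sketch (not part of the original module) --------------------------
# A minimal, illustrative example of chaining the transformers defined above
# on a dummy 1-D observation; the helper name below is made up for the demo.
def _demo_transformer_chain():
    transformers = [SimpleNormalizePixelIntensitiesTransformer(), AppendPrevTimeStepTransformer()]
    for t in transformers:
        t.reset()
    obs = np.random.randint(0, 256, size=4)
    for t in transformers:
        obs = t.transform(obs)
    # AppendPrevTimeStepTransformer doubles the observation length on every step.
    assert obs.shape == (8,)
    return obs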
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .layers import DownBlock, Conv, ResnetTransformer
from .stn_losses import smoothness_loss
sampling_align_corners = False
sampling_mode = 'bilinear'
# The number of filters in each block of the encoding part (down-sampling).
ndf = {'A': [32, 64, 64, 64, 64, 64, 64], }
# The number of filters in each block of the decoding part (up-sampling).
# If len(ndf[cfg]) > len(nuf[cfg]) - then the deformation field is up-sampled to match the input size.
nuf = {'A': [64, 64, 64, 64, 64, 64, 32], }
# Indicate if res-blocks are used in the down-sampling path.
use_down_resblocks = {'A': True, }
# Indicate the number of res-blocks applied on the encoded features.
resnet_nblocks = {'A': 3, }
# Indicate if a final refinement layer is applied before deriving the deformation field.
refine_output = {'A': True, }
# The activation used in the down-sampling path.
down_activation = {'A': 'leaky_relu', }
# The activation used in the up-sampling path.
up_activation = {'A': 'leaky_relu', }
class ResUnet(torch.nn.Module):
"""Predicts a dense deofmration field that aligns two given images.
The networks is unet-based network with (possibly) residual blocks. The residual blocks may be used in the
in the down-sampling path, on the encoded features and prior to the deformation field generation."""
def __init__(self, nc_a, nc_b, cfg, init_func, init_to_identity):
super(ResUnet, self).__init__()
act = down_activation[cfg]
# ------------ Down-sampling path
self.ndown_blocks = len(ndf[cfg])
self.nup_blocks = len(nuf[cfg])
assert self.ndown_blocks >= self.nup_blocks
in_nf = nc_a + nc_b
conv_num = 1
skip_nf = {}
for out_nf in ndf[cfg]:
setattr(self, 'down_{}'.format(conv_num),
DownBlock(in_nf, out_nf, 3, 1, 1, activation=act, init_func=init_func, bias=True,
use_resnet=use_down_resblocks[cfg], use_norm=False))
skip_nf['down_{}'.format(conv_num)] = out_nf
in_nf = out_nf
conv_num += 1
conv_num -= 1
if use_down_resblocks[cfg]:
self.c1 = Conv(in_nf, 2 * in_nf, 1, 1, 0, activation=act, init_func=init_func, bias=True,
use_resnet=False, use_norm=False)
self.t = ((lambda x: x) if resnet_nblocks[cfg] == 0
else ResnetTransformer(2 * in_nf, resnet_nblocks[cfg], init_func))
self.c2 = Conv(2 * in_nf, in_nf, 1, 1, 0, activation=act, init_func=init_func, bias=True,
use_resnet=False, use_norm=False)
# ------------- Up-sampling path
act = up_activation[cfg]
for out_nf in nuf[cfg]:
setattr(self, 'up_{}'.format(conv_num),
Conv(in_nf + skip_nf['down_{}'.format(conv_num)], out_nf, 3, 1, 1, bias=True, activation=act,
                         init_func=init_func, use_norm=False, use_resnet=False))
in_nf = out_nf
conv_num -= 1
if refine_output[cfg]:
self.refine = nn.Sequential(ResnetTransformer(in_nf, 1, init_func),
Conv(in_nf, in_nf, 1, 1, 0, use_resnet=False, init_func=init_func,
activation=act,
use_norm=False)
)
else:
self.refine = lambda x: x
self.output = Conv(in_nf, 2, 3, 1, 1, use_resnet=False, bias=True,
init_func=('zeros' if init_to_identity else init_func), activation=None,
use_norm=False)
def forward(self, img_a, img_b):
x = torch.cat([img_a, img_b], 1)
skip_vals = {}
conv_num = 1
# Down
while conv_num <= self.ndown_blocks:
x, skip = getattr(self, 'down_{}'.format(conv_num))(x)
skip_vals['down_{}'.format(conv_num)] = skip
conv_num += 1
if hasattr(self, 't'):
x = self.c1(x)
x = self.t(x)
x = self.c2(x)
# Up
conv_num -= 1
while conv_num > (self.ndown_blocks - self.nup_blocks):
s = skip_vals['down_{}'.format(conv_num)]
x = F.interpolate(x, (s.size(2), s.size(3)), mode='bilinear')
x = torch.cat([x, s], 1)
x = getattr(self, 'up_{}'.format(conv_num))(x)
conv_num -= 1
x = self.refine(x)
x = self.output(x)
return x
class UnetSTN(nn.Module):
"""This class is generates and applies the deformable transformation on the input images."""
def __init__(self, in_channels_a, in_channels_b, height, width, cfg, init_func, stn_bilateral_alpha,
init_to_identity, multi_resolution_regularization):
super(UnetSTN, self).__init__()
self.oh, self.ow = height, width
self.in_channels_a = in_channels_a
self.in_channels_b = in_channels_b
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.offset_map = ResUnet(self.in_channels_a, self.in_channels_b, cfg, init_func, init_to_identity).to(
self.device)
self.identity_grid = self.get_identity_grid()
self.alpha = stn_bilateral_alpha
self.multi_resolution_regularization = multi_resolution_regularization
def get_identity_grid(self):
"""Returns a sampling-grid that represents the identity transformation."""
x = torch.linspace(-1.0, 1.0, self.ow)
y = torch.linspace(-1.0, 1.0, self.oh)
xx, yy = torch.meshgrid([y, x])
xx = xx.unsqueeze(dim=0)
yy = yy.unsqueeze(dim=0)
identity = torch.cat((yy, xx), dim=0).unsqueeze(0)
return identity
def get_grid(self, img_a, img_b, return_offsets_only=False):
"""Return the predicted sampling grid that aligns img_a with img_b."""
if img_a.is_cuda and not self.identity_grid.is_cuda:
self.identity_grid = self.identity_grid.to(img_a.device)
# Get Deformation Field
b_size = img_a.size(0)
deformation = self.offset_map(img_a, img_b)
deformation_upsampled = deformation
if deformation.size(2) != self.oh and deformation.size(3) != self.ow:
deformation_upsampled = F.interpolate(deformation, (self.oh, self.ow), mode=sampling_mode,
align_corners=sampling_align_corners)
if return_offsets_only:
resampling_grid = deformation_upsampled.permute([0, 2, 3, 1])
else:
resampling_grid = (self.identity_grid.repeat(b_size, 1, 1, 1) + deformation_upsampled).permute([0, 2, 3, 1])
return resampling_grid
def forward(self, img_a, img_b, apply_on=None):
"""
Predicts the spatial alignment needed to align img_a with img_b. The spatial transformation will be applied
on the tensors passed by apply_on (if apply_on is None then the transformation will be applied on img_a).
:param img_a: the source image.
:param img_b: the target image.
:param apply_on: the geometric transformation can be applied on different tensors provided by this list.
If not set, then the transformation will be applied on img_a.
:return: a list of the warped images (matching the order they appeared in apply on), and the regularization term
calculated for the predicted transformation."""
if img_a.is_cuda and not self.identity_grid.is_cuda:
self.identity_grid = self.identity_grid.to(img_a.device)
# Get Deformation Field
b_size = img_a.size(0)
deformation = self.offset_map(img_a, img_b)
deformation_upsampled = deformation
if deformation.size(2) != self.oh and deformation.size(3) != self.ow:
deformation_upsampled = F.interpolate(deformation, (self.oh, self.ow), mode=sampling_mode)
resampling_grid = (self.identity_grid.repeat(b_size, 1, 1, 1) + deformation_upsampled).permute([0, 2, 3, 1])
        # Warp the images w.r.t. the deformation field
if apply_on is None:
apply_on = [img_a]
warped_images = []
for img in apply_on:
warped_images.append(F.grid_sample(img, resampling_grid, mode=sampling_mode, padding_mode='zeros',
align_corners=sampling_align_corners))
# Calculate STN regulization term
reg_term = self._calculate_regularization_term(deformation, warped_images[0])
return warped_images, reg_term
def _calculate_regularization_term(self, deformation, img):
"""Calculate the regularization term of the predicted deformation.
        The regularization may be applied at multiple resolutions for larger images."""
dh, dw = deformation.size(2), deformation.size(3)
img = None if img is None else img.detach()
reg = 0.0
factor = 1.0
for i in range(self.multi_resolution_regularization):
if i != 0:
deformation_resized = F.interpolate(deformation, (dh // (2 ** i), dw // (2 ** i)), mode=sampling_mode,
align_corners=sampling_align_corners)
img_resized = F.interpolate(img, (dh // (2 ** i), dw // (2 ** i)), mode=sampling_mode,
align_corners=sampling_align_corners)
elif deformation.size()[2::] != img.size()[2::]:
deformation_resized = deformation
img_resized = F.interpolate(img, deformation.size()[2::], mode=sampling_mode,
align_corners=sampling_align_corners)
else:
deformation_resized = deformation
img_resized = img
reg += factor * smoothness_loss(deformation_resized, img_resized, alpha=self.alpha)
factor /= 2.0
return reg
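# --- Sanity-check sketch (not part of the original module) -------------------
# Illustrates the (x, y) sampling-grid convention consumed by F.grid_sample:
# warping a random image with an identity grid returns (approximately) the
# same image. This is independent of get_identity_grid(); sizes are arbitrary.
def _identity_grid_demo(h=8, w=8):
    xs = torch.linspace(-1.0, 1.0, w)
    ys = torch.linspace(-1.0, 1.0, h)
    yy, xx = torch.meshgrid([ys, xs])
    grid = torch.stack((xx, yy), dim=-1).unsqueeze(0)  # (1, h, w, 2), (x, y) order
    img = torch.rand(1, 1, h, w)
    warped = F.grid_sample(img, grid, mode='bilinear', padding_mode='zeros',
                           align_corners=True)
    return torch.allclose(img, warped, atol=1e-5)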
|
#import json
import discord
from discord.ext import commands
#resultsFile = open('polls.json')
#resultsLoad = json.load(resultsFile)
emojiList = [
'1️⃣',
'2️⃣',
'3️⃣',
'4️⃣'
]
class Polling(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def poll(self, ctx, *, args):
"""Adds a new poll to be voted on"""
pollCat = 'uncategorised'
pollOptions = list()
maxOptions = 4
if args.find('+') != -1:
pollCat = args[0:args.find(' ')]
args = args[args.find(' '):len(args)]
titleStart = args.find('{') + 1
titleEnd = args.find('}')
pollTitle = args[titleStart:titleEnd]
args = args[titleEnd:len(args)]
while maxOptions > 0:
optionStart = args.find('[') + 1
optionEnd = args.find(']')
if args[optionStart:optionEnd] == '':
break
pollOptions.append(args[optionStart:optionEnd])
args = args[optionEnd + 1:len(args)]
maxOptions = maxOptions - 1
pollBody = ''
for x in range(len(pollOptions)):
pollBody += str(x + 1) + '. ' + pollOptions[x] + '\n'
pollInfo = f'New **{pollCat}** poll ready to vote on!'
embed = discord.Embed(title=pollTitle, description=pollBody)
pollPost = await ctx.send(embed=embed, content=pollInfo)
for x in range(len(pollOptions)):
await pollPost.add_reaction(emojiList[x])
def setup(bot):
bot.add_cog(Polling(bot))
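# --- Usage note (illustrative message, not from the original cog) ------------
# The parser in poll() expects a message shaped roughly like:
#   !poll movies+ {Favourite film?} [Alien] [Blade Runner] [Dune]
# where the leading token containing '+' is an optional category, the poll
# title sits inside {...}, and up to four options follow in [...] brackets.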
|
from .framework import (
managed_history,
selenium_test,
SeleniumTestCase
)
class HistoryStructureTestCase(SeleniumTestCase):
ensure_registered = True
@selenium_test
@managed_history
def test_history_structure(self):
def assert_details(expected_to_be_visible):
error_message = "details are visible!"
if expected_to_be_visible:
error_message = "details are not visible!"
assert expected_to_be_visible == self.components.history_structure.details.is_displayed, error_message
self.perform_upload(self.get_filename("1.fasta"))
self.wait_for_history()
self.components.history_structure.header.assert_absent_or_hidden()
self.components.history_structure.dataset.assert_absent_or_hidden()
self.history_panel_show_structure()
# assert details are not visible before expand
self.components.history_structure.details.assert_absent_or_hidden()
self.components.history_structure.dataset.wait_for_and_click()
self.sleep_for(self.wait_types.UX_RENDER)
assert_details(True)
self.components.history_structure.header.wait_for_and_click()
self.sleep_for(self.wait_types.UX_RENDER)
assert_details(False)
self.components.history_structure.dataset.assert_absent_or_hidden()
|
damage_data = {}
#Don't touch the above line
"""
mystats module for BombSquad version 1.5.29
Provides functionality for dumping player stats to disk between rounds.
"""
ranks=[]
top3Name=[]
import threading,json,os,urllib.request,ba,_ba,setting
from ba._activity import Activity
from ba._music import setmusic, MusicType
# False-positive from pylint due to our class-generics-filter.
from ba._player import EmptyPlayer # pylint: disable=W0611
from ba._team import EmptyTeam # pylint: disable=W0611
from typing import Any, Dict, Optional
from ba._lobby import JoinInfo
from ba import _activitytypes as ba_actypes
from ba._activitytypes import *
import urllib.request
import custom_hooks
import datetime
#variables
our_settings = setting.get_settings_data()
# where our stats file and pretty html output will go
base_path = os.path.join(_ba.env()['python_directory_user'],"stats" + os.sep)
statsfile = base_path + 'stats.json'
htmlfile = base_path + 'stats_page.html'
table_style = "{width:100%;border: 3px solid black;border-spacing: 5px;border-collapse:collapse;text-align:center;background-color:#fff}"
heading_style = "{border: 3px solid black;text-align:center;}"
html_start = f'''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Test Server</title>
<style>table{table_style} th{heading_style}</style>
</head>
<body>
<h3 style="text-align:center">Top 200 Players </h3>
<table border=1>
<tr>
<th><b>Rank</b></th>
<th style="text-align:center"><b>Name</b></th>
<th><b>Score</b></th>
<th><b>Kills</b></th>
<th><b>Deaths</b></th>
<th><b>Games Played</b></th>
</tr>
'''
statsDefault={
"pb-IF4VAk4a": {
"rank": 65,
"name": "pb-IF4VAk4a",
"scores": 0,
"total_damage": 0.0,
"kills": 0,
"deaths": 0,
"games": 18,
"kd": 0.0,
"avg_score": 0.0,
"aid": "pb-IF4VAk4a"
}
}
# <th><b>Total Damage</b></th> #removed this line as it isn't correct data
#useful functions
seasonStartDate=None
import shutil,os
def get_all_stats():
global seasonStartDate
if os.path.exists(statsfile):
renameFile=False
with open(statsfile, 'r',encoding='utf8') as f:
jsonData=json.loads(f.read())
try:
stats=jsonData["stats"]
seasonStartDate=datetime.datetime.strptime(jsonData["startDate"],"%d-%m-%Y")
if (datetime.datetime.now()-seasonStartDate).days >=our_settings["statsResetAfterDays"]:
backupStatsFile()
seasonStartDate=datetime.datetime.now()
return statsDefault
return stats
            except (KeyError, ValueError) as e:
print(e)
return jsonData
else:
return {}
def backupStatsFile():
shutil.copy(statsfile,statsfile.replace(".json","")+str(seasonStartDate)+".json")
def dump_stats(s: dict):
global seasonStartDate
    if seasonStartDate is None:
seasonStartDate=datetime.datetime.now()
s={"startDate":seasonStartDate.strftime("%d-%m-%Y") , "stats":s}
if os.path.exists(statsfile):
with open(statsfile, 'w',encoding='utf8') as f:
f.write(json.dumps(s, indent=4,ensure_ascii=False))
f.close()
else: print('Stats file not found!')
def get_stats_by_id(ID: str):
a = get_all_stats()
if ID in a:
return a[ID]
else:
return None
def refreshStats():
# lastly, write a pretty html version.
# our stats url could point at something like this...
pStats = get_all_stats()
# f=open(htmlfile, 'w')
# f.write(html_start)
entries = [(a['scores'], a['kills'], a['deaths'], a['games'], a['name'], a['aid']) for a in pStats.values()]
# this gives us a list of kills/names sorted high-to-low
entries.sort(reverse=True)
rank = 0
toppers = {}
toppersIDs=[]
_ranks=[]
for entry in entries:
if True:
rank += 1
scores = str(entry[0])
kills = str(entry[1])
deaths = str(entry[2])
games = str(entry[3])
name = str(entry[4])
aid = str(entry[5])
if rank < 6: toppersIDs.append(aid)
            # The kd and avg_score below are not written to the website's html document; they are only stored in stats.json
try:
kd = str(float(kills) / float(deaths))
kd_int = kd.split('.')[0]
kd_dec = kd.split('.')[1]
p_kd = kd_int + '.' + kd_dec[:3]
except Exception:
p_kd = "0"
try:
avg_score = str(float(scores) / float(games))
avg_score_int = avg_score.split('.')[0]
avg_score_dec = avg_score.split('.')[1]
p_avg_score = avg_score_int + '.' + avg_score_dec[:3]
except Exception:
p_avg_score = "0"
if damage_data and aid in damage_data:
dmg = damage_data[aid]
dmg = str(str(dmg).split('.')[0] + '.' + str(dmg).split('.')[1][:3])
else: dmg = 0
_ranks.append(aid)
pStats[str(aid)]["rank"] = int(rank)
pStats[str(aid)]["scores"] = int(scores)
pStats[str(aid)]["total_damage"] += float(dmg) #not working properly
pStats[str(aid)]["games"] = int(games)
pStats[str(aid)]["kills"] = int(kills)
pStats[str(aid)]["deaths"] = int(deaths)
pStats[str(aid)]["kd"] = float(p_kd)
pStats[str(aid)]["avg_score"] = float(p_avg_score)
# if rank < 201:
# #<td>{str(dmg)}</td> #removed this line as it isn't crt data
# f.write(f'''
# <tr>
# <td>{str(rank)}</td>
# <td style="text-align:center">{str(name)}</td>
# <td>{str(scores)}</td>
# <td>{str(kills)}</td>
# <td>{str(deaths)}</td>
# <td>{str(games)}</td>
# </tr>''')
# f.write('''
# </table>
# </body>
# </html>''')
# f.close()
global ranks
ranks=_ranks
dump_stats(pStats)
updateTop3Names(toppersIDs[0:3])
from playersData import pdata
pdata.update_toppers(toppersIDs)
def update(score_set):
"""
Given a Session's ScoreSet, tallies per-account kills
and passes them to a background thread to process and
store.
"""
# look at score-set entries to tally per-account kills for this round
account_kills = {}
account_deaths = {}
account_scores = {}
for p_entry in score_set.get_records().values():
account_id = p_entry.player.get_account_id()
if account_id is not None:
account_kills.setdefault(account_id, 0) # make sure exists
account_kills[account_id] += p_entry.accum_kill_count
account_deaths.setdefault(account_id, 0) # make sure exists
account_deaths[account_id] += p_entry.accum_killed_count
account_scores.setdefault(account_id, 0) # make sure exists
account_scores[account_id] += p_entry.accumscore
# Ok; now we've got a dict of account-ids and kills.
# Now lets kick off a background thread to load existing scores
# from disk, do display-string lookups for accounts that need them,
# and write everything back to disk (along with a pretty html version)
# We use a background thread so our server doesn't hitch while doing this.
if account_scores:
UpdateThread(account_kills, account_deaths, account_scores).start()
class UpdateThread(threading.Thread):
def __init__(self, account_kills, account_deaths, account_scores):
threading.Thread.__init__(self)
self._account_kills = account_kills
self.account_deaths = account_deaths
self.account_scores = account_scores
def run(self):
# pull our existing stats from disk
import datetime
try:
stats=get_all_stats()
except:
stats={}
        # now add this batch of kills to our persistent stats
        for account_id, kill_count in self._account_kills.items():
            # add a new entry for any accounts that don't have one
if account_id not in stats:
# also lets ask the master-server for their account-display-str.
# (we only do this when first creating the entry to save time,
# though it may be smart to refresh it periodically since
# it may change)
stats[account_id] = {'rank': 0,
'name': "deafult name",
'scores': 0,
'total_damage': 0,
'kills': 0,
'deaths': 0,
'games': 0,
'kd': 0,
'avg_score': 0,
'last_seen':str(datetime.datetime.now()),
'aid': str(account_id)}
#Temporary codes to change 'name_html' to 'name'
# if 'name_html' in stats[account_id]:
# stats[account_id].pop('name_html')
# stats[account_id]['name'] = 'default'
url="http://bombsquadgame.com/bsAccountInfo?buildNumber=20258&accountID="+account_id
data=urllib.request.urlopen(url)
if data is not None:
try:
name=json.loads(data.read())["profileDisplayString"]
except ValueError:
stats[account_id]['name']= "???"
else:
stats[account_id]['name']= name
# now increment their kills whether they were already there or not
stats[account_id]['kills'] += kill_count
stats[account_id]['deaths'] += self.account_deaths[account_id]
stats[account_id]['scores'] += self.account_scores[account_id]
stats[account_id]['last_seen'] = str(datetime.datetime.now())
# also incrementing the games played and adding the id
stats[account_id]['games'] += 1
stats[account_id]['aid'] = str(account_id)
# dump our stats back to disk
tempppp = None
from datetime import datetime
dump_stats(stats)
# aaand that's it! There IS no step 27!
now = datetime.now()
update_time = now.strftime("%S:%M:%H - %d %b %y")
#print(f"Added {str(len(self._account_kills))} account's stats entries. || {str(update_time)}")
refreshStats()
def getRank(acc_id):
global ranks
if ranks==[]:
refreshStats()
if acc_id in ranks:
return ranks.index(acc_id)+1
def updateTop3Names(ids):
global top3Name
names=[]
for id in ids:
url="http://bombsquadgame.com/bsAccountInfo?buildNumber=20258&accountID="+id
data=urllib.request.urlopen(url)
if data is not None:
try:
name=json.loads(data.read())["profileDisplayString"]
except ValueError:
names.append("???")
else:
names.append(name)
top3Name=names
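# --- Usage sketch (illustrative; reuses the placeholder id from statsDefault) --
# rank = getRank('pb-IF4VAk4a')           # 1-based position in the score table
# entry = get_stats_by_id('pb-IF4VAk4a')  # dict with scores/kills/deaths/games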
|
import os, re, string
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import bokeh.palettes as bp
from loguru import logger
from GEN_Utils import FileHandling
from ProteomicsUtils import StatUtils
logger.info('Import OK')
input_folder = 'results/recombinant_denaturation/initial_cleanup/'
output_folder = 'results/recombinant_denaturation/kinetic_fitting/'
urea_conc = list(np.arange(0, 6.5, 0.5))
urea_conc.pop(-2)
urea_pos = np.arange(1, len(urea_conc)+1, 1)
colours = list(reversed(bp.magma(12)))
if not os.path.exists(output_folder):
os.mkdir(output_folder)
## Import and cleanup raw plate reader data for each sample
file_list = [filename for filename in os.listdir(input_folder)]
for filename in file_list:
sample_name = filename.split('_')[0]
cleaned_kinetic = pd.read_excel(f'{input_folder}{filename}', sheet_name=None)
cleaned_kinetic.update({key: df.drop([col for col in df.columns.tolist() if 'Unnamed: ' in col], axis=1) for key, df in cleaned_kinetic.items()})
# Regenerate single df with replicate labels
clean_data = pd.concat((cleaned_kinetic.values()))
samples = ['TRP_1', 'TRP_2', 'TRP_3', 'TPE_1', 'TPE_2', 'TPE_3', 'TRP_control', 'TPE_control']
sample_pos = [x for x in string.ascii_uppercase[0:len(samples)]]
sample_map = dict(zip(sample_pos, samples))
clean_data['replicate'] = clean_data['Well\nRow'].map(sample_map)
clean_data.rename(columns={'Well\nCol':'sample_col', 'Well\nRow':'sample_row'}, inplace=True)
# For TPE at each concentration of urea, collect first 9 minutes and fit with linear regression
data_for_fit = {}
time_range = np.arange(0, 9, 1)
for replicate, df in clean_data.groupby('replicate'):
## Cleaning dataframe for plotting
test_sample = df.copy().drop(['samples', 'urea', 'replicate'] , axis=1).set_index(['sample_row', 'sample_col']).T
test_sample.index = [np.arange(0, 60, 1)]
data_for_fit[replicate] = test_sample.loc[time_range]
fit_param_dict = {}
for sample in samples:
fit_params = pd.DataFrame(index=['gradient', 'intercept', 'r_squared'])
for x, col in enumerate(data_for_fit[sample].columns.tolist()):
# Collecting data
x_data = time_range.astype(float)
y_data = data_for_fit[sample][col].astype(float).tolist()
norm_y_data = [value - y_data[0] for value in y_data]
# Calculating fit parameters
x_fit, y_fit, x_data_fitted, y_data_fitted, popt, pcov = StatUtils.fit_calculator(x_data, norm_y_data, StatUtils.linear)
residuals = norm_y_data - StatUtils.linear(x_data, *popt)
ss_res = np.sum(residuals**2)
ss_tot = np.sum((norm_y_data-np.mean(norm_y_data))**2)
r_squared = 1 - (ss_res / ss_tot)
fit_params[col] = list(popt) + [r_squared]
fit_param_dict[sample] = fit_params
sheetnames = [f'{x}' for x in list(fit_param_dict.keys())]
dataframes = [df.reset_index() for df in list(fit_param_dict.values())]
FileHandling.df_to_excel(
f'{output_folder}{sample_name}_Norm_fit_params.xlsx',
sheetnames=sheetnames,
data_frames=dataframes)
# ## Collecting gradients for each concentration at each sample
gradients_dict = {
sample: fit_param_dict[sample].loc['gradient'].reset_index(drop=True)
for sample in samples
}
gradients = pd.DataFrame.from_dict(gradients_dict, orient='columns')
gradients['urea'] = urea_conc
tpe_cols = ['urea'] + [col for col in gradients.columns.tolist() if 'TPE' in col]
trp_cols = ['urea'] + [col for col in gradients.columns.tolist() if 'TRP' in col]
data_frames = [gradients[tpe_cols], gradients[trp_cols]]
FileHandling.df_to_excel(
output_path=f'{output_folder}{sample_name}_gradient_summary.xlsx',
data_frames=data_frames,
sheetnames=['TPE', 'TRP'])
|
import torch
from torch import nn
# Disentangle NCE loss in MoCo manner
class DisNCELoss(nn.Module):
def __init__(self, opt):
super().__init__()
self.opt = opt
# feat_B for the background, feat_R for the rain
# shape: (num_patches * batch_size, feature length)
    def forward(self, featB, featR):
        batch_size = self.opt.batch_size
        num_patches = featB.shape[0] // batch_size
        if featR.shape[0] != num_patches * batch_size:
            raise ValueError('num_patches of rain and background are not equal')
        # making labels: 1 for background patches, 0 for rain patches
        labels = torch.cat([torch.ones(num_patches, 1), torch.zeros(num_patches, 1)], dim=0)
        loss_dis_total = 0
        # obtain each background and the rain layer to calculate the disentangle loss
        for i in range(0, batch_size):
            cur_featB = featB[i * num_patches:(i + 1) * num_patches, :]
            cur_featR = featR[i * num_patches:(i + 1) * num_patches, :]
            cur_disloss = self.cal_each_disloss(cur_featB, cur_featR, labels)
            loss_dis_total += cur_disloss
        return loss_dis_total
# cur_featB: [num_patches, feature length]
# labels: [num_patches*2, 1]
    def cal_each_disloss(self, cur_featB, cur_featR, labels):
        device = cur_featB.device
        featFusion = torch.cat([cur_featB, cur_featR], dim=0)
        # mask[i, j] == 1 when patches i and j share a label (both rain or both background)
        mask = torch.eq(labels, labels.t()).float().to(device)
        # contrast_count: number of all the rain and background patches
        contrast_feature = featFusion
        contrast_count = featFusion.shape[0]
        anchor_feature = contrast_feature
        anchor_count = contrast_count
        # compute logits of all the elements
        # Denoting: zi: one sample, zl: all the other samples, zp: positives to zi, za: negatives to zi
        # anchor_dot_contrast = zi * zl
        anchor_dot_contrast = torch.div(
            torch.matmul(anchor_feature, contrast_feature.T),
            self.opt.nce_T)
        # for numerical stability
        logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
        logits = anchor_dot_contrast - logits_max.detach()
        # mask-out self-contrast cases
        logits_mask = torch.ones_like(mask).scatter_(
            1, torch.arange(anchor_count).view(-1, 1).to(device), 0)
        mask = mask * logits_mask
        # compute log_prob
        # exp_logits: exp(zi * zl)
        exp_logits = torch.exp(logits) * logits_mask
        # the meaning of sum(1): sum the similarity of one sample and all the other samples
        # log_prob: (zi*zl) - log(sum(exp(zi*zl))) = log[exp(zi*zl) / sum(exp(zi*zl))]
        log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))
        # compute mean of log-likelihood over positives
        # (mask * log_prob).sum(1): log [sum(exp(zi*zp)) / sum(exp(zi*zl))]
        # mask.sum(1): |P(i)|
        # mean_log_prob_pos: L^sup_out
        mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
        # loss: average of the supervised-contrastive term over all patches
        loss = -mean_log_prob_pos.mean()
        return loss
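# --- Usage sketch (not part of the original module) ---------------------------
# Illustrative only: _DemoOpt stands in for the real options object and simply
# exposes the two attributes the loss reads (batch_size and nce_T).
def _demo_disnce_loss():
    class _DemoOpt:
        batch_size = 2
        nce_T = 0.07
    criterion = DisNCELoss(_DemoOpt())
    featB = torch.randn(8, 16)  # 4 patches per image, 2 images, 16-d features
    featR = torch.randn(8, 16)
    return criterion(featB, featR)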
|
#!/usr/bin/env python
import argparse
from collections import OrderedDict
import sys
from glob import glob
from os.path import join, splitext, basename
XRDB2REM = [
("# Head", "\n" "[ssh_colors]" ),
("background_color", "background = "),
("cursor_color", "cursor = "),
("foreground_color", "foreground = "),
("ansi_0_color", "color0 = "),
("ansi_1_color", "color1 = "),
("ansi_2_color", "color2 = "),
("ansi_3_color", "color3 = "),
("ansi_4_color", "color4 = "),
("ansi_5_color", "color5 = "),
("ansi_6_color", "color6 = "),
("ansi_7_color", "color7 = "),
("ansi_8_color", "color8 = "),
("ansi_9_color", "color9 = "),
("ansi_10_color", "color10 = "),
("ansi_11_color", "color11 = "),
("ansi_12_color", "color12 = "),
("ansi_13_color", "color13 = "),
("ansi_14_color", "color14 = "),
("ansi_15_color", "color15 = "),
("bold_color", "colorBD = "),
("italic_color", "colorIT = "),
("underline_color", "colorUL = "),
]
class XrdbEntry(object):
def __init__(self, define: str, key: str, value: str, *args: str):
super().__init__()
self.define = define
self.key = key.lower()
self.value = value
def commented(self):
return self.define.strip().startswith("!")
def convert(xrdb_colors, remmina_out=sys.stdout):
remmina = OrderedDict(XRDB2REM)
for xrdb_key in remmina.keys():
if xrdb_key in xrdb_colors:
remmina[xrdb_key] = remmina[xrdb_key] + xrdb_colors[xrdb_key]
else:
remmina[xrdb_key] = remmina[xrdb_key]
try:
f = remmina_out
if not hasattr(f, 'close'):
f = open(remmina_out, 'w')
for value in remmina.values():
print(value.strip(), file=f)
finally:
if f != sys.stdout:
f.close()
def read_xrdb(itermcolors_input=sys.stdin):
xrdb_colors = dict()
try:
f = itermcolors_input
if not hasattr(f, 'close'):
f = open(itermcolors_input, 'r')
for line in f:
xrdb_entry = XrdbEntry(*line.split())
if not xrdb_entry.commented():
xrdb_colors.setdefault(xrdb_entry.key, xrdb_entry.value)
finally:
f.close()
return xrdb_colors
def main(xrdb_path, output_path=None):
for f in glob(join(xrdb_path, '*.xrdb')):
xrdb_in = read_xrdb(f)
base_name = splitext(basename(f))[0]
remmina_out = output_path and join(output_path, base_name + '.colors') or sys.stdout
convert(xrdb_in, remmina_out)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Translate X color schemes to Remmina .colors format')
parser.add_argument('xrdb_path', type=str, help='path to xrdb files')
parser.add_argument('-d', '--out-directory', type=str, dest='output_path',
help='path where Remmina .colors config files will be' +
' created, if not provided then will be printed')
args = parser.parse_args()
main(args.xrdb_path, args.output_path)
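# --- Usage note (illustrative paths) ------------------------------------------
#   python this_script.py ./xrdb-schemes -d ./remmina-colors
# converts every *.xrdb file found in ./xrdb-schemes into a Remmina .colors
# file; without -d the converted scheme is printed to stdout instead.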
|
"""Store version constants."""
MAJOR_VERSION = 0
MINOR_VERSION = 17
PATCH_VERSION = '0'
__version__ = '{}.{}.{}'.format(MAJOR_VERSION, MINOR_VERSION, PATCH_VERSION)
|
"""
Support for reading and writing the `AXT`_ format used for pairwise
alignments.
.. _AXT: http://genome.ucsc.edu/goldenPath/help/axt.html
"""
from bx.align import *
import itertools
from bx import interval_index_file
# Tools for dealing with pairwise alignments in AXT format
class MultiIndexed( object ):
"""Similar to 'indexed' but wraps more than one axt_file"""
def __init__( self, axt_filenames, keep_open=False ):
self.indexes = [ Indexed( axt_file, axt_file + ".index" ) for axt_file in axt_filenames ]
def get( self, src, start, end ):
blocks = []
for index in self.indexes: blocks += index.get( src, start, end )
return blocks
class Indexed( object ):
"""Indexed access to a axt using overlap queries, requires an index file"""
def __init__( self, axt_filename, index_filename=None, keep_open=False, species1 = None, species2=None, species_to_lengths=None, support_ids=False ):
if index_filename is None: index_filename = axt_filename + ".index"
self.indexes = interval_index_file.Indexes( filename=index_filename )
self.axt_filename = axt_filename
# nota bene: (self.species1 = species1 or "species1") is incorrect if species1=""
self.species1 = species1
if (self.species1 == None): self.species1 = "species1"
self.species2 = species2
if (self.species2 == None): self.species2 = "species2"
self.species_to_lengths = species_to_lengths
self.support_ids = support_ids # for extra text at end of axt header lines
if keep_open:
self.f = open( axt_filename )
else:
self.f = None
def get( self, src, start, end ):
intersections = self.indexes.find( src, start, end )
return itertools.imap( self.get_axt_at_offset, [ val for start, end, val in intersections ] )
def get_axt_at_offset( self, offset ):
if self.f:
self.f.seek( offset )
return read_next_axt( self.f, self.species1, self.species2, self.species_to_lengths, self.support_ids )
else:
f = open( self.axt_filename )
try:
f.seek( offset )
return read_next_axt( f, self.species1, self.species2, self.species_to_lengths, self.support_ids )
finally:
f.close()
class Reader( object ):
"""Iterate over all axt blocks in a file in order"""
def __init__( self, file, species1 = None, species2=None, species_to_lengths=None, support_ids=False ):
self.file = file
# nota bene: (self.species1 = species1 or "species1") is incorrect if species1=""
self.species1 = species1
if (self.species1 == None): self.species1 = "species1"
self.species2 = species2
if (self.species2 == None): self.species2 = "species2"
self.species_to_lengths = species_to_lengths
self.support_ids = support_ids # for extra text at end of axt header lines
self.attributes = {}
def next( self ):
return read_next_axt( self.file, self.species1, self.species2, self.species_to_lengths, self.support_ids )
def __iter__( self ):
return ReaderIter( self )
def close( self ):
self.file.close()
class ReaderIter( object ):
def __init__( self, reader ):
self.reader = reader
def __iter__( self ):
return self
def next( self ):
v = self.reader.next()
if not v: raise StopIteration
return v
class Writer( object ):
def __init__( self, file, attributes={} ):
self.file = file
self.block = 0
self.src_split = True
if ("src_split" in attributes):
self.src_split = attributes["src_split"]
def write( self, alignment ):
if (len(alignment.components) != 2):
raise ValueError("%d-component alignment is not compatible with axt" % \
len(alignment.components))
c1 = alignment.components[0]
c2 = alignment.components[1]
if c1.strand != "+":
c1 = c1.reverse_complement()
c2 = c2.reverse_complement()
if (self.src_split):
spec1,chr1 = src_split( c1.src )
spec2,chr2 = src_split( c2.src )
else:
chr1,chr2 = c1.src,c2.src
self.file.write( "%d %s %d %d %s %d %d %s %s\n" % \
(self.block,chr1,c1.start+1,c1.start+c1.size,
chr2,c2.start+1,c2.start+c2.size,c2.strand,
alignment.score))
self.file.write( "%s\n" % c1.text )
self.file.write( "%s\n" % c2.text )
self.file.write( "\n" )
self.block += 1
def close( self ):
self.file.close()
# ---- Helper methods ---------------------------------------------------------
# typical axt block:
# 0 chr19 3001012 3001075 chr11 70568380 70568443 - 3500 [optional text]
# TCAGCTCATAAATCACCTCCTGCCACAAGCCTGGCCTGGTCCCAGGAGAGTGTCCAGGCTCAGA
# TCTGTTCATAAACCACCTGCCATGACAAGCCTGGCCTGTTCCCAAGACAATGTCCAGGCTCAGA
# start and stop are origin-1, inclusive
# first species is always on plus strand
# when second species is on minus strand, start and stop are counted from sequence end
def read_next_axt( file, species1, species2, species_to_lengths=None, support_ids=False ):
line = readline( file, skip_blank=True )
if not line: return
fields = line.split()
if (len(fields) < 9) or ((not support_ids) and (len(fields) > 9)):
raise ValueError("bad axt-block header: %s" % line)
attributes = {}
if (len(fields) > 9):
attributes["id"] = "_".join(fields[9:])
seq1 = readline( file )
    if not seq1 or seq1.isspace(): raise ValueError("incomplete axt-block; header: %s" % line)
seq2 = readline( file )
    if not seq2 or seq2.isspace(): raise ValueError("incomplete axt-block; header: %s" % line)
# Build 2 component alignment
alignment = Alignment(attributes=attributes,species_to_lengths=species_to_lengths)
# Build component for species 1
component = Component()
component.src = fields[1]
if (species1 != ""): component.src = species1 + "." + component.src
component.start = int( fields[2] ) - 1 # (axt intervals are origin-1
end = int( fields[3] ) # and inclusive on both ends)
component.size = end - component.start
component.strand = "+"
component.text = seq1.strip()
alignment.add_component( component )
# Build component for species 2
component = Component()
component.src = fields[4]
if (species2 != ""): component.src = species2 + "." + component.src
component.start = int( fields[5] ) - 1
end = int( fields[6] )
component.size = end - component.start
component.strand = fields[7]
component.text = seq2.strip()
alignment.add_component( component )
# add score
try:
alignment.score = int( fields[8] )
except:
try:
alignment.score = float( fields[8] )
except:
alignment.score = fields[8]
return alignment
def readline( file, skip_blank=False ):
"""Read a line from provided file, skipping any blank or comment lines"""
while 1:
line = file.readline()
if not line: return None
if line[0] != '#' and not ( skip_blank and line.isspace() ):
return line
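# --- Usage sketch (illustrative; matches the Python 2 era iterator API above) --
#   f = open("alignments.axt")
#   for block in Reader(f, species1="hg18", species2="mm9"):
#       print block.score, len(block.components)
#   f.close()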
|
import math
import pickle
import time
from functools import wraps
import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage
### define profilers (https://stackoverflow.com/questions/3620943/measuring-elapsed-time-with-the-time-module)
PROF_DATA = {}
def profile(fn):
@wraps(fn)
def with_profiling(*args, **kwargs):
start_time = time.time()
ret = fn(*args, **kwargs)
elapsed_time = time.time() - start_time
if fn.__name__ not in PROF_DATA:
PROF_DATA[fn.__name__] = [0, []]
PROF_DATA[fn.__name__][0] += 1
PROF_DATA[fn.__name__][1].append(elapsed_time)
return ret
return with_profiling
def print_prof_data():
for fname, data in PROF_DATA.items():
max_time = max(data[1])
avg_time = sum(data[1]) / len(data[1])
print("Function %s called %d times. " % (fname, data[0]))
print('Execution time max: %.3f, average: %.3f' % (max_time, avg_time))
def clear_prof_data():
global PROF_DATA
PROF_DATA = {}
### end of profiler
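### Usage sketch for the profiler above (illustrative function name)
# @profile
# def _slow_step():
#     time.sleep(0.01)
#
# for _ in range(3):
#     _slow_step()
# print_prof_data()   # prints call count plus max/average execution time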
class Gradient():
def __init__(self, sobel_kernel=3, sx_thresh=(20,100), s_thresh=(170,255)):
self.sobel_kernel = sobel_kernel
self.sx_thresh = sx_thresh
self.s_thresh = s_thresh
def setSobelKernel(self, sobel_kernel):
self.sobel_kernel = sobel_kernel
def setSThresh(self, s_thresh):
self.s_thresh = s_thresh
def setSXThresh(self,sx_thresh):
self.sx_thresh = sx_thresh
def preprocess(self, img):
pass
class AbsGrad(Gradient):
"""Calculate directional gradient"""
def preprocess(self, img):
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Apply x direction
sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=self.sobel_kernel)
abs_sobel = np.absolute(sobel)
scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= self.sx_thresh[0]) & (scaled_sobel <= self.sx_thresh[1])] = 1
return sxbinary
class MagGrad(Gradient):
"""Calculate gradient magnitude"""
def preprocess(self, img):
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the gradient in x and y separately
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=self.sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=self.sobel_kernel)
# 3) Calculate the magnitude
mag = np.sqrt(np.square(sobelx) + np.square(sobely))
# 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8
scaled_mag = np.uint8(255 * mag / np.max(mag))
# 5) Create a binary mask where mag thresholds are met
sxbinary = np.zeros_like(scaled_mag)
sxbinary[(scaled_mag >= self.sx_thresh[0]) & (scaled_mag <= self.sx_thresh[1])] = 1
return sxbinary
class DirGrad(Gradient):
"""Calculate gradient direction"""
def preprocess(self, img):
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the gradient in x and y separately
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=self.sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=self.sobel_kernel)
# 3) Take the absolute value of the x and y gradients
abs_sobelx = np.absolute(sobelx)
abs_sobely = np.absolute(sobely)
# 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient
dir = np.arctan2(abs_sobely, abs_sobelx)
# 5) Create a binary mask where direction thresholds are met
sxbinary = np.zeros_like(dir)
sxbinary[(dir >= self.sx_thresh[0]) & (dir <= self.sx_thresh[1])] = 1
# 6) Return this mask as your binary_output image
return sxbinary
class SXGrad(Gradient):
"""Calculate S-Channel and X directional sobel gradient"""
def preprocess(self, img):
# grayscale image
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# sobel operation applied to x axis
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
# Get absolute values
abs_sobelx = np.absolute(sobelx)
# Scale to (0, 255)
scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= self.sx_thresh[0]) & (scaled_sobel <= self.sx_thresh[1])] = 1
# Get HLS color image
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float)
# Get s-channel
s_channel = hls[:, :, 2]
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= self.s_thresh[0]) & (s_channel <= self.s_thresh[1])] = 1
# Combine grayscale and color gradient
combined_binary = np.zeros_like(sxbinary)
combined_binary[(s_binary == 1) | (sxbinary == 1)] = 1
return combined_binary
class SChannelGrad(Gradient):
def preprocess(self, img):
# Get HLS color image
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float)
# Get s-channel
s_channel = hls[:, :, 2]
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= self.s_thresh[0]) & (s_channel <= self.s_thresh[1])] = 1
return s_binary
class LightAutoGrad(Gradient):
def preprocess(self, img):
# grayscale image
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
mean_gray = np.mean(gray)
# sobel operation applied to x axis
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
# Get absolute values
abs_sobelx = np.absolute(sobelx)
# Scale to (0, 255)
scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= self.sx_thresh[0]) & (scaled_sobel <= self.sx_thresh[1])] = 1
# Get HLS color image
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float)
if mean_gray > 80:
# Get s-channel
s_channel = hls[:, :, 2]
        else:  # Dark environment
# Get l-channel
s_channel = hls[:, :, 1]
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= self.s_thresh[0]) & (s_channel <= self.s_thresh[1])] = 1
# Combine grayscale and color gradient
combined_binary = np.zeros_like(sxbinary)
combined_binary[(s_binary == 1) | (sxbinary == 1)] = 1
return combined_binary
# Define a class to receive the characteristics of each line detection
class Line():
def __init__(self):
# was the line detected in the last iteration?
self.detected = False
# Save n fits
self.N = 5
# Margin
self.margin = 50
# x values of the last n fits of the line
self.recent_xfitted = []
# average x values of the fitted line over the last n iterations
self.bestx = None
# polynomial coefficients of the last n iterations
self.fits = []
# polynomial coefficients averaged over the last n iterations
self.best_fit = None
# save polynomial coeffs of last iteration
self.last_fit = None
# polynomial coefficients for the most recent fit
self.current_fit = [np.array([False])]
# radius of curvature of the line in some units
self.radius_of_curvature = None
# distance in meters of vehicle center from the line
self.line_base_pos = None
# difference in fit coefficients between last and new fits
self.diffs = np.array([0,0,0], dtype='float')
# x values for detected line pixels
self.allx = None
# y values for detected line pixels
self.ally = None
self.MaxFail = 5
self.fail_num = 0
def get_init_xy(self, base, binary_warped):
"""Get initial valid pixel coordinates"""
out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
# Choose the number of sliding windows
nwindows = 9
# Set height of windows
window_height = np.int(binary_warped.shape[0] / nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
x_current = base
# Set the width of the windows +/- margin
margin = self.margin
# Set minimum number of pixels found to recenter window
minpix = 50
        # Create an empty list to receive lane pixel indices
lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window + 1) * window_height
win_y_high = binary_warped.shape[0] - window * window_height
win_x_low = x_current - margin
win_x_high = x_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img, (win_x_low, win_y_low), (win_x_high, win_y_high),
(0, 255, 0), 2)
# Identify the nonzero pixels in x and y within the window
good_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_x_low) & (nonzerox < win_x_high)).nonzero()[0]
# Append these indices to the lists
lane_inds.append(good_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_inds) > minpix:
x_current = np.int(np.mean(nonzerox[good_inds]))
# Concatenate the arrays of indices
lane_inds = np.concatenate(lane_inds)
# Extract left and right line pixel positions
x = nonzerox[lane_inds]
y = nonzeroy[lane_inds]
self.allx = x
self.ally = y
def get_ctn_xy(self, binary_warped):
"""Get valid pixel coordinates from previous detection"""
# Update last fit
self.last_fit = self.current_fit
# we know where the lines are in the previous frame
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
margin = self.margin
fit = self.current_fit
lane_inds = ((nonzerox > (fit[0] * (nonzeroy ** 2) + fit[1] * nonzeroy +
fit[2] - margin)) & (nonzerox < (fit[0] * (nonzeroy ** 2) +
fit[1] * nonzeroy + fit[
2] + margin)))
# Extract left and right line pixel positions
x = nonzerox[lane_inds]
y = nonzeroy[lane_inds]
self.allx = x
self.ally = y
def setNum(self, n):
self.N = n
def test(self):
print("Line prints here.")
class KalmanFilter():
"""Implement a simple Kalman filter"""
def __init__(self, n_in, q=(1,1,1), R=(1,1,1)):
self.P = np.array(q) # np.array([[q[0],0,0],[0,q[1],0],[0,0,q[2]]], np.float64)
self.R = np.array(R) # np.array([[R[0],0,0],[0,R[1],0],[0,0,R[2]]], np.float64)
self.K = None
self.P_pr = None
self.X = np.zeros((n_in, ), np.float64)
self.Init = False
def update(self, X_e):
if not self.Init:
self.Init = True
self.X = X_e
self.X_pr = self.X
self.P_pr = self.P
return self.X.flatten()
for i in range(3):
xe = X_e[i]
self.X_pr[i] = self.X[i]
K = self.P_pr[i]/(self.P_pr[i]+self.R[i])
self.X[i] = self.X_pr[i]+K*(X_e[i]-self.X_pr[i])
self.P[i] = (1-K)*self.P_pr[i]
return self.X.flatten()
X_e = X_e[:,None]
# time update
self.X_pr = self.X
self.P_pr = self.P
# time update
self.K = self.P_pr * np.linalg.inv(self.P_pr + self.R)
self.X = self.X_pr + np.matmul(self.K, (X_e-self.X_pr))
self.P = np.matmul((np.eye(3)-self.K), self.P_pr)
return self.X.flatten()
def updatebk(self, X_e):
        if not self.Init:
self.Init = True
self.X = X_e[:, None]
self.X_pr = self.X
return self.X.flatten()
X_e = X_e[:,None]
# time update
self.X_pr = self.X
self.P_pr = self.P
# time update
self.K = np.matmul(self.P_pr, np.linalg.inv(self.P_pr + self.R))
self.X = self.X_pr + np.matmul(self.K, (X_e-self.X_pr))
self.P = np.matmul((np.eye(3)-self.K), self.P_pr)
return self.X.flatten()
def setPR(self,P,R):
self.P = P
self.R = R
def printState(self):
print('X=', self.X)
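# --- Usage sketch (illustrative coefficients) ----------------------------------
# kf = KalmanFilter(3, q=(4e-8, 1e-2, 100), R=(1e-4, 1, 10000))
# smoothed = kf.update(np.array([2e-4, -0.3, 350.0]))  # smoothed quadratic fit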
class Detector():
def __init__(self, mtx, dist, M, Minv, sx_thresh=(20,100), s_thresh=(170,255)):
"""Initialization of lane line detector object"""
# Set camera calibration and warp perspective
self.CameraMatrix = mtx
self.Distortion = dist
self.WarpMatrix = M
self.WarpMatrixInv = Minv
self.Gradient = Gradient()
self.setBinaryFun(3)
self.LeftLine = Line()
self.RightLine = Line()
self.KFLeft = KalmanFilter(3, q=(4e-8, 1e-2, 100), R=(1e-4, 1, 10000))
self.KFRight = KalmanFilter(3, q=(4e-8, 1e-2, 100), R=(1e-4, 1, 10000))
self.UseKalmanFilter = True
# Set lane line detection uninitialized
self.InitializedLD = False
# cache the states of last 5 iterations
self.cacheNum = 5
self.ploty = None
self.distTop = 0
self.dist_to_left = 0
self.dist_to_center = 0
self.distButtom = 0
self.img = None
self.undist = None
self.margin = 50
# Define conversions in x and y from pixels space to meters
self.ym_per_pix = 30 / 720 # meters per pixel in y dimension
self.xm_per_pix = 3.7 / 700 # meters per pixel in x dimension
self.FitLeft = []
self.FitRight = []
self.debug = False
self.CurvtRatio = 0
# self.pool = ThreadPool(processes=1)
def setMargin(self,margin):
self.margin = margin
self.LeftLine.margin = margin
self.RightLine.margin =margin
def switchKF(self, status):
self.UseKalmanFilter = status
def setMaxFail(self, mf):
self.LeftLine.MaxFail = mf
self.RightLine.MaxFail = mf
def setBinaryFun(self, flag=0):
"""Set the method to generate binary gradient output of a RGB image"""
if flag==0:
self.Gradient = AbsGrad()
elif flag==1:
self.Gradient = MagGrad()
elif flag==2:
self.Gradient = DirGrad()
elif flag==3:
self.Gradient = SXGrad()
elif flag==4:
self.Gradient = SChannelGrad()
elif flag==5:
self.Gradient = LightAutoGrad()
else:
            raise ValueError('Invalid flag: ' + str(flag))
@profile
def performBinary(self, img):
"""Get the binary gradient output of a RGB image"""
return self.Gradient.preprocess(img)
@profile
def get_xy_pvalue(self, binary_warped):
"""Get the xy pixel values for fitting"""
if self.LeftLine.detected:
self.LeftLine.get_ctn_xy(binary_warped)
else:
ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
self.ploty = ploty
histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
midpoint = np.int(histogram.shape[0] / 2)
leftx_base = np.argmax(histogram[:midpoint])
self.LeftLine.get_init_xy(leftx_base, binary_warped)
if self.RightLine.detected:
self.RightLine.get_ctn_xy(binary_warped)
else:
histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
midpoint = np.int(histogram.shape[0] / 2)
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
self.RightLine.get_init_xy(rightx_base, binary_warped)
@profile
def detect(self, img):
"""Detect lane lines on an image"""
self.img = img
# preprocessing img
self.undist = self._undistort(img)
warped = self._warp(self.undist)
if self.debug:
db_gray = cv2.cvtColor(warped, cv2.COLOR_RGB2GRAY)
print('Mean gray = ', np.mean(db_gray))
binary_warped = self.performBinary(warped)
binary_warped = self._gaussian_blur(binary_warped)
# Get valid pixel coordinates
self.get_xy_pvalue(binary_warped)
# Fit a second order polynomial to each
left_fit = np.polyfit(self.LeftLine.ally, self.LeftLine.allx, 2)
right_fit = np.polyfit(self.RightLine.ally, self.RightLine.allx, 2)
if self.UseKalmanFilter:
left_fit = self.KFLeft.update(left_fit)
right_fit = self.KFRight.update(right_fit)
self.sanityCheck(left_fit, right_fit)
self.update()
output = self.visualizeInput()
binOut = self.visualizeDetection(binary_warped)
# height = output.shape[0]
width = output.shape[1]
scaleDown = 0.4
height_s = math.floor(binOut.shape[0]*scaleDown)
width_s = math.floor(binOut.shape[1]*scaleDown)
binOut = cv2.resize(binOut, (width_s, height_s))
output[0:height_s, (width-width_s):width,:] = binOut
return output
@profile
def visualizeDetection(self, img):
"""Plot the detection result on the warped binary image"""
# Create an image to draw on and an image to show the selection window
if len(img.shape) > 2:
out_img = np.copy(img)
else:
out_img = np.dstack((img, img, img)) * 255
window_img = np.zeros_like(out_img)
# Color in left and right line pixels
#out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
#out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
left_fit = self.LeftLine.current_fit
right_fit = self.RightLine.current_fit
ploty = self.ploty
left_fitx = self.LeftLine.recent_xfitted[-1]
right_fitx = self.RightLine.recent_xfitted[-1]
margin = self.margin
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + margin,
ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + margin,
ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
width = img.shape[1]
result[ploty.astype(np.int32), left_fitx.astype(np.int32) % width] = [255, 0, 0]
result[ploty.astype(np.int32), (left_fitx.astype(np.int32)-1) % width ] = [255, 0, 0]
result[ploty.astype(np.int32), (left_fitx.astype(np.int32)+1) % width] = [255, 0, 0]
result[ploty.astype(np.int32), right_fitx.astype(np.int32) % width] = [255, 0, 0]
result[ploty.astype(np.int32), (right_fitx.astype(np.int32)-1) % width] = [255, 0, 0]
result[ploty.astype(np.int32), (right_fitx.astype(np.int32)+1) % width] = [255, 0, 0]
result[self.LeftLine.ally, self.LeftLine.allx] = [0, 0, 255]
result[self.RightLine.ally, self.RightLine.allx] = [0, 0, 255]
if self.debug:
plt.imshow(result)
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
plt.xlim(0, 1280)
plt.ylim(720, 0)
plt.show()
return result
@profile
def visualizeInput(self):
"""Plot the result on the input RGB image"""
# Create an image to draw the lines on
color_warp = np.zeros_like(self.img).astype(np.uint8)
#color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
left_fitx = self.LeftLine.recent_xfitted[-1]
right_fitx = self.RightLine.recent_xfitted[-1]
ploty = self.ploty
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = self._invwarp(color_warp)
# Combine the result with the original image
result = cv2.addWeighted(self.undist, 1, newwarp, 0.3, 0)
cv2.putText(result, "Radius of curvature = {:4d}m".format(math.floor(self.LeftLine.radius_of_curvature)),
(10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, thickness=2)
cv2.putText(result, "Distance to center = {:3.2f}m".format(self.dist_to_center),
(10, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, thickness=2)
cv2.putText(result, "CvtRatio = {:3.2f}".format(self.CurvtRatio),
(10, 90), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, thickness=2)
cv2.putText(result, "Failed {:d} times".format(self.LeftLine.fail_num), (10, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, thickness=2)
if not self.InitializedLD:
cv2.putText(result, "Reset detection", (10, 150), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, thickness=2)
return result.astype(np.uint8)
@profile
def getCurvature(self, left_fit, right_fit):
"""Calculate curvature of two lines"""
# Fit new polynomials to x,y in world space
ploty = self.ploty
ym_per_pix = self.ym_per_pix
xm_per_pix = self.xm_per_pix
leftx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
rightx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
y_eval = np.max(ploty)
left_fit_cr = np.polyfit(ploty * ym_per_pix, leftx * xm_per_pix, 2)
right_fit_cr = np.polyfit(ploty * ym_per_pix, rightx * xm_per_pix, 2)
# Calculate the new radii of curvature
left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / \
np.absolute(2 * left_fit_cr[0])
right_curverad = ((1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / \
np.absolute(2 * right_fit_cr[0])
return left_curverad, right_curverad
def get_dist_to_left(self):
xm_per_pix = self.xm_per_pix
dist_to_left = xm_per_pix * (self.img.shape[1] / 2 - self.LeftLine.recent_xfitted[-1][-1])
return dist_to_left
def get_dist_to_center(self):
xm_per_pix = self.xm_per_pix
dist_to_top = xm_per_pix * (self.img.shape[1] / 2 -
(self.LeftLine.recent_xfitted[-1][-1] + self.RightLine.recent_xfitted[-1][-1])/2)
return dist_to_top
def distance(self):
print("The distance of two lines is .")
return 1.0
def sanityCheck(self, left_fit, right_fit):
self.LeftLine.diffs = left_fit - self.LeftLine.current_fit
self.RightLine.diffs = right_fit - self.RightLine.current_fit
curvts = self.getCurvature(left_fit, right_fit)
curvts = np.absolute(curvts)
ratio = np.max(curvts)/np.min(curvts)
self.CurvtRatio = ratio
ploty = self.ploty
left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
dmax = np.max(np.absolute(left_fitx - right_fitx))
dmin = np.min(np.absolute(left_fitx - right_fitx))
self.LeftLine.current_fit = left_fit
self.RightLine.current_fit = right_fit
a1 = 0.1
a2 = 1 - a1
if ratio > 5 or (dmax > 900 or dmin < 400): # or (dmax-dmin)/dmin > 0.5
self.LeftLine.fail_num += 1
# Smooth the result
N = self.LeftLine.N
self.LeftLine.current_fit = a1 * self.LeftLine.current_fit + a2 * np.mean(np.array(self.LeftLine.fits),
axis=0)
self.RightLine.current_fit = a1 * self.RightLine.current_fit + a2 * np.mean(np.array(self.RightLine.fits),
axis=0)
else:
self.InitializedLD = True
self.LeftLine.detected = True
self.RightLine.detected = True
self.LeftLine.fail_num = 0
if len(self.LeftLine.recent_xfitted)>0:
if np.max(np.absolute(left_fitx - self.LeftLine.recent_xfitted[-1]))>70:
self.LeftLine.current_fit = a1 * self.LeftLine.current_fit + \
a2 * np.mean(np.array(self.LeftLine.fits), axis=0)
else:
self.LeftLine.current_fit = left_fit
self.LeftLine.recent_xfitted.append(left_fitx)
if len(self.RightLine.recent_xfitted)>0:
if np.max(np.absolute(right_fitx - self.RightLine.recent_xfitted[-1]))>70:
self.RightLine.current_fit = a1 * self.RightLine.current_fit + \
a2 * np.mean(np.array(self.RightLine.fits), axis=0)
else:
self.RightLine.current_fit = right_fit
self.RightLine.recent_xfitted.append(right_fitx)
# Pushback states
if len(self.LeftLine.fits) < self.LeftLine.N:
self.LeftLine.fits.append(self.LeftLine.current_fit)
else:
self.LeftLine.fits.pop(0)
self.LeftLine.fits.append(self.LeftLine.current_fit)
if len(self.RightLine.fits) < self.RightLine.N:
self.RightLine.fits.append(self.RightLine.current_fit)
else:
self.RightLine.fits.pop(0)
self.RightLine.fits.append(self.RightLine.current_fit)
def update(self):
self.FitLeft.append(self.LeftLine.current_fit)
self.FitRight.append(self.RightLine.current_fit)
curvts = self.getCurvature(self.LeftLine.current_fit, self.RightLine.current_fit)
self.LeftLine.radius_of_curvature = curvts[0]
self.RightLine.radius_of_curvature = curvts[1]
left_fit = self.LeftLine.current_fit
right_fit = self.RightLine.current_fit
ploty = self.ploty
left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
if len(self.LeftLine.recent_xfitted) < self.LeftLine.N:
self.LeftLine.recent_xfitted.append(left_fitx)
else:
self.LeftLine.recent_xfitted.pop(0)
self.LeftLine.recent_xfitted.append(left_fitx)
if len(self.RightLine.recent_xfitted) < self.RightLine.N:
self.RightLine.recent_xfitted.append(right_fitx)
else:
self.RightLine.recent_xfitted.pop(0)
self.RightLine.recent_xfitted.append(right_fitx)
self.dist_to_left = self.get_dist_to_left()
self.dist_to_center = self.get_dist_to_center()
def setKF_PR(self,P,R):
self.KFLeft.setPR(P, R)
self.KFRight.setPR(P,R)
def _gaussian_blur(self, img, kernel_size=5):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
@profile
def _undistort(self, img):
"""Apply image undistortion"""
return cv2.undistort(img, self.CameraMatrix, self.Distortion, None, self.CameraMatrix)
@profile
def _warp(self, img):
"""Apply image warp transformation"""
return cv2.warpPerspective(img, self.WarpMatrix, (img.shape[1], img.shape[0]))
def _invwarp(self, img):
"""Apply image inverse warp transformation"""
return cv2.warpPerspective(img, self.WarpMatrixInv, (img.shape[1], img.shape[0]))
@profile
def _zoomImg(self, img, scale):
return scipy.ndimage.zoom(img, scale)
def plotFit(self):
self._plotFit(self.FitLeft)
self._plotFit(self.FitRight)
def _plotFit(self, fits):
x = np.array(fits)
L = x.shape[0]
t = np.arange(0,L,1)
plt.figure(1)
plt.subplot(311)
plt.plot(t, x[:, 0])
plt.title('fit[0]')
plt.grid(True)
plt.subplot(312)
plt.plot(t, x[:, 1])
plt.title('fit[1]')
plt.grid(True)
plt.subplot(313)
plt.plot(t, x[:, 2])
plt.title('fit[2]')
plt.grid(True)
plt.show()
def initDetection(self, binary_warped):
"""Initialize the detection"""
self.InitializedLD = True
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
        midpoint = int(histogram.shape[0] // 2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# Choose the number of sliding windows
nwindows = 9
# Set height of windows
        window_height = int(binary_warped.shape[0] // nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
# Set the width of the windows +/- margin
margin = self.margin
# Set minimum number of pixels found to recenter window
minpix = 50
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window + 1) * window_height
win_y_high = binary_warped.shape[0] - window * window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high),
(0, 255, 0), 2)
cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high),
(0, 255, 0), 2)
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
            if len(good_left_inds) > minpix:
                leftx_current = int(np.mean(nonzerox[good_left_inds]))
            if len(good_right_inds) > minpix:
                rightx_current = int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit a second order polynomial to each
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# Update states of left and right lines
self.LeftLine.current_fit = left_fit
self.LeftLine.line_base_pos = leftx_base
self.LeftLine.allx = leftx
self.LeftLine.ally = lefty
self.RightLine.current_fit = right_fit
self.RightLine.line_base_pos = rightx_base
self.RightLine.allx = rightx
self.RightLine.ally = righty
        # Generate x and y values for plotting
        ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
        self.ploty = ploty
        left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
        right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
self.LeftLine.recent_xfitted = left_fitx
self.RightLine.recent_xfitted = right_fitx
self.distTop = np.absolute(left_fitx[0] - right_fitx[0])
        self.distButtom = np.absolute(left_fitx[-1] - right_fitx[-1])
# Pushback states
if len(self.LeftLine.fits) < self.LeftLine.N:
self.LeftLine.fits.append(self.LeftLine.current_fit)
else:
self.LeftLine.fits.pop(0)
self.LeftLine.fits.append(self.LeftLine.current_fit)
if len(self.RightLine.fits) < self.RightLine.N:
self.RightLine.fits.append(self.RightLine.current_fit)
else:
self.RightLine.fits.pop(0)
self.RightLine.fits.append(self.RightLine.current_fit)
return out_img
@profile
def detectCtn(self, binary_warped):
"""Continuous detection, based on previous detection"""
# Update last fit
self.LeftLine.last_fit = self.LeftLine.current_fit
self.RightLine.last_fit = self.RightLine.current_fit
# we know where the lines are in the previous frame
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
margin = self.margin
left_fit = self.LeftLine.current_fit
right_fit = self.RightLine.current_fit
        left_lane_inds = ((nonzerox > (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy + left_fit[2] - margin)) &
                          (nonzerox < (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy + left_fit[2] + margin)))
        right_lane_inds = ((nonzerox > (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy + right_fit[2] - margin)) &
                           (nonzerox < (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy + right_fit[2] + margin)))
# Again, extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit a second order polynomial to each
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# Generate x and y values for plotting
ploty = self.ploty
left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
dmax = np.max(np.absolute(left_fitx - right_fitx))
dmin = np.min(np.absolute(left_fitx - right_fitx))
if (dmax-dmin)/dmin > 0.4:# Not parallel
# Reset detection
if self.LeftLine.fail_num > 0:
self.LeftLine.fail_num += 1
if self.LeftLine.fail_num == self.LeftLine.MaxFail:
self.InitializedLD = False
else:
# reset failure number to zero
self.LeftLine.fail_num = 0
if dmax > 900 or dmin < 500:
self.LeftLine.current_fit = np.mean(np.array(self.LeftLine.fits), axis=0)
self.RightLine.current_fit = np.mean(np.array(self.RightLine.fits), axis=0)
else:
# Detection is true and update states
if np.max(np.absolute(left_fitx - self.LeftLine.recent_xfitted))>70:
self.LeftLine.current_fit = np.mean(np.array(self.LeftLine.fits), axis=0) # self.LeftLine.last_fit
else:
self.LeftLine.current_fit = left_fit
self.LeftLine.recent_xfitted = left_fitx
if np.max(np.absolute(right_fitx - self.RightLine.recent_xfitted))>70:
self.RightLine.current_fit = np.mean(np.array(self.RightLine.fits), axis=0) # self.RightLine.last_fit
else:
self.RightLine.current_fit = right_fit
self.RightLine.recent_xfitted = right_fitx
# Pushback states
if len(self.LeftLine.fits) < self.LeftLine.N:
self.LeftLine.fits.append(self.LeftLine.current_fit)
else:
self.LeftLine.fits.pop(0)
self.LeftLine.fits.append(self.LeftLine.current_fit)
if len(self.RightLine.fits) < self.RightLine.N:
self.RightLine.fits.append(self.RightLine.current_fit)
else:
self.RightLine.fits.pop(0)
self.RightLine.fits.append(self.RightLine.current_fit)
# Smooth the result
self.LeftLine.current_fit = np.mean(np.array(self.LeftLine.fits), axis=0)#self.LeftLine.last_fit
self.RightLine.current_fit = np.mean(np.array(self.RightLine.fits), axis=0) # self.RightLine.last_fit
self.LeftLine.diffs = left_fit - self.LeftLine.current_fit
# self.LeftLine.current_fit = left_fit
self.LeftLine.allx = leftx
self.LeftLine.ally = lefty
self.RightLine.diffs = right_fit - self.RightLine.current_fit
# self.RightLine.current_fit = right_fit
self.RightLine.allx = rightx
self.RightLine.ally = righty
def test():
# Read in the saved camera matrix and distortion coefficients
dist_pickle = pickle.load( open( "camera_cal/wide_dist_pickle.p", "rb" ) )
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]
M = dist_pickle["M"]
Minv = dist_pickle["Minv"]
a = Detector(mtx=mtx, dist=dist, M=M, Minv=Minv)
a.setBinaryFun(flag=4)
a.distance()
a.LeftLine.test()
a.debug = True
img = mpimg.imread('test_images/straight_lines1.jpg')
#img = mpimg.imread('./frames/1043.jpg')
#img = mpimg.imread('./challenge_frames/0468.jpg')
#img = img.astype(np.uint8)
tmp = a.detect(img)
plt.imshow(tmp)
plt.show()
img = mpimg.imread('./frames/0557.jpg')
#img = mpimg.imread('test_images/straight_lines2.jpg')
tmp = a.detect(img)
plt.imshow(tmp)
plt.show()
print_prof_data()
#a.plotFit()
def test2():
kf = KalmanFilter(3)
kf.printState()
print('return ',kf.update(np.array([1.2,0.5,0.9])))
print('return ', kf.update(np.array([1.2, 0.5, 0.9])+0.1))
kf.printState()
def test3():
print('rt ', np.eye(3))
def testKF():
import pickle
dist_pickle = pickle.load(open("camera_cal/wide_dist_pickle.p", "rb"))
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]
M = dist_pickle["M"]
Minv = dist_pickle["Minv"]
detector = Detector(mtx=mtx, dist=dist, M=M, Minv=Minv, sx_thresh=(20, 100), s_thresh=(170, 255))
q = [4, 4, 4]
R = [1, 1, 1]
detector.setKF_PR(q, R)
detector.setMargin(60)
detector.setBinaryFun(flag=5)
detector.switchKF(False)
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
white_output = 'output_images/project_output_v2_kf_tmp.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("./project_video.mp4").subclip(20,45)
# clip1 = VideoFileClip("./project_video.mp4").subclip(0,1)
clip1 = VideoFileClip("./challenge_video.mp4") # .subclip(18,48)
white_output = 'output_images/challenge_output_1.mp4'
white_clip = clip1.fl_image(detector.detect) #NOTE: this function expects color images!!
white_clip.write_videofile(white_output, audio=False)
#test3()
#test2()
#test()
#testKF()
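# Hypothetical entry point, added as a sketch: the test calls above are left
# commented out, so nothing runs on import. Running the module directly would
# exercise the single-image test.
if __name__ == '__main__':
    test()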
|
from flask import Flask
from flask_cors import CORS
import os
app = Flask(__name__)
CORS(app)
@app.route('/')
@app.route('/api')
@app.route('/api/get')
async def invalid_request():
return 'Invalid request', 400
@app.route('/api/get/plugins', methods=['GET'])
async def plugins_list():
    # List every sub-directory found under the plugins directory
    # (the 'plugins' response key is an assumed shape).
    pluginsListDirResp = [x[0] for x in os.walk("../../../plugins")]
    print(pluginsListDirResp)
    return {'plugins': pluginsListDirResp}
@app.route('/api/dragoncord/update', methods=['GET'])
async def updater_update():
os.system('git reset --hard')
os.system('cd .. & cd .. & cd .. & git reset --hard')
return "Updated", 200
app.run(debug = True, port = 8723)
|
import sklearn, re, nltk, base64, json, urllib2, os
import numpy as np
import cPickle as pickle
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
import os
MIN_RESULTS = 30 # Minimum number of results needed for valid user input
BASE_SEARCH_URL = 'https://api.twitter.com/1.1/search/tweets.json?'
class TweetMining(object):
def __init__(self, method = 'tf_idf_old'):
nltk.data.path.append('nltk_data/')
self.method = method
self.setup()
# Sets up Twitter API connection
def setup(self):
if os.path.isfile("config.py"):
config = {}
execfile("config.py", config)
consumer_key = config["consumer_key"]
consumer_secret = config["consumer_secret"]
elif os.path.isfile("project_template/config.py"):
config = {}
execfile("project_template/config.py", config)
consumer_key = config["consumer_key"]
consumer_secret = config["consumer_secret"]
else:
consumer_key = os.getenv('CONSUMER_KEY')
consumer_secret = os.getenv('CONSUMER_SECRET')
bearer_token = '%s:%s' % (consumer_key, consumer_secret)
bearer_token_64 = base64.b64encode(bearer_token)
token_request = urllib2.Request('https://api.twitter.com/oauth2/token')
token_request.add_header('Content-Type', 'application/x-www-form-urlencoded;charset=UTF-8')
token_request.add_header('Authorization', 'Basic %s' % bearer_token_64)
token_request.data = 'grant_type=client_credentials'
token_response = urllib2.urlopen(token_request)
token_contents = token_response.read()
token_data = json.loads(token_contents)
self.access_token = token_data['access_token']
if os.path.isfile("smaller_pho_dict.p"):
with open('smaller_pho_dict.p', 'rb') as handle:
self.dict = pickle.load(handle)
else:
with open('project_template/smaller_pho_dict.p', 'rb') as handle:
self.dict = pickle.load(handle)
if self.method == 'tf_idf_new':
if os.path.isfile("idf.pickle"):
with open('idf.pickle', 'rb') as handle:
self.idf = pickle.load(handle)
else:
with open('project_template/idf.pickle', 'rb') as handle:
self.idf = pickle.load(handle)
# Returns list of at most num_words topical words for the given hashtag_set
def get_topical_words(self, hashtag_set, num_words = 30):
hashtag_set = self.cleanup_tags(hashtag_set)
if self.method == 'tf_idf_old':
statuses = [t['text'] for t in self.get_tweets(hashtag_set, 100)]
if len(statuses) < MIN_RESULTS:
return []
self.process_tweets(statuses)
vect = TfidfVectorizer(min_df = 2, stop_words = 'english', strip_accents = 'ascii')
matrix = vect.fit_transform(statuses)
top_indices = np.argsort(vect.idf_)[::-1]
features = vect.get_feature_names()
return [features[i] for i in top_indices[:num_words]]
elif self.method == 'tf_idf_new':
statuses = [t['text'] for t in self.get_tweets(hashtag_set, 200 * len(hashtag_set))]
if len(statuses) < MIN_RESULTS:
return [], []
self.process_tweets(statuses, nouns_only = False)
getIDF = lambda word : self.idf[word] if word in self.idf else 0
vect = CountVectorizer(stop_words = 'english', strip_accents = 'ascii')
tf = vect.fit_transform([' '.join(statuses)]).toarray()
features = vect.get_feature_names()
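            # Smoothed IDF: log(N / (1 + df)); 1600000.0 is presumably the size
            # of the background corpus used to precompute idf.pickle.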
idf_vals = np.array([np.log(1600000.0 / (1 + getIDF(word))) for word in features])
tfidf = np.multiply(tf, idf_vals)
top_indices = np.argsort(tfidf[0])[::-1]
max_tfidf = tfidf[0][top_indices[0]]
frequencies = [(features[i], 80 * (tfidf[0][i] / max_tfidf)) for i in top_indices[:40]]
top_words = [(word, max_tfidf * 1.01) for word in hashtag_set if word.upper() in self.dict and word not in features]
for i in top_indices:
word = features[i]
if not any(word in pair for pair in top_words) and word.upper() in self.dict:
top_words.append((word, tfidf[0][i]))
if len(top_words) == num_words:
break
return top_words, frequencies
else:
raise Exception('Error: Invalid method specified')
# Helper function for get_topical_words
# Cleans up hashtag list input by stripping hashtags if they exist
def cleanup_tags(self, hashtags):
return [h.strip(',').strip('#').strip() for h in hashtags]
# Helper function for get_topical_words
# Returns list of dicts; access "text" key to get status text
# hashtag_set is a list of hashtags to search for (don't include #)
def get_tweets(self, hashtag_set, num_tweets = 500):
num_queries = num_tweets / 100
extra_tweets = num_tweets % 100
base_query = BASE_SEARCH_URL + 'q='
for i in range(len(hashtag_set)):
base_query += '%23' + hashtag_set[i]
if i < len(hashtag_set) - 1:
base_query += '%20OR%20'
base_query += '&lang=en&result_type=recent&count=100'
def callAPI(query_url):
request = urllib2.Request(query_url)
request.add_header('Authorization', 'Bearer %s' % self.access_token)
response = urllib2.urlopen(request)
contents = response.read()
return json.loads(contents)
result = []
query = base_query
for q in range(num_queries):
statuses = callAPI(query)['statuses']
if statuses == []:
return []
result.extend(statuses)
minID = min([status['id'] for status in statuses])
query = base_query + '&max_id=' + str(minID)
        if extra_tweets > 0:
query = re.sub(r'&count=\d+', '', query) + '&count=' + str(extra_tweets)
result.extend(callAPI(query)['statuses'])
return result
# Helper method for get_topical_words
# Processes statuses in-place by removing irrelevant components
def process_tweets(self, statuses, nouns_only = True):
for i in range(len(statuses)):
statuses[i] = re.sub(r'\S*/\S*', '', statuses[i]) # Links
statuses[i] = re.sub(r'htt\S*', '', statuses[i]) # Hanging https
statuses[i] = re.sub(r'#\S*', '', statuses[i]) # Hashtag symbols
statuses[i] = re.sub(r'(RT)*( )?@\S*', '', statuses[i]) # RT, @user
statuses[i] = re.sub(r'(RT |rt[^a-z])', '', statuses[i]) # RT/rt
statuses[i] = re.sub(r'\S*\d+\S*', '', statuses[i]) # Numerical
statuses[i] = re.sub(r"\w+'[^s ]+", '', statuses[i]) # Contractions
statuses[i] = re.sub(r'&\S+;', '', statuses[i]) # HTML entities
if nouns_only:
pos_info = nltk.pos_tag(nltk.word_tokenize(statuses[i]))
statuses[i] = ' '.join([word[0] for word in pos_info if 'NN' in word[1]])
|
import tweepy
import sys
import jsonpickle
import os
# Don't buffer stdout, so we can tail the log output redirected to a file
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
# API and ACCESS KEYS
API_KEY = sys.argv[1]
API_SECRET = sys.argv[2]
userIdfName = sys.argv[3]
outfName = sys.argv[4]
auth = tweepy.AppAuthHandler(API_KEY, API_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
if (not api):
print ("Can't Authenticate Bye!")
sys.exit(-1)
with open(userIdfName, 'r') as inp:
userIds = [line.rstrip('\n') for line in inp]
numUsers = len(userIds)
print("Going to query {0} users".format(numUsers))
usersQueried = 0
with open(outfName, 'w') as out:
while (usersQueried < numUsers):
batch = userIds[usersQueried:min(usersQueried+100, numUsers)]
usersQueried += 100
print("Going to Query {0} users".format(len(batch)))
users = api.lookup_users(user_ids=batch)
print("Got Back {0} users".format(len(users)))
for user in users:
out.write(jsonpickle.encode(user, unpicklable=False)+'\n')
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class CreateSnapshotGroupRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'CreateSnapshotGroup','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_InstantAccess(self): # Boolean
return self.get_query_params().get('InstantAccess')
def set_InstantAccess(self, InstantAccess): # Boolean
self.add_query_param('InstantAccess', InstantAccess)
def get_ExcludeDiskIds(self): # RepeatList
return self.get_query_params().get('ExcludeDiskId')
def set_ExcludeDiskIds(self, ExcludeDiskId): # RepeatList
for depth1 in range(len(ExcludeDiskId)):
self.add_query_param('ExcludeDiskId.' + str(depth1 + 1), ExcludeDiskId[depth1])
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_InstantAccessRetentionDays(self): # Integer
return self.get_query_params().get('InstantAccessRetentionDays')
def set_InstantAccessRetentionDays(self, InstantAccessRetentionDays): # Integer
self.add_query_param('InstantAccessRetentionDays', InstantAccessRetentionDays)
def get_DiskIds(self): # RepeatList
return self.get_query_params().get('DiskId')
def set_DiskIds(self, DiskId): # RepeatList
for depth1 in range(len(DiskId)):
self.add_query_param('DiskId.' + str(depth1 + 1), DiskId[depth1])
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
|
from conans import ConanFile, CMake, tools
class NlohmannjsonConan(ConanFile):
name = "nlohmann_json"
version = "3.1.2"
license = "MIT"
url = "https://github.com/elshize/conan-nlohmann_json"
code_url = "https://github.com/nlohmann/json"
description = "JSON for Modern C++"
build_policy = "always"
def source(self):
self.run("git clone %s --depth=1" % self.code_url)
def package(self):
self.copy("*.hpp", dst="include", src="json/include")
|
"""
Command line utility to remove files through glob patterns.
"""
import argparse
import glob
from os import path, remove
from sys import exit
from denverapi.colored_text import print
def main():
parser = argparse.ArgumentParser("rmr")
parser.add_argument(
"file", help="the file to remove (can be a glob pattern)", nargs="*", default=[]
)
args = parser.parse_args()
if not isinstance(args.file, list):
args.file = [args.file]
for file in args.file:
for x in glob.iglob(file, recursive=False):
try:
if path.isfile(x):
remove(x)
else:
print(f"Directory: {x} is a directory", fore="red")
except PermissionError:
print(f"Permission Denied: {x}", fore="red")
exit(1)
if __name__ == "__main__":
main()
|
d = {'x':1,'y':2,'z':3}
def a(x,y,z):
return x,y,z
print "\nFunction"
print a(1,2,3)
print a(z=3,x=1,y=2), a(z=3,y=2,x=1), a(y=2,z=3,x=1), a(y=2,x=1,z=3)
def b(x=0,y=0,z=0):
return x,y,z
print "\nFunction with defaults"
print b()
print b(1,2,3)
print b(1), b(2), b(3)
print b(x=1), b(y=2), b(z=3)
print b(x=1,z=3), b(z=3,x=1)
print b(x=1,y=2), b(y=2,x=1)
print b(z=3,y=2), b(y=2,z=3)
print b(z=3,x=1,y=2), b(z=3,y=2,x=1), b(y=2,z=3,x=1), b(y=2,x=1,z=3)
class A():
def __init__(self,x,y,z):
self.x = x
self.y = y
self.z = z
def __str__(self):
return str((self.x,self.y,self.z))
print "\nClass"
print A(1,2,3)
class B():
def __init__(self,x=0,y=0,z=0):
self.x = x
self.y = y
self.z = z
def __str__(self):
return str((self.x,self.y,self.z))
print "\nClass with defaults"
print B()
print B(1,2,3)
print B(1), B(2), B(3)
|
from . import callbacks, metrics, nlp_utils
|
import multiprocessing
import os
import time
from multiprocessing import freeze_support
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from datetime import datetime, timedelta
import runpy
def run_py():
runpy.run_path(path_name='main.py')
def rerun_script():
global p
p.terminate()
p = multiprocessing.Process(target=run_py)
p.start()
class MyHandler(FileSystemEventHandler):
def __init__(self):
self.last_modified = datetime.now()
def on_modified(self, event):
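        # Debounce: editors often emit several events per save, so ignore any
        # event that arrives within one second of the previously handled one.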
if datetime.now() - self.last_modified < timedelta(seconds=1):
return
else:
self.last_modified = datetime.now()
path: str = event.src_path.replace("~", "")
if path.endswith(".py"):
os.system('cls')
rerun_script()
if __name__ == "__main__":
freeze_support()
p = multiprocessing.Process(target=run_py)
p.start()
event_handler = MyHandler()
observer = Observer()
observer.schedule(event_handler, path='.', recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Split API."""
from tensorflow_datasets import testing
from tensorflow_datasets.core import proto
from tensorflow_datasets.core import splits
from tensorflow_datasets.core.utils import shard_utils
import tensorflow_datasets.public_api as tfds
RANGE_TRAIN = list(range(0, 2000))
RANGE_TEST = list(range(3000, 3200))
RANGE_VAL = list(range(6000, 6010))
class SplitDictTest(testing.TestCase):
def test_num_shards(self):
si = tfds.core.SplitInfo(name="train", shard_lengths=[1, 2, 3], num_bytes=0)
sd = splits.SplitDict([si], dataset_name="ds_name")
self.assertEqual(sd["train"].num_shards, 3)
class SplitsDictTest(testing.TestCase):
@property
def split_dict(self):
si = [
tfds.core.SplitInfo(name="train", shard_lengths=[10, 10], num_bytes=0),
tfds.core.SplitInfo(name="test", shard_lengths=[1], num_bytes=0),
]
sd = splits.SplitDict(si, dataset_name="ds_name")
return sd
def test_get(self):
s = self.split_dict["train"]
self.assertEqual("train", s.name)
self.assertEqual(2, s.num_shards)
def test_from_proto(self):
sd = splits.SplitDict.from_proto(
"ds_name", [
proto.SplitInfo(name="validation", shard_lengths=[5], num_bytes=0)
])
self.assertIn("validation", sd)
self.assertNotIn("train", sd)
self.assertNotIn("test", sd)
def test_to_proto(self):
sd = self.split_dict
sdp = sd.to_proto()
# Split order is preserved
self.assertEqual("train", sdp[0].name)
self.assertEqual([10, 10], sdp[0].shard_lengths)
self.assertEqual("test", sdp[1].name)
self.assertEqual([1], sdp[1].shard_lengths)
def test_bool(self):
sd = splits.SplitDict([], dataset_name="ds_name")
self.assertFalse(sd) # Empty split is False
si = [tfds.core.SplitInfo(name="train", shard_lengths=[5], num_bytes=0)]
sd = splits.SplitDict(si, dataset_name="ds_name")
self.assertTrue(sd) # Non-empty split is True
class SplitsTest(testing.TestCase):
@classmethod
def setUpClass(cls):
super(SplitsTest, cls).setUpClass()
cls._builder = testing.DummyDatasetSharedGenerator(
data_dir=testing.make_tmp_dir())
cls._builder.download_and_prepare()
def test_sub_split_num_examples(self):
s = self._builder.info.splits
self.assertEqual(s["train[75%:]"].num_examples, 5)
self.assertEqual(s["train[:75%]"].num_examples, 15)
self.assertEqual(
s["train"].num_examples,
s["train[75%:]"].num_examples + s["train[:75%]"].num_examples,
)
self.assertEqual(s["test[75%:]"].num_examples, 2)
self.assertEqual(s["test[:75%]"].num_examples, 8)
self.assertEqual(
s["test"].num_examples,
s["test[75%:]"].num_examples + s["test[:75%]"].num_examples,
)
def test_sub_split_file_instructions(self):
fi = self._builder.info.splits["train[75%:]"].file_instructions
self.assertEqual(fi, [shard_utils.FileInstruction(
filename="dummy_dataset_shared_generator-train.tfrecord-00000-of-00001",
skip=15,
take=-1,
num_examples=5,
)])
def test_split_file_instructions(self):
fi = self._builder.info.splits["train"].file_instructions
self.assertEqual(fi, [shard_utils.FileInstruction(
filename="dummy_dataset_shared_generator-train.tfrecord-00000-of-00001",
skip=0,
take=-1,
num_examples=20,
)])
def test_sub_split_filenames(self):
self.assertEqual(self._builder.info.splits["train"].filenames, [
"dummy_dataset_shared_generator-train.tfrecord-00000-of-00001",
])
self.assertEqual(self._builder.info.splits["train[75%:]"].filenames, [
"dummy_dataset_shared_generator-train.tfrecord-00000-of-00001",
])
def test_sub_split_wrong_key(self):
with self.assertRaisesWithPredicateMatch(
ValueError, "Unknown split \"unknown\""):
_ = self._builder.info.splits["unknown"]
def test_split_enum(self):
self.assertEqual(repr(splits.Split.TRAIN), "Split('train')")
self.assertIsInstance(splits.Split.TRAIN, splits.Split)
def test_even_splits(self):
self.assertEqual(
["train[0%:33%]", "train[33%:67%]", "train[67%:100%]"],
splits.even_splits("train", n=3),
)
self.assertEqual([
"train[0%:25%]", "train[25%:50%]", "train[50%:75%]", "train[75%:100%]"
], splits.even_splits("train", 4))
with self.assertRaises(ValueError):
splits.even_splits("train", 0)
with self.assertRaises(ValueError):
splits.even_splits("train", 101)
if __name__ == "__main__":
testing.test_main()
|
# Copyright 2012 Ning Ke
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import struct
import cPickle
import sys
import pstructstor
import oid
import ptrie
# Use PDSCache for internal Ptrie. Comment out the import line below if not
# using pdscache.
import pdscache
##
# TODO
# Pstructstor's garbage collector does not look at oidfs for reference to
# OIDs. This means that if you a PStructStor.keepOids() you must follow with
# a OidFS.save() call. Otherwise any previously stored oids will be stale,
# because those OIDs moved as a result of the garbage collection.
#
class OidFS(object):
''' Stores saved Oids within a file system.
Oids are saved using a Trie data structure (Ptrie) '''
rootoid_filename = "root-oid"
oidtable_pstor_dir = "pds-storage"
def _getPStor(self, pstorpath):
''' Creates a PStructStor to store Oids '''
if not os.path.isdir(pstorpath):
os.mkdir(pstorpath)
return pstructstor.PStructStor.mkpstor(pstorpath)
def _writeRootoid(self):
''' writes the root OID to file '''
rootoid = self._rootoid
# If PDSCache in use then write through the coid first
if isinstance(rootoid, pdscache._CachedOid):
rootoid = pdscache.write_coid(rootoid)
rootoidPath = os.path.join(self._storpath, OidFS.rootoid_filename)
fobj = open(rootoidPath, "w")
cPickle.dump(rootoid, fobj, 2)
fobj.close()
def _readRootoid(self):
''' Reads and return oid root from file '''
# unpickle
rootoidPath = os.path.join(self._storpath, OidFS.rootoid_filename)
fobj = open(rootoidPath, "r")
rootoid = cPickle.load(fobj)
fobj.close()
# If PDSCache in use then convert to coid
if rootoid is not oid.OID.Nulloid:
rootoid = pdscache.read_oid(rootoid)
return rootoid
def __init__(self, storpath):
if not os.path.isabs(storpath):
raise ValueError("storpath for OidFS must be absolute")
if not os.path.isdir(storpath):
os.mkdir(storpath)
self._storpath = storpath
# Get a PStructStor to store our OID Ptrie
pstorPath = os.path.join(storpath, OidFS.oidtable_pstor_dir)
self._oidPstor = self._getPStor(pstorPath)
# Use a Ptrie as our oid table
self._ptrieObj = ptrie.Ptrie(self._oidPstor)
# Create or find the root of OID trie: The root OID is saved in a file
rootoidPath = os.path.join(storpath, OidFS.rootoid_filename)
if not os.path.exists(rootoidPath):
# Create OID root, use OID.Nulloid as the root of an empty OID table
self._rootoid = ptrie.Nulltrie
self._writeRootoid()
else:
# load rootoid from file
self._rootoid = self._readRootoid()
def close(self):
#self.gc()
self._oidPstor.close()
def _store(self, o, oname):
''' Store an OID as oidname in database but don't write rootoid yet. '''
#print "Saving %s into %s" % (o, self._rootoid)
# If using pdscache then o is actually a coid.
if isinstance(o, pdscache._CachedOid):
o = pdscache.write_coid(o)
self._rootoid = self._ptrieObj.insert(self._rootoid, oname, o)
def store(self, o, oname):
''' Store an OID as oname in our database '''
self._store(o, oname)
self._writeRootoid()
def load(self, oidname):
''' Load the OID with ''oidname'' (that was used to save the OID
originally) from our database. '''
oidnode = self._ptrieObj.find(self._rootoid, oidname)
if not oidnode:
return oidnode
fields = self._ptrieObj.getfields(oidnode)
coid = fields['value']
return coid
def delete(self, oidname):
self._rootoid = self._ptrieObj.delete(self._rootoid, oidname)
self._writeRootoid()
def _collect_pstor(self):
''' Run GC on OID's pstructstor. OIDs stored in OidFS will be moved
as a result. Note this function assumes that stored oids can belong
to different PStors, which is currently allowed. In the future,
PStor should probably take possession of OidFS so that there is only
one PStor in a single OidFS. '''
# Since PStor's GC function moves OIDs, we have to make a new OID
# ptrie with the same oidname and new OID references.
pstordict = {}
for orec in self.oriter():
oname, o = orec
if isinstance(o, pdscache._CachedOid):
# o is type 'coid' and o.pstor is a PStructStor object
pstor = o.pstor
else:
# o is type 'OID' and o.pstor is a string
pstor = pstructstor.PStructStor.mkpstor(o.pstor)
if pstor not in pstordict:
# pstor dictionary's value is a (onames, ovalues) pair
pstordict[pstor] = [[], []]
onames, ovalues = pstordict[pstor]
onames.append(oname)
if isinstance(o, pdscache._CachedOid):
# Must convert back to real Oid
o = pdscache.write_coid(o)
ovalues.append(o)
if not len(pstordict):
return
# Now send to pstructstor for GC and essentially re-create our
# internal oid ptrie with new oid values
for pstor in pstordict:
onames, ovalues = pstordict[pstor]
pstordict[pstor][1] = ovalues = pstor.keepOids(ovalues)
for oname, o in zip(onames, ovalues):
self._store(o, oname)
self._writeRootoid()
def gc(self):
''' Garbage collects OidFS's internal Ptrie PStor. Saving only
self._rootoid. '''
# Run GC on OID's pstor first.
self._collect_pstor()
# Save oidfs's _rootoid
o = self._rootoid
if isinstance(self._rootoid, pdscache._CachedOid):
o = pdscache.write_coid(self._rootoid)
o, = self._oidPstor.keepOids([o])
o = pdscache.read_oid(o)
self._rootoid = o
self._writeRootoid()
def oriter(self):
''' Oid record 'orec' iterator - traverses OidFS In depth-first
(alphabetical) order. An Oid record is an (oidname, oid) tuple.'''
for node in self._ptrieObj.dfiter(self._rootoid):
f = self._ptrieObj.getfields(node)
if f['final']:
yield (f['prefix'], f['value'],)
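# Usage sketch (assuming an absolute storage path):
#   ofs = OidFS("/tmp/oidfs-store")
#   ofs.store(some_oid, "my-oid")
#   restored = ofs.load("my-oid")
#   ofs.close()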
|
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
from .. import __author__, __version__
import win32com.client as x32
import pythoncom
|
# Copyright (c) 2017-2021 Neogeo-Technologies.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
import uuid
from django.apps import apps
from django.contrib.auth.models import User
from django.contrib.gis.db import models
from django.contrib.postgres.fields import JSONField
from django.db.models.signals import pre_init
from django.dispatch import receiver
from django.utils import timezone
from idgo_admin.models.mail import send_extraction_failure_mail
from idgo_admin.models.mail import send_extraction_successfully_mail
class ExtractorSupportedFormat(models.Model):
class Meta(object):
verbose_name = "Format du service d'extraction"
verbose_name_plural = "Formats du service d'extraction"
name = models.SlugField(
verbose_name="Nom",
editable=False,
primary_key=True,
)
description = models.TextField(
verbose_name="Description",
unique=True,
)
details = JSONField(
verbose_name="Détails",
)
TYPE_CHOICES = (
('raster', 'raster'),
('vector', 'vector'),
)
type = models.CharField(
verbose_name="Type",
max_length=6,
null=True,
blank=True,
choices=TYPE_CHOICES,
)
def __str__(self):
return self.description
class AsyncExtractorTask(models.Model):
class Meta(object):
verbose_name = "Tâche de l'extracteur"
verbose_name_plural = "Tâches de l'extracteur"
uuid = models.UUIDField(
editable=False,
primary_key=True,
default=uuid.uuid4,
)
user = models.ForeignKey(
to=User,
)
foreign_value = models.CharField(
max_length=100,
null=True,
blank=True,
)
foreign_field = models.CharField(
max_length=100,
null=True,
blank=True,
)
app_label = models.CharField(
max_length=100,
null=True,
blank=True,
default='idgo_admin'
)
model = models.CharField(
max_length=100,
null=True,
blank=True,
)
success = models.NullBooleanField(
)
submission_datetime = models.DateTimeField(
null=True,
blank=True,
)
start_datetime = models.DateTimeField(
null=True,
blank=True,
)
stop_datetime = models.DateTimeField(
null=True,
blank=True,
)
query = JSONField(
null=True,
blank=True,
)
details = JSONField(
null=True,
blank=True,
)
def __str__(self):
return self.target_object.__str__()
@property
def status(self):
if self.success is True:
return 'Succès'
elif self.success is False:
return 'Échec'
elif self.success is None and not self.start_datetime:
return 'En attente'
elif self.success is None and self.start_datetime:
return 'En cours'
else:
return 'Inconnu'
@property
def elapsed_time(self):
if self.stop_datetime and self.success in (True, False):
return self.stop_datetime - self.submission_datetime
else:
return timezone.now() - self.submission_datetime
@property
def target_object(self):
Model = apps.get_model(app_label=self.app_label, model_name=self.model)
return Model.objects.get(**{self.foreign_field: self.foreign_value})
@receiver(pre_init, sender=AsyncExtractorTask)
def synchronize_extractor_task(sender, *args, **kwargs):
pre_init.disconnect(synchronize_extractor_task, sender=sender)
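    # Django generates Model.__doc__ as "ModelName(field1, field2, ...)" when no
    # docstring is given; parse it to map positional init args back to field names.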
doc = sender.__dict__.get('__doc__')
if doc.startswith(sender.__name__):
keys = doc[len(sender.__name__) + 1:-1].split(', ')
values = kwargs.get('args')
if len(keys) == len(values):
kvp = dict((k, values[i]) for i, k in enumerate(keys))
try:
instance = AsyncExtractorTask.objects.get(uuid=kvp['uuid'])
except AsyncExtractorTask.DoesNotExist:
pass
else:
if instance.success is None:
url = instance.details['possible_requests']['status']['url']
r = requests.get(url)
if r.status_code == 200:
details = instance.details
details.update(r.json())
instance.success = {
'SUCCESS': True,
'FAILURE': False,
}.get(details['status'], None)
instance.details = details
if instance.success is False:
instance.stop_datetime = timezone.now()
else:
instance.stop_datetime = details.get('end_datetime')
instance.start_datetime = \
details.get('start_datetime') or instance.stop_datetime
instance.save()
if instance.success is True:
send_extraction_successfully_mail(instance.user, instance)
elif instance.success is False:
send_extraction_failure_mail(instance.user, instance)
pre_init.connect(synchronize_extractor_task, sender=sender)
|
#!/usr/bin/env python3
"""
For modifications and text not covered by other licences:
Original software Copyright (C) 2020 Ward Hills
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import asyncio
import configparser
# import ard_ser
import pico_svr
# TODO set this to run when the pi boots
#
# Read config file
config = configparser.ConfigParser()
config.read('config.ini')
async def handle_echo(reader, writer):
# n = int(config['DATA']['characters'])
n = 1000
data = await reader.read(n)
# logging.debug('reader read {}'.format(message) )
# sensor = ard_ser.read(output_format='json')
sensor = pico_svr.read(output_format='json')
    data = sensor
    # print('Received', message, 'from', addr)
    # print('Send: ', message)
    # StreamWriter.write() expects bytes, so encode if the sensor read returned str.
    if isinstance(data, str):
        data = data.encode()
    writer.write(data)
await writer.drain()
# print("Close the connection")
writer.close()
async def main():
# server = await asyncio.start_server(
# handle_echo, '169.254.162.167', 8888)
server = await asyncio.start_server(
handle_echo, 'triscopepi.local', 8888)
# TODO change the IP address to host name
# hostname = socket.gethostname()
# print(hostname)
# ipaddr = socket.gethostbyname(hostname+'.local')
# ipaddr = socket.gethostbyname(hostname)
# print(ipaddr)
# server = await asyncio.start_server(
# handle_echo, ipaddr, 8888)
addr = server.sockets[0].getsockname()
print('Serving on ', addr)
async with server:
await server.serve_forever()
asyncio.run(main())
|
import json
import pytest
from gidgetlab import RedirectionException
from gidgetlab import abc as gl_abc
class MockGitLabAPI(gl_abc.GitLabAPI):
DEFAULT_HEADERS = {
"ratelimit-limit": "2",
"ratelimit-remaining": "1",
"ratelimit-reset": "0",
"content-type": "application/json",
}
def __init__(
self,
status_code=200,
headers=DEFAULT_HEADERS,
body=b"",
*,
url="https://gitlab.com",
api_version="v4",
cache=None,
):
self.response_code = status_code
self.response_headers = headers
self.response_body = body
super().__init__(
"test_abc",
access_token="access token",
url=url,
api_version=api_version,
cache=cache,
)
async def _request(self, method, url, headers, body=b""):
"""Make an HTTP request."""
self.method = method
self.url = url
self.headers = headers
self.body = body
response_headers = self.response_headers.copy()
try:
# Don't loop forever.
del self.response_headers["link"]
except KeyError:
pass
return self.response_code, response_headers, self.response_body
async def sleep(self, seconds): # pragma: no cover
"""Sleep for the specified number of seconds."""
self.slept = seconds
@pytest.mark.asyncio
async def test_url_formatted():
"""The URL is appropriately formatted."""
gl = MockGitLabAPI()
await gl._make_request("GET", "/groups/gitlab-org/projects", {}, "")
assert gl.url == "https://gitlab.com/api/v4/groups/gitlab-org/projects"
@pytest.mark.asyncio
async def test_headers():
"""Appropriate headers are created."""
gl = MockGitLabAPI()
await gl._make_request("GET", "/version", {}, "")
assert gl.headers["user-agent"] == "test_abc"
assert gl.headers["accept"] == "application/json"
assert gl.headers["private-token"] == "access token"
@pytest.mark.asyncio
async def test_rate_limit_set():
"""The rate limit is updated after receiving a response."""
rate_headers = {
"ratelimit-limit": "42",
"ratelimit-remaining": "1",
"ratelimit-reset": "0",
}
gl = MockGitLabAPI(headers=rate_headers)
await gl._make_request("GET", "/rate_limit", {}, "")
assert gl.rate_limit.limit == 42
@pytest.mark.asyncio
async def test_decoding():
"""Test that appropriate decoding occurs."""
original_data = {"hello": "world"}
headers = MockGitLabAPI.DEFAULT_HEADERS.copy()
headers["content-type"] = "application/json; charset=utf-8"
gl = MockGitLabAPI(headers=headers, body=json.dumps(original_data).encode("utf8"))
data, _ = await gl._make_request("GET", "/rate_limit", {}, "")
assert data == original_data
@pytest.mark.asyncio
async def test_more():
"""The 'next' link is returned appropriately."""
headers = MockGitLabAPI.DEFAULT_HEADERS.copy()
headers["link"] = "<https://gitlab.com/api/v4/fake?page=2>; " 'rel="next"'
gl = MockGitLabAPI(headers=headers)
_, more = await gl._make_request("GET", "/fake", {}, "")
assert more == "https://gitlab.com/api/v4/fake?page=2"
@pytest.mark.asyncio
async def test_getitem():
original_data = {"hello": "world"}
headers = MockGitLabAPI.DEFAULT_HEADERS.copy()
headers["content-type"] = "application/json; charset=UTF-8"
gl = MockGitLabAPI(headers=headers, body=json.dumps(original_data).encode("utf8"))
data = await gl.getitem("/fake")
assert gl.method == "GET"
assert data == original_data
@pytest.mark.asyncio
async def test_getiter():
"""Test that getiter() returns an async iterable as well as query string params."""
original_data = [1, 2]
next_url = "https://gitlab.com/api/v4/fake?page=2"
headers = MockGitLabAPI.DEFAULT_HEADERS.copy()
headers["content-type"] = "application/json; charset=UTF-8"
headers["link"] = f'<{next_url}>; rel="next"'
gl = MockGitLabAPI(headers=headers, body=json.dumps(original_data).encode("utf8"))
data = []
async for item in gl.getiter("/fake", {"foo": "stuff"}):
data.append(item)
assert gl.method == "GET"
assert gl.url == "https://gitlab.com/api/v4/fake?page=2&foo=stuff"
assert len(data) == 4
assert data[0] == 1
assert data[1] == 2
assert data[2] == 1
assert data[3] == 2
@pytest.mark.asyncio
async def test_post():
send = [1, 2, 3]
send_json = json.dumps(send).encode("utf-8")
receive = {"hello": "world"}
headers = MockGitLabAPI.DEFAULT_HEADERS.copy()
headers["content-type"] = "application/json; charset=utf-8"
gl = MockGitLabAPI(headers=headers, body=json.dumps(receive).encode("utf-8"))
await gl.post("/fake", data=send)
assert gl.method == "POST"
assert gl.headers["content-type"] == "application/json; charset=utf-8"
assert gl.body == send_json
assert gl.headers["content-length"] == str(len(send_json))
@pytest.mark.asyncio
async def test_patch():
send = [1, 2, 3]
send_json = json.dumps(send).encode("utf-8")
receive = {"hello": "world"}
headers = MockGitLabAPI.DEFAULT_HEADERS.copy()
headers["content-type"] = "application/json; charset=utf-8"
gl = MockGitLabAPI(headers=headers, body=json.dumps(receive).encode("utf-8"))
await gl.patch("/fake", data=send)
assert gl.method == "PATCH"
assert gl.headers["content-type"] == "application/json; charset=utf-8"
assert gl.body == send_json
assert gl.headers["content-length"] == str(len(send_json))
@pytest.mark.asyncio
async def test_put():
send = [1, 2, 3]
send_json = json.dumps(send).encode("utf-8")
receive = {"hello": "world"}
headers = MockGitLabAPI.DEFAULT_HEADERS.copy()
headers["content-type"] = "application/json; charset=utf-8"
gl = MockGitLabAPI(headers=headers, body=json.dumps(receive).encode("utf-8"))
await gl.put("/fake", data=send)
assert gl.method == "PUT"
assert gl.headers["content-type"] == "application/json; charset=utf-8"
assert gl.body == send_json
assert gl.headers["content-length"] == str(len(send_json))
@pytest.mark.asyncio
async def test_delete():
send = [1, 2, 3]
send_json = json.dumps(send).encode("utf-8")
receive = {"hello": "world"}
headers = MockGitLabAPI.DEFAULT_HEADERS.copy()
headers["content-type"] = "application/json; charset=utf-8"
gl = MockGitLabAPI(headers=headers, body=json.dumps(receive).encode("utf-8"))
await gl.delete("/fake", data=send)
assert gl.method == "DELETE"
assert gl.headers["content-type"] == "application/json; charset=utf-8"
assert gl.body == send_json
assert gl.headers["content-length"] == str(len(send_json))
class TestCache:
@pytest.mark.asyncio
async def test_if_none_match_sent(self):
etag = "12345"
cache = {"https://gitlab.com/api/v4/fake": (etag, None, "hi", None)}
gl = MockGitLabAPI(cache=cache)
await gl.getitem("/fake")
assert "if-none-match" in gl.headers
assert gl.headers["if-none-match"] == etag
@pytest.mark.asyncio
async def test_etag_received(self):
cache = {}
etag = "12345"
headers = MockGitLabAPI.DEFAULT_HEADERS.copy()
headers["etag"] = etag
gl = MockGitLabAPI(200, headers, b"42", cache=cache)
data = await gl.getitem("/fake")
url = "https://gitlab.com/api/v4/fake"
assert url in cache
assert cache[url] == (etag, None, 42, None)
assert data == cache[url][2]
@pytest.mark.asyncio
async def test_if_modified_since_sent(self):
last_modified = "12345"
cache = {"https://gitlab.com/api/v4/fake": (None, last_modified, "hi", None)}
gl = MockGitLabAPI(cache=cache)
await gl.getitem("/fake")
assert "if-modified-since" in gl.headers
assert gl.headers["if-modified-since"] == last_modified
@pytest.mark.asyncio
async def test_last_modified_received(self):
cache = {}
last_modified = "12345"
headers = MockGitLabAPI.DEFAULT_HEADERS.copy()
headers["last-modified"] = last_modified
gl = MockGitLabAPI(200, headers, b"42", cache=cache)
data = await gl.getitem("/fake")
url = "https://gitlab.com/api/v4/fake"
assert url in cache
assert cache[url] == (None, last_modified, 42, None)
assert data == cache[url][2]
@pytest.mark.asyncio
async def test_hit(self):
url = "https://gitlab.com/api/v4/fake"
cache = {url: ("12345", "67890", 42, None)}
gl = MockGitLabAPI(304, cache=cache)
data = await gl.getitem(url)
assert data == 42
@pytest.mark.asyncio
async def test_miss(self):
url = "https://gitlab.com/api/v4/fake"
cache = {url: ("12345", "67890", 42, None)}
headers = MockGitLabAPI.DEFAULT_HEADERS.copy()
headers["etag"] = "09876"
headers["last-modified"] = "54321"
gl = MockGitLabAPI(200, headers, body=b"-13", cache=cache)
data = await gl.getitem(url)
assert data == -13
assert cache[url] == ("09876", "54321", -13, None)
@pytest.mark.asyncio
async def test_ineligible(self):
cache = {}
gl = MockGitLabAPI(cache=cache)
url = "https://gitlab.com/api/v4/fake"
# Only way to force a GET request with a body.
await gl._make_request("GET", url, {}, 42)
assert url not in cache
await gl.post(url, data=42)
assert url not in cache
@pytest.mark.asyncio
async def test_redirect_without_cache(self):
cache = {}
gl = MockGitLabAPI(304, cache=cache)
with pytest.raises(RedirectionException):
await gl.getitem("/fake")
@pytest.mark.asyncio
async def test_no_cache(self):
headers = MockGitLabAPI.DEFAULT_HEADERS.copy()
headers["etag"] = "09876"
headers["last-modified"] = "54321"
gl = MockGitLabAPI(headers=headers)
await gl.getitem("/fake") # No exceptions raised.
class TestFormatUrl:
def test_absolute_url(self):
gl = MockGitLabAPI()
original_url = "https://gitlab.example.com/api/v4/projects"
url = gl.format_url(original_url, {})
assert url == original_url
def test_relative_url(self):
gl = MockGitLabAPI()
url = gl.format_url("/projects", {})
assert url == "https://gitlab.com/api/v4/projects"
def test_relative_url_non_default_url(self):
gl = MockGitLabAPI(url="https://my.gitlab.example.org")
url = gl.format_url("/projects", {})
assert url == "https://my.gitlab.example.org/api/v4/projects"
def test_relative_url_non_default_api_version(self):
gl = MockGitLabAPI(api_version="v3")
url = gl.format_url("/projects", {})
assert url == "https://gitlab.com/api/v3/projects"
def test_params(self):
gl = MockGitLabAPI()
url = "https://gitlab.com/api/v4/projects/9/trigger/pipeline"
params = {"token": "TOKEN", "ref": "master"}
# Pass params on an absolute URL.
url = gl.format_url(url, params)
assert (
url
== "https://gitlab.com/api/v4/projects/9/trigger/pipeline?token=TOKEN&ref=master"
)
        # No params on an absolute URL.
        unchanged_url = gl.format_url(url, {})
        assert unchanged_url == url
# Pass params on a relative URL.
url = gl.format_url("/projects/9/trigger/pipeline", params)
assert (
url
== "https://gitlab.com/api/v4/projects/9/trigger/pipeline?token=TOKEN&ref=master"
)
def test_params_quoting(self):
gl = MockGitLabAPI()
url = "https://gitlab.com/api/v4/projects/9/trigger/pipeline"
params = {"token": "TOKEN", "ref": "my branch"}
url = gl.format_url(url, params)
assert (
url
== "https://gitlab.com/api/v4/projects/9/trigger/pipeline?token=TOKEN&ref=my+branch"
)
def test_params_update_existing_query_string(self):
gl = MockGitLabAPI()
url = "https://gitlab.com/api/v4/fake?page=1"
params = {"key1": "value1", "key2": "value2"}
url = gl.format_url(url, params)
assert url == "https://gitlab.com/api/v4/fake?page=1&key1=value1&key2=value2"
def test_params_list_of_items(self):
gl = MockGitLabAPI()
url = "https://gitlab.com/api/v4/fake"
params = {"key1": "value1", "key2": ["value2", "value3"]}
url = gl.format_url(url, params)
assert (
url == "https://gitlab.com/api/v4/fake?key1=value1&key2=value2&key2=value3"
)
|
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense,Activation
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from sklearn.model_selection import cross_val_score,train_test_split
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
import os
import keras
import pickle as pkl
import dataset_creator as DC
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def train(X,Y):
encoder = LabelEncoder()
encoded_y = encoder.fit_transform(Y)
dummy_y = np_utils.to_categorical(encoded_y,29)
    # Split on the integer-encoded labels, since the model is compiled with
    # sparse_categorical_crossentropy.
    (trainData, testData, trainLabels, testLabels) = train_test_split(X, encoded_y, test_size=0.01, random_state=42)
'''
model = Sequential()
model.add(Dense(50,input_dim = 17,activation='relu'))
model.add(Dense(40,activation='relu'))
model.add(Dense(29))
model.add(Activation('softmax'))
model.compile(loss='sparse_categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
'''
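    # The Sequential architecture above is kept only for reference; training
    # resumes from the previously saved model on disk.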
model = keras.models.load_model('model_50_40.h5')
model.fit(trainData,trainLabels,epochs=10,verbose=1)
(loss,accuracy) = model.evaluate(testData,testLabels,verbose=1)
print("Loss: {}, accuracy: {}".format(loss,accuracy))
model.save('model_50_40.h5')
def model_prediction(X):
D = DC.returnToArabicDictionary()
model = keras.models.load_model('model_50_40.h5')
pred = model.predict_classes(X)
return [D[k] for k in pred]
'''
chunk = pd.read_csv('image_label_pair.csv')
chunk = chunk.values
X = chunk[:,:17].astype(float)
Y = chunk[:,17]
train(X,Y)
'''
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class BizOrderQueryResponse(object):
def __init__(self):
self._action = None
self._action_mode = None
self._apply_id = None
self._biz_context_info = None
self._biz_id = None
self._biz_type = None
self._create_time = None
self._op_id = None
self._request_id = None
self._result_code = None
self._result_desc = None
self._status = None
self._sub_status = None
self._update_time = None
@property
def action(self):
return self._action
@action.setter
def action(self, value):
self._action = value
@property
def action_mode(self):
return self._action_mode
@action_mode.setter
def action_mode(self, value):
self._action_mode = value
@property
def apply_id(self):
return self._apply_id
@apply_id.setter
def apply_id(self, value):
self._apply_id = value
@property
def biz_context_info(self):
return self._biz_context_info
@biz_context_info.setter
def biz_context_info(self, value):
self._biz_context_info = value
@property
def biz_id(self):
return self._biz_id
@biz_id.setter
def biz_id(self, value):
self._biz_id = value
@property
def biz_type(self):
return self._biz_type
@biz_type.setter
def biz_type(self, value):
self._biz_type = value
@property
def create_time(self):
return self._create_time
@create_time.setter
def create_time(self, value):
self._create_time = value
@property
def op_id(self):
return self._op_id
@op_id.setter
def op_id(self, value):
self._op_id = value
@property
def request_id(self):
return self._request_id
@request_id.setter
def request_id(self, value):
self._request_id = value
@property
def result_code(self):
return self._result_code
@result_code.setter
def result_code(self, value):
self._result_code = value
@property
def result_desc(self):
return self._result_desc
@result_desc.setter
def result_desc(self, value):
self._result_desc = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def sub_status(self):
return self._sub_status
@sub_status.setter
def sub_status(self, value):
self._sub_status = value
@property
def update_time(self):
return self._update_time
@update_time.setter
def update_time(self, value):
self._update_time = value
def to_alipay_dict(self):
params = dict()
if self.action:
if hasattr(self.action, 'to_alipay_dict'):
params['action'] = self.action.to_alipay_dict()
else:
params['action'] = self.action
if self.action_mode:
if hasattr(self.action_mode, 'to_alipay_dict'):
params['action_mode'] = self.action_mode.to_alipay_dict()
else:
params['action_mode'] = self.action_mode
if self.apply_id:
if hasattr(self.apply_id, 'to_alipay_dict'):
params['apply_id'] = self.apply_id.to_alipay_dict()
else:
params['apply_id'] = self.apply_id
if self.biz_context_info:
if hasattr(self.biz_context_info, 'to_alipay_dict'):
params['biz_context_info'] = self.biz_context_info.to_alipay_dict()
else:
params['biz_context_info'] = self.biz_context_info
if self.biz_id:
if hasattr(self.biz_id, 'to_alipay_dict'):
params['biz_id'] = self.biz_id.to_alipay_dict()
else:
params['biz_id'] = self.biz_id
if self.biz_type:
if hasattr(self.biz_type, 'to_alipay_dict'):
params['biz_type'] = self.biz_type.to_alipay_dict()
else:
params['biz_type'] = self.biz_type
if self.create_time:
if hasattr(self.create_time, 'to_alipay_dict'):
params['create_time'] = self.create_time.to_alipay_dict()
else:
params['create_time'] = self.create_time
if self.op_id:
if hasattr(self.op_id, 'to_alipay_dict'):
params['op_id'] = self.op_id.to_alipay_dict()
else:
params['op_id'] = self.op_id
if self.request_id:
if hasattr(self.request_id, 'to_alipay_dict'):
params['request_id'] = self.request_id.to_alipay_dict()
else:
params['request_id'] = self.request_id
if self.result_code:
if hasattr(self.result_code, 'to_alipay_dict'):
params['result_code'] = self.result_code.to_alipay_dict()
else:
params['result_code'] = self.result_code
if self.result_desc:
if hasattr(self.result_desc, 'to_alipay_dict'):
params['result_desc'] = self.result_desc.to_alipay_dict()
else:
params['result_desc'] = self.result_desc
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
if self.sub_status:
if hasattr(self.sub_status, 'to_alipay_dict'):
params['sub_status'] = self.sub_status.to_alipay_dict()
else:
params['sub_status'] = self.sub_status
if self.update_time:
if hasattr(self.update_time, 'to_alipay_dict'):
params['update_time'] = self.update_time.to_alipay_dict()
else:
params['update_time'] = self.update_time
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = BizOrderQueryResponse()
if 'action' in d:
o.action = d['action']
if 'action_mode' in d:
o.action_mode = d['action_mode']
if 'apply_id' in d:
o.apply_id = d['apply_id']
if 'biz_context_info' in d:
o.biz_context_info = d['biz_context_info']
if 'biz_id' in d:
o.biz_id = d['biz_id']
if 'biz_type' in d:
o.biz_type = d['biz_type']
if 'create_time' in d:
o.create_time = d['create_time']
if 'op_id' in d:
o.op_id = d['op_id']
if 'request_id' in d:
o.request_id = d['request_id']
if 'result_code' in d:
o.result_code = d['result_code']
if 'result_desc' in d:
o.result_desc = d['result_desc']
if 'status' in d:
o.status = d['status']
if 'sub_status' in d:
o.sub_status = d['sub_status']
if 'update_time' in d:
o.update_time = d['update_time']
return o
|
"""Gravatar Username Data Digger - Digger Downloaded Data for Usefull Information."""
import re
import logging
import urllib
import urllib.parse
from configparser import ConfigParser
from typing import Dict, List, Optional
from bs4 import BeautifulSoup, ResultSet, Tag
from OSIx.core.base_username_data_digger import SimpleUsernameDataDigger
from OSIx.core.bs4_helper import BS4Helper
from OSIx.core.decorator import bs4_error_hander
logger = logging.getLogger()
class GravatarUsernameDataDigger(SimpleUsernameDataDigger):
"""Gravatar Username Data Digger."""
def run(self, config: ConfigParser, args: Dict, data: Dict) -> None:
"""Execute Module."""
# Check the Activation and Get The Social Network Data
can_activate, username = self._can_activate(data=data)
if not can_activate or username is None:
return
# Check Gravatar Section
if 'gravatar' not in data:
data['gravatar'] = {}
logger.info('\t\tRunning...')
h_result: Dict = self.__get_gravatar_data(
username=username,
config=config
)
# Add to Data
data['gravatar'][username] = h_result
# Check if Found Anything
if h_result['username'] is None:
logger.info('\t\tUsername Not Found.')
return
def __get_gravatar_data(self, username: str, config: ConfigParser) -> Dict:
"""Get the Gravatar Data."""
# Download the Gravatar Profile Main Page Data
base_url = config['MODULE_GravatarUsernameDataDigger']['profile_url'].replace('{0}', username)
gravatar_data_profile: str = self._download_text(url=base_url, module='gravatar')
# Check if Found
if 'sorry, we couldn\'t find that' in gravatar_data_profile:
return {
'gravatar_id': None,
'image': None,
'username': None,
'fullname': None,
'location': None,
'links': [],
'gravatar_url': None
}
# Load HTML
bs4_helper_profile: BS4Helper = BS4Helper(soup=BeautifulSoup(gravatar_data_profile, 'html.parser'))
return {
'gravatar_id': self.__get_capture_group(regex=config['MODULE_GravatarUsernameDataDigger']['gravatar_id_regex'], target=gravatar_data_profile, index=0),
'image': self.__get_capture_group(regex=config['MODULE_GravatarUsernameDataDigger']['gravatar_image_regex'], target=gravatar_data_profile, index=0),
'username': username,
'fullname': self.__get_profile_fullname(bs4_helper_profile),
'location': self.__get_location(bs4_helper_profile),
'links': self.__get_links(bs4_helper_profile, config),
'gravatar_url': base_url
}
@bs4_error_hander()
def __get_capture_group(self, regex: str, target: str, index: int = 0) -> Optional[str]:
"""Safe Get the Capture Group Value."""
mt: Optional[re.Match[str]] = re.compile(regex).search(target)
if mt is None:
return None
return str(mt.groups()[index])
@bs4_error_hander(on_error_return_value='')
def __get_links(self, bs4_helper_profile: BS4Helper, config: ConfigParser) -> List:
"""Return the Username."""
h_result: List = []
list_details: ResultSet = [item for item in bs4_helper_profile.soup.find_all('ul', attrs={'class': 'list-details'}) if isinstance(item, Tag)]
# Iterate over "Find Me Online" & "Contact Me" Lists
for list_node in list_details:
# Iterate over List Items (Contacts, Links, etc)
for node in [item for item in list_node if isinstance(item, Tag)]:
# Parse Single Info
children: List = [item for item in node.children if isinstance(item, Tag)]
if len(children) < 2:
continue
is_target_js: bool = (children[1].find('a') is None)
entry: Dict = {
'name': children[0].text,
'full_target': self.__resolve_gravatar_info(children[1]) if not is_target_js else self.__resolve_gravatar_info_obfuscation(str(children[1].find('script').next), config)
}
entry['target'] = self.__sanitize(entry['full_target'])
h_result.append(entry)
return h_result
def __resolve_gravatar_info(self, target: ResultSet) -> str:
"""Resolve Gravatar Information."""
href: str = target.find('a').attrs['href']
if href == '#':
return str(target.find('a').text)
return str(urllib.parse.unquote(target.find('a').attrs['href']))
def __resolve_gravatar_info_obfuscation(self, info: str, config: ConfigParser) -> str:
"""Resolve Gravatar Information Obfuscation using JS."""
if 'grav_email' in info:
regex_a: str = config['MODULE_GravatarUsernameDataDigger']['gravatar_email_deobfuscation_regex'] # Capture This: ' grav_email( 'qualitybargain', 'mail.com' ); '
return f'{self.__get_capture_group(regex_a, info, 0)}@{self.__get_capture_group(regex_a, info, 1)}'
if 'xmpp' in info:
            regex_b: str = config['MODULE_GravatarUsernameDataDigger']['gravatar_email_xmpp_regex']  # Capture the XMPP handle from the obfuscation JS
return f'{self.__get_capture_group(regex_b, info, 0)}'
return info
@bs4_error_hander(on_error_return_value='')
def __get_profile_fullname(self, bs4_helper_profile: BS4Helper) -> str:
"""Return the Username."""
return str(bs4_helper_profile.soup.find_all('div', attrs={'class', 'profile-description'})[0].find('h2').find('a').text)
@bs4_error_hander(on_error_return_value='')
def __get_location(self, bs4_helper_profile: BS4Helper) -> str:
"""Return the Profile Picture."""
return str(bs4_helper_profile.soup.find_all('div', attrs={'class', 'profile-description'})[0].find('p').text)
def __sanitize(self, content: str) -> str:
"""Sanitize String."""
return content\
.replace('aim:goim?screenname=', '')\
.replace('ymsgr:sendim?', '')\
.replace('skype:', '')
class GravatarDataPrinter(SimpleUsernameDataDigger):
"""Print Gravatar Data into Sysout."""
def run(self, config: ConfigParser, args: Dict, data: Dict) -> None:
"""Execute Module."""
# Check the Activation and Get The Social Network Data
can_activate, username = self._can_activate(data=data)
if not can_activate or username is None:
return
# Check if Found Anything
if data['gravatar'][username]['username'] is None:
logger.info('\t\tUsername Not Present.')
return
# Dump the Output File
if args['username_print_result']:
logger.info(f'\t\tGravatar ID......: {data["gravatar"][username]["gravatar_id"]}')
logger.info(f'\t\tUsername.........: {data["gravatar"][username]["username"]}')
logger.info(f'\t\tFull Name........: {data["gravatar"][username]["fullname"]}')
logger.info(f'\t\tProfile Picture..: {data["gravatar"][username]["image"]}')
logger.info(f'\t\tLocation.........: {data["gravatar"][username]["location"]}')
logger.info(f'\t\tLinks............: {len(data["gravatar"][username]["links"])}')
for paste in data["gravatar"][username]["links"]:
logger.info(f'\t\t\t{paste["name"]} ({paste["full_target"]}) > {paste["target"]}')
|
if __name__ == "__main__":
import sys
sys.path.append("/Users/mike.barrameda/Projects/mfadvisor/mfadvisor-api")
from db import local_engine
from db.base import Base
from models import Account, Category, Transaction, TransactionType
from scripts.seed_categories import seed_categories
from scripts.seed_transaction_types import seed_transaction_types
from scripts.seed_accounts import seed_accounts
from scripts.seed_chase_checking import seed_chase_checking
engine = local_engine()
engine.execute("DROP TABLE IF EXISTS transactions;")
engine.execute("DROP TABLE IF EXISTS categories;")
engine.execute("DROP TABLE IF EXISTS transaction_types;")
engine.execute("DROP TABLE IF EXISTS accounts;")
Base.metadata.create_all(engine)
seed_categories(engine)
seed_transaction_types(engine)
seed_accounts(engine)
seed_chase_checking(engine)
|
# =============================================================================
# Copyright 2020 NVIDIA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import argparse
import json
import os
import nemo.collections.nlp as nemo_nlp
import nemo.collections.nlp.data.datasets.datasets_utils.data_preprocessing
import nemo.collections.nlp.utils.data_utils
from nemo import logging
from nemo.backends.pytorch.common.losses import CrossEntropyLossNM
from nemo.collections.nlp.callbacks.token_classification_callback import eval_epochs_done_callback, eval_iter_callback
from nemo.collections.nlp.data import NemoBertTokenizer, SentencePieceTokenizer
from nemo.collections.nlp.nm.data_layers import BertTokenClassificationDataLayer
from nemo.collections.nlp.nm.trainables import TokenClassifier
from nemo.utils.lr_policies import get_lr_policy
# Parsing arguments
parser = argparse.ArgumentParser(description="Token classification with pretrained BERT")
parser.add_argument("--local_rank", default=None, type=int)
parser.add_argument("--batch_size", default=8, type=int)
parser.add_argument("--max_seq_length", default=128, type=int)
parser.add_argument("--num_gpus", default=1, type=int)
parser.add_argument("--num_epochs", default=5, type=int)
parser.add_argument("--lr_warmup_proportion", default=0.1, type=float)
parser.add_argument("--lr", default=5e-5, type=float)
parser.add_argument("--lr_policy", default="WarmupAnnealing", type=str)
parser.add_argument("--weight_decay", default=0, type=float)
parser.add_argument("--optimizer_kind", default="adam", type=str)
parser.add_argument("--amp_opt_level", default="O0", type=str, choices=["O0", "O1", "O2"])
parser.add_argument("--data_dir", default="/data", type=str)
parser.add_argument("--fc_dropout", default=0.5, type=float)
parser.add_argument("--num_fc_layers", default=2, type=int)
parser.add_argument("--ignore_start_end", action='store_false')
parser.add_argument("--ignore_extra_tokens", action='store_false')
parser.add_argument("--none_label", default='O', type=str)
parser.add_argument("--no_shuffle_data", action='store_false', dest="shuffle_data")
parser.add_argument("--pretrained_bert_model", default="bert-base-cased", type=str)
parser.add_argument("--bert_checkpoint", default=None, type=str)
parser.add_argument("--bert_config", default=None, type=str, help="Path to bert config file in json format")
parser.add_argument(
"--tokenizer_model",
default="tokenizer.model",
type=str,
help="Path to pretrained tokenizer model, only used if --tokenizer is sentencepiece",
)
parser.add_argument(
"--tokenizer",
default="nemobert",
type=str,
choices=["nemobert", "sentencepiece"],
help="tokenizer to use, only relevant when using custom pretrained checkpoint.",
)
parser.add_argument(
"--work_dir",
default='output',
type=str,
help="The output directory where the model prediction and checkpoints will be written.",
)
parser.add_argument("--use_cache", action='store_true', help="Whether to cache preprocessed data")
parser.add_argument(
"--save_epoch_freq",
default=1,
type=int,
help="Frequency of saving checkpoint '-1' - step checkpoint won't be saved",
)
parser.add_argument(
"--save_step_freq",
default=-1,
type=int,
help="Frequency of saving checkpoint '-1' - step checkpoint won't be saved",
)
parser.add_argument("--loss_step_freq", default=250, type=int, help="Frequency of printing loss")
parser.add_argument("--use_weighted_loss", action='store_true', help="Flag to indicate whether to use weighted loss")
args = parser.parse_args()
if not os.path.exists(args.data_dir):
raise FileNotFoundError(
"Dataset not found. For NER, CoNLL-2003 dataset"
"can be obtained at"
"https://github.com/kyzhouhzau/BERT"
"-NER/tree/master/data."
)
nf = nemo.core.NeuralModuleFactory(
backend=nemo.core.Backend.PyTorch,
local_rank=args.local_rank,
optimization_level=args.amp_opt_level,
log_dir=args.work_dir,
create_tb_writer=True,
files_to_copy=[__file__],
add_time_to_log_dir=True,
)
logging.info(args)
output_file = f'{nf.work_dir}/output.txt'
if args.bert_checkpoint is None:
""" Use this if you're using a standard BERT model.
To see the list of pretrained models, call:
nemo_nlp.nm.trainables.huggingface.BERT.list_pretrained_models()
"""
tokenizer = NemoBertTokenizer(args.pretrained_bert_model)
model = nemo_nlp.nm.trainables.huggingface.BERT(pretrained_model_name=args.pretrained_bert_model)
else:
""" Use this if you're using a BERT model that you pre-trained yourself.
"""
if args.tokenizer == "sentencepiece":
special_tokens = nemo_nlp.utils.MODEL_SPECIAL_TOKENS['bert']
tokenizer = SentencePieceTokenizer(model_path=args.tokenizer_model)
elif args.tokenizer == "nemobert":
tokenizer = NemoBertTokenizer(args.pretrained_bert_model)
else:
raise ValueError(f"received unexpected tokenizer '{args.tokenizer}'")
if args.bert_config is not None:
with open(args.bert_config) as json_file:
config = json.load(json_file)
model = nemo_nlp.nm.trainables.huggingface.BERT(**config)
else:
model = nemo_nlp.nm.trainables.huggingface.BERT(pretrained_model_name=args.pretrained_bert_model)
model.restore_from(args.bert_checkpoint)
logging.info(f"Model restored from {args.bert_checkpoint}")
hidden_size = model.hidden_size
def create_pipeline(
pad_label=args.none_label,
max_seq_length=args.max_seq_length,
batch_size=args.batch_size,
num_gpus=args.num_gpus,
mode='train',
label_ids=None,
ignore_extra_tokens=args.ignore_extra_tokens,
ignore_start_end=args.ignore_start_end,
use_cache=args.use_cache,
dropout=args.fc_dropout,
num_layers=args.num_fc_layers,
classifier=TokenClassifier,
):
logging.info(f"Loading {mode} data...")
shuffle = args.shuffle_data if mode == 'train' else False
text_file = f'{args.data_dir}/text_{mode}.txt'
label_file = f'{args.data_dir}/labels_{mode}.txt'
    if not (os.path.exists(text_file) and os.path.exists(label_file)):
raise FileNotFoundError(
f'{text_file} or {label_file} not found. \
            The data should be split into 2 files: text.txt and labels.txt. \
Each line of the text.txt file contains text sequences, where words\
are separated with spaces. The labels.txt file contains \
corresponding labels for each word in text.txt, the labels are \
separated with spaces. Each line of the files should follow the \
format: \
[WORD] [SPACE] [WORD] [SPACE] [WORD] (for text.txt) and \
[LABEL] [SPACE] [LABEL] [SPACE] [LABEL] (for labels.txt).'
)
data_layer = BertTokenClassificationDataLayer(
tokenizer=tokenizer,
text_file=text_file,
label_file=label_file,
pad_label=pad_label,
label_ids=label_ids,
max_seq_length=max_seq_length,
batch_size=batch_size,
shuffle=shuffle,
ignore_extra_tokens=ignore_extra_tokens,
ignore_start_end=ignore_start_end,
use_cache=use_cache,
)
(input_ids, input_type_ids, input_mask, loss_mask, subtokens_mask, labels) = data_layer()
if mode == 'train':
label_ids = data_layer.dataset.label_ids
class_weights = None
if args.use_weighted_loss:
logging.info(f"Using weighted loss")
label_freqs = data_layer.dataset.label_frequencies
class_weights = nemo.collections.nlp.data.datasets.datasets_utils.data_preprocessing.calc_class_weights(
label_freqs
)
logging.info(f"class_weights: {class_weights}")
classifier = classifier(
hidden_size=hidden_size, num_classes=len(label_ids), dropout=dropout, num_layers=num_layers
)
task_loss = CrossEntropyLossNM(logits_dim=3, weight=class_weights)
hidden_states = model(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
logits = classifier(hidden_states=hidden_states)
if mode == 'train':
loss = task_loss(logits=logits, labels=labels, loss_mask=loss_mask)
steps_per_epoch = len(data_layer) // (batch_size * num_gpus)
tensors_to_evaluate = [loss, logits]
return tensors_to_evaluate, loss, steps_per_epoch, label_ids, classifier
else:
tensors_to_evaluate = [logits, labels, subtokens_mask]
return tensors_to_evaluate, data_layer
train_tensors, train_loss, steps_per_epoch, label_ids, classifier = create_pipeline()
eval_tensors, data_layer = create_pipeline(mode='dev', label_ids=label_ids, classifier=classifier)
logging.info(f"steps_per_epoch = {steps_per_epoch}")
# Create trainer and execute training action
train_callback = nemo.core.SimpleLossLoggerCallback(
tensors=train_tensors,
print_func=lambda x: logging.info("Loss: {:.3f}".format(x[0].item())),
get_tb_values=lambda x: [["loss", x[0]]],
tb_writer=nf.tb_writer,
)
eval_callback = nemo.core.EvaluatorCallback(
eval_tensors=eval_tensors,
user_iter_callback=lambda x, y: eval_iter_callback(x, y),
user_epochs_done_callback=lambda x: eval_epochs_done_callback(x, label_ids, f'{nf.work_dir}/graphs'),
tb_writer=nf.tb_writer,
eval_step=steps_per_epoch,
)
ckpt_callback = nemo.core.CheckpointCallback(
folder=nf.checkpoint_dir, epoch_freq=args.save_epoch_freq, step_freq=args.save_step_freq
)
lr_policy_fn = get_lr_policy(
args.lr_policy, total_steps=args.num_epochs * steps_per_epoch, warmup_ratio=args.lr_warmup_proportion
)
nf.train(
tensors_to_optimize=[train_loss],
callbacks=[train_callback, eval_callback, ckpt_callback],
lr_policy=lr_policy_fn,
optimizer=args.optimizer_kind,
optimization_params={"num_epochs": args.num_epochs, "lr": args.lr},
)
|
"""Helper functions."""
class Board:
def __init__(self, text):
self.has_won = False
self.number_rows = [
(int(rows_text[0:2]),
int(rows_text[3:5]),
int(rows_text[6:8]),
int(rows_text[9:11]),
int(rows_text[12:14])) for rows_text in text.split('\n')]
self.unmarked_set = set()
for row in self.number_rows:
for n in row:
self.unmarked_set.add(n)
self.marked_coords = set() # row,col pairs
self.number_coords = dict()
for r in range(len(self.number_rows)):
for c in range(len(self.number_rows[0])):
n = self.number_rows[r][c]
self.number_coords[n] = (r, c)
def mark(self, number):
if number in self.unmarked_set:
self.unmarked_set.remove(number)
(r, c) = self.number_coords[number]
self.marked_coords.add( (r, c) )
w = True
for r2 in range(len(self.number_rows)):
if not (r2, c) in self.marked_coords:
w = False
if w:
self.has_won = True
w = True
for c2 in range(len(self.number_rows[0])):
if not (r, c2) in self.marked_coords:
w = False
if w:
self.has_won = True
def won(self):
return self.has_won
def unmarked_sum(self):
return sum(self.unmarked_set)
def read_input(input_path):
numbers = []
boards = []
with open(input_path) as f:
numbers = [
int(s) for s in
f.readline().rstrip().split(',')]
while len(f.readline()) > 0:
t = (
f.readline() +
f.readline() +
f.readline() +
f.readline() +
f.readline()).rstrip()
boards.append(Board(t))
return (numbers, boards)
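# Illustrative usage sketch (not part of the original module): play the drawn
# numbers against every board and return the usual bingo score for the first
# winner (sum of unmarked numbers times the last number called). The default
# input path is an assumption for demonstration only.
def example_first_winner_score(input_path='input.txt'):
    numbers, boards = read_input(input_path)
    for number in numbers:
        for board in boards:
            board.mark(number)
            if board.won():
                return board.unmarked_sum() * number
    return None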
|
from ..engine.agent import Agent
from ..engine.decision import *
import numpy as np
from typing import Union
PlayDecision = Union[ActionPhaseDecision, TreasurePhaseDecision]
def find_card_in_decision(decision, card_name):
if isinstance(decision, PlayDecision.__args__):
for idx, move in enumerate(decision.moves):
if hasattr(move, 'card') and move.card.name == card_name:
return [idx]
elif isinstance(decision, BuyPhaseDecision):
for idx, move in enumerate(decision.moves):
if hasattr(move, 'card_name') and move.card_name == card_name:
return [idx]
return [0]
def get_minimum_coin_card(decision):
    card_coins = [move.card.coins for move in decision.moves]
return [np.argmin(card_coins)]
class PriorityAgent(Agent):
"""
    Doesn't currently work.
"""
def __init__(self, card_priorities):
self.card_priorities = card_priorities
def policy(self, decision, state_view):
if not decision.optional and len(decision.moves) == 1:
return [0]
if decision.optional and len(decision.moves) == 0:
return []
if decision.prompt == 'Select a card to trash from enemy Bandit.' or \
decision.prompt == 'Discard down to 3 cards.':
return get_minimum_coin_card(decision)
if state_view.player.phase == TurnPhase.TREASURE_PHASE:
return [1]
if state_view.player.phase == TurnPhase.BUY_PHASE:
all_cards_money = [c.coins for c in state_view.player.previous_deck] \
or [0]
hand_money_ev = np.mean(all_cards_money) * 5
if state_view.player.coins >= 8 and hand_money_ev > 8:
return find_card_in_decision(decision, 'Province')
elif state_view.player.coins >= 6:
return find_card_in_decision(decision, 'Gold')
elif state_view.player.coins >= 3:
return find_card_in_decision(decision, 'Silver')
return [0]
|
from requests import post, get, Response
"""
Related methods for submitting requests to the HTB V4 API
"""
def api_get(url: str, endpoint: str, headers: dict) -> list:
"""
api_get: Make a get request to HTB API
:param url: Target url to send request
:param endpoint: API path to a specific resource
:param headers: Headers http
:return: Response result
"""
return get(f"{url}{endpoint}", headers=headers, allow_redirects=False).json()
def api_post(url: str, endpoint: str, headers: dict, data: dict) -> Response:
"""
api_post: Send data through http POST to HTB API
:param url: Target url to send request
:param endpoint: API target path
:param headers: Headers http
:param data: Data to send
:return: Response result
"""
return post(f"{url}{endpoint}", headers=headers, data=data)
|
class rotate:
def __init__(self, image=[[0.0]], angle=0):
from scipy import ndimage
self.rot_img = ndimage.rotate(image, angle)
    def rotate_img(self):
return self.rot_img
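# Illustrative usage sketch (the array below is an assumption for demonstration only):
#   import numpy as np
#   rotated = rotate(image=np.eye(5), angle=45).rotate_img()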
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2018-03-18 11:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("camps", "0024_populate_camp_shortslugs")]
operations = [
migrations.AlterField(
model_name="camp",
name="shortslug",
field=models.SlugField(
help_text="Abbreviated version of the slug. Used in IRC channel names and other places with restricted name length.",
verbose_name="Short Slug",
),
)
]
|
#
# Simson's Sharepoint implementation
# This is how you can read from sharepoint with Windows Domain authentication.
import win32com.client
url = 'https://....'
h = win32com.client.Dispatch('WinHTTP.WinHTTPRequest.5.1')
h.SetAutoLogonPolicy(0)
h.Open('GET', url, False)
h.Send()
result = h.responseText
print(result)
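# If the endpoint returns JSON (for example a SharePoint REST call), the text can be
# parsed further; this follow-up is an illustrative assumption, not part of the original:
#   import json
#   data = json.loads(result)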
|
from storages.backends.s3boto3 import S3Boto3Storage
from furl import furl
from django.utils.module_loading import import_string
from django.conf import settings
import os
class BaseS3Storage(S3Boto3Storage):
def url(self, name):
url = super(BaseS3Storage, self).url(name)
if not self.querystring_auth:
f = furl(url)
if 'x-amz-security-token' in f.args:
del f.args['x-amz-security-token']
url = f.url
if os.environ.get('CDN_NAME'):
f = furl(url)
f.set(host=os.environ.get('CDN_NAME'))
url = f.url
return url
class StaticS3Storage(BaseS3Storage):
location = 'static'
default_acl = 'public-read'
def url(self, name):
url = super(StaticS3Storage, self).url(name)
if name.endswith('/') and not url.endswith('/'):
url += '/'
return url
class PublicReadS3Storage(BaseS3Storage):
querystring_auth = False
default_acl = 'public-read'
class S3Storage(BaseS3Storage):
pass
def get_public_read_storage_class():
return import_string(getattr(settings, 'PUBLIC_READ_FILE_STORAGE', settings.DEFAULT_FILE_STORAGE))
public_read_storage = get_public_read_storage_class()()
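# Illustrative settings sketch (module paths are assumptions for documentation only):
#   DEFAULT_FILE_STORAGE = 'myproject.storages.S3Storage'
#   PUBLIC_READ_FILE_STORAGE = 'myproject.storages.PublicReadS3Storage'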
|
from re import fullmatch
pattern1 = r"\s*\w{12,}\s*"
pattern2 = r"[^ieaou]+"
pattern3 = r"^[^ieaou]\w+[^ieaou]$"
pattern4 = r"^[ieaou]\w+[ieaou]$"
pattern5 = r"^\w{3}$"
for i in range(5, 21, 2):
pattern = r"^\w{" + str(i) + "}$"
pattern5 = pattern5 + "|" + pattern
print(pattern5)
with open("dictionary-tur.txt", mode="r") as myfile:
for word in myfile.readlines():
word = word.strip()
if fullmatch(pattern5, word):
print(word)
|
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from typing import Union
from botocore.paginate import Paginator
from datetime import datetime
from botocore.waiter import Waiter
from typing import List
class Client(BaseClient):
def can_paginate(self, operation_name: str = None):
pass
def create_backup_plan(self, BackupPlan: Dict, BackupPlanTags: Dict = None, CreatorRequestId: str = None) -> Dict:
pass
def create_backup_selection(self, BackupPlanId: str, BackupSelection: Dict, CreatorRequestId: str = None) -> Dict:
pass
def create_backup_vault(self, BackupVaultName: str, BackupVaultTags: Dict = None, EncryptionKeyArn: str = None, CreatorRequestId: str = None) -> Dict:
pass
def delete_backup_plan(self, BackupPlanId: str) -> Dict:
pass
def delete_backup_selection(self, BackupPlanId: str, SelectionId: str):
pass
def delete_backup_vault(self, BackupVaultName: str):
pass
def delete_backup_vault_access_policy(self, BackupVaultName: str):
pass
def delete_backup_vault_notifications(self, BackupVaultName: str):
pass
def delete_recovery_point(self, BackupVaultName: str, RecoveryPointArn: str):
pass
def describe_backup_job(self, BackupJobId: str) -> Dict:
pass
def describe_backup_vault(self, BackupVaultName: str) -> Dict:
pass
def describe_protected_resource(self, ResourceArn: str) -> Dict:
pass
def describe_recovery_point(self, BackupVaultName: str, RecoveryPointArn: str) -> Dict:
pass
def describe_restore_job(self, RestoreJobId: str) -> Dict:
pass
def export_backup_plan_template(self, BackupPlanId: str) -> Dict:
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
pass
def get_backup_plan(self, BackupPlanId: str, VersionId: str = None) -> Dict:
pass
def get_backup_plan_from_json(self, BackupPlanTemplateJson: str) -> Dict:
pass
def get_backup_plan_from_template(self, BackupPlanTemplateId: str) -> Dict:
pass
def get_backup_selection(self, BackupPlanId: str, SelectionId: str) -> Dict:
pass
def get_backup_vault_access_policy(self, BackupVaultName: str) -> Dict:
pass
def get_backup_vault_notifications(self, BackupVaultName: str) -> Dict:
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
pass
def get_recovery_point_restore_metadata(self, BackupVaultName: str, RecoveryPointArn: str) -> Dict:
pass
def get_supported_resource_types(self) -> Dict:
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
pass
def list_backup_jobs(self, NextToken: str = None, MaxResults: int = None, ByResourceArn: str = None, ByState: str = None, ByBackupVaultName: str = None, ByCreatedBefore: datetime = None, ByCreatedAfter: datetime = None, ByResourceType: str = None) -> Dict:
pass
def list_backup_plan_templates(self, NextToken: str = None, MaxResults: int = None) -> Dict:
pass
def list_backup_plan_versions(self, BackupPlanId: str, NextToken: str = None, MaxResults: int = None) -> Dict:
pass
def list_backup_plans(self, NextToken: str = None, MaxResults: int = None, IncludeDeleted: bool = None) -> Dict:
pass
def list_backup_selections(self, BackupPlanId: str, NextToken: str = None, MaxResults: int = None) -> Dict:
pass
def list_backup_vaults(self, NextToken: str = None, MaxResults: int = None) -> Dict:
pass
def list_protected_resources(self, NextToken: str = None, MaxResults: int = None) -> Dict:
pass
def list_recovery_points_by_backup_vault(self, BackupVaultName: str, NextToken: str = None, MaxResults: int = None, ByResourceArn: str = None, ByResourceType: str = None, ByBackupPlanId: str = None, ByCreatedBefore: datetime = None, ByCreatedAfter: datetime = None) -> Dict:
pass
def list_recovery_points_by_resource(self, ResourceArn: str, NextToken: str = None, MaxResults: int = None) -> Dict:
pass
def list_restore_jobs(self, NextToken: str = None, MaxResults: int = None) -> Dict:
pass
def list_tags(self, ResourceArn: str, NextToken: str = None, MaxResults: int = None) -> Dict:
pass
def put_backup_vault_access_policy(self, BackupVaultName: str, Policy: str = None):
pass
def put_backup_vault_notifications(self, BackupVaultName: str, SNSTopicArn: str, BackupVaultEvents: List):
pass
def start_backup_job(self, BackupVaultName: str, ResourceArn: str, IamRoleArn: str, IdempotencyToken: str = None, StartWindowMinutes: int = None, CompleteWindowMinutes: int = None, Lifecycle: Dict = None, RecoveryPointTags: Dict = None) -> Dict:
pass
def start_restore_job(self, RecoveryPointArn: str, Metadata: Dict, IamRoleArn: str, IdempotencyToken: str = None, ResourceType: str = None) -> Dict:
pass
def stop_backup_job(self, BackupJobId: str):
pass
def tag_resource(self, ResourceArn: str, Tags: Dict):
pass
def untag_resource(self, ResourceArn: str, TagKeyList: List):
pass
def update_backup_plan(self, BackupPlanId: str, BackupPlan: Dict) -> Dict:
pass
def update_recovery_point_lifecycle(self, BackupVaultName: str, RecoveryPointArn: str, Lifecycle: Dict = None) -> Dict:
pass
|
# Copyright 2019 Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from crl.interactivesessions.shells.sudoshell import BashShell
from .clusterverifier import CorruptedVerifier
from .cluster import Cluster
from . import cluster
def test_get_host(clusterverifier):
clusterverifier.verify_get_host()
def test_master_external_ip(clusterverifier):
clusterverifier.verify_master_external_ip()
def test_get_hosts_with_profile(clusterverifier):
clusterverifier.verify_get_hosts_with_profiles()
def test_get_hosts_containing(clusterverifier):
clusterverifier.verify_hosts_containing()
def test_cluster_caching(clusterverifier):
clusterverifier.verify_cluster_config_caching()
def test_cluster_singleton():
assert Cluster() == Cluster()
def test_cluster_mgmt_shelldicts(clusterverifier):
clusterverifier.verify_mgmt_shelldicts()
def test_get_host_raises(clustermocks):
c = CorruptedVerifier(clustermocks)
c.verify_corrupted_raises()
def test_create_remotesession(clusterverifier):
clusterverifier.verify_create_remotesession()
def test_initialize_remotesession(clusterverifier):
clusterverifier.verify_initialize_remotesession()
def test_create_hostcli(clusterverifier):
clusterverifier.verify_create_hostcli()
def test_initialize_hostcli(clusterverifier):
clusterverifier.verify_initialize_hostcli()
def test_create_user_with_roles(clusterverifier):
clusterverifier.verify_create_user_with_roles()
def test_delete_users(clusterverifier):
clusterverifier.verify_delete_users()
def test_envcreator_usage(clusterverifier):
clusterverifier.verify_envcreator()
def test_sudoshell_in_cluster():
assert cluster.BashShell == BashShell
def test_is_dpdk(clusterverifier):
clusterverifier.verify_is_dpdk()
def test_get_hosts_with_dpdk(clusterverifier):
clusterverifier.verify_get_hosts_with_dpdk()
|
"""Maintains a persistent connection to the USRP.
Example usage:
>>> from scos_usrp.hardware import radio
>>> radio.is_available
True
>>> rx = radio
>>> rx.sample_rate = 10e6
>>> rx.frequency = 700e6
>>> rx.gain = 40
>>> samples = rx.acquire_time_domain_samples(1000)
"""
import logging
from datetime import datetime
import numpy as np
from scos_actions import utils
from scos_actions.hardware.radio_iface import RadioInterface
from scos_usrp import settings
from scos_usrp.hardware import calibration
from scos_usrp.hardware.mocks.usrp_block import MockUsrp
from scos_usrp.hardware.tests.resources.utils import create_dummy_calibration
logger = logging.getLogger(__name__)
logger.debug(f"USRP_CONNECTION_ARGS = {settings.USRP_CONNECTION_ARGS}")
# Testing determined these gain values provide a good mix of sensitivity and
# dynamic range performance
VALID_GAINS = (0, 20, 40, 60)
# Define the default calibration dicts
DEFAULT_SIGAN_CALIBRATION = {
"gain_sigan": None, # Defaults to gain setting
"enbw_sigan": None, # Defaults to sample rate
"noise_figure_sigan": 0,
"1db_compression_sigan": 100,
}
DEFAULT_SENSOR_CALIBRATION = {
"gain_sensor": None, # Defaults to sigan gain
"enbw_sensor": None, # Defaults to sigan enbw
"noise_figure_sensor": None, # Defaults to sigan noise figure
"1db_compression_sensor": None, # Defaults to sigan compression + preselector gain
"gain_preselector": 0,
"noise_figure_preselector": 0,
"1db_compression_preselector": 100,
}
class USRPRadio(RadioInterface):
@property
def last_calibration_time(self):
""" Returns the last calibration time from calibration data. """
if self.sensor_calibration:
return utils.convert_string_to_millisecond_iso_format(
self.sensor_calibration.calibration_datetime
)
return None
@property
def overload(self):
""" Returns True if overload occurred, otherwise returns False. """
return self._sigan_overload or self._sensor_overload
# Define thresholds for determining ADC overload for the sigan
ADC_FULL_RANGE_THRESHOLD = 0.98 # ADC scale -1<sample<1, magnitude threshold = 0.98
ADC_OVERLOAD_THRESHOLD = (
0.01 # Ratio of samples above the ADC full range to trigger overload
)
def __init__(
self,
sensor_cal_file=settings.SENSOR_CALIBRATION_FILE,
sigan_cal_file=settings.SIGAN_CALIBRATION_FILE,
):
self.uhd = None
self.usrp = None
self._is_available = False
self.sensor_calibration_data = None
self.sigan_calibration_data = None
self.sensor_calibration = None
self.sigan_calibration = None
self.lo_freq = None
self.dsp_freq = None
self._sigan_overload = False
self._sensor_overload = False
self._capture_time = None
self.connect()
self.get_calibration(sensor_cal_file, sigan_cal_file)
def connect(self):
if self._is_available:
return True
if settings.RUNNING_TESTS or settings.MOCK_RADIO:
logger.warning("Using mock USRP.")
random = settings.MOCK_RADIO_RANDOM
self.usrp = MockUsrp(randomize_values=random)
self._is_available = True
else:
try:
import uhd
self.uhd = uhd
except ImportError:
logger.warning("uhd not available - disabling radio")
return False
usrp_args = (
f"type=b200,{settings.USRP_CONNECTION_ARGS}" # find any b-series device
)
logger.debug(f"usrp_args = {usrp_args}")
try:
self.usrp = self.uhd.usrp.MultiUSRP(usrp_args)
except RuntimeError:
err = "No device found matching search parameters {!r}\n"
err = err.format(usrp_args)
raise RuntimeError(err)
logger.debug("Using the following USRP:")
logger.debug(self.usrp.get_pp_string())
try:
self._is_available = True
return True
except Exception as err:
logger.exception(err)
return False
@property
def is_available(self):
""" Returns True if initialized and ready to make measurements, otherwise returns False. """
return self._is_available
def get_calibration(self, sensor_cal_file, sigan_cal_file):
""" Get calibration data from sensor_cal_file and sigan_cal_file. """
# Set the default calibration values
self.sensor_calibration_data = DEFAULT_SENSOR_CALIBRATION.copy()
self.sigan_calibration_data = DEFAULT_SIGAN_CALIBRATION.copy()
# Try and load sensor/sigan calibration data
if not settings.RUNNING_TESTS and not settings.MOCK_RADIO:
try:
self.sensor_calibration = calibration.load_from_json(sensor_cal_file)
except Exception as err:
logger.error(
"Unable to load sensor calibration data, reverting to none"
)
logger.exception(err)
self.sensor_calibration = None
try:
self.sigan_calibration = calibration.load_from_json(sigan_cal_file)
except Exception as err:
logger.error("Unable to load sigan calibration data, reverting to none")
logger.exception(err)
self.sigan_calibration = None
else: # If in testing, create our own test files
dummy_calibration = create_dummy_calibration()
self.sensor_calibration = dummy_calibration
self.sigan_calibration = dummy_calibration
@property
def sample_rate(self):
""" Returns the currently configured sample rate in samples per second. """
return self.usrp.get_rx_rate()
@sample_rate.setter
def sample_rate(self, rate):
"""Sets the sample_rate and the clock_rate based on the sample_rate
:type sample_rate: float
:param sample_rate: Sample rate in samples per second
"""
self.usrp.set_rx_rate(rate)
fs_MSps = self.sample_rate / 1e6
logger.debug("set USRP sample rate: {:.2f} MSps".format(fs_MSps))
# Set the clock rate based on calibration
if self.sigan_calibration is not None:
clock_rate = self.sigan_calibration.get_clock_rate(rate)
else:
clock_rate = self.sample_rate
# Maximize clock rate while keeping it under 40e6
while clock_rate <= 40e6:
clock_rate *= 2
clock_rate /= 2
self.clock_rate = clock_rate
@property
def clock_rate(self):
""" Returns the currently configured clock rate in hertz. """
return self.usrp.get_master_clock_rate()
@clock_rate.setter
def clock_rate(self, rate):
""" Sets the signal analyzer clock rate.
:type rate: float
:param rate: Clock rate in hertz
"""
self.usrp.set_master_clock_rate(rate)
clk_MHz = self.clock_rate / 1e6
logger.debug("set USRP clock rate: {:.2f} MHz".format(clk_MHz))
@property
def frequency(self):
""" Returns the currently configured center frequency in hertz. """
return self.usrp.get_rx_freq()
@frequency.setter
def frequency(self, freq):
""" Sets the signal analyzer frequency.
:type freq: float
:param freq: Frequency in hertz
"""
self.tune_frequency(freq)
def tune_frequency(self, rf_freq, dsp_freq=0):
""" Tunes the signal analyzer as close as possible to the desired frequency.
:type rf_freq: float
:param rf_freq: Desired frequency in hertz
:type dsp_freq: float
:param dsp_freq: LO offset frequency in hertz
"""
if isinstance(self.usrp, MockUsrp):
tune_result = self.usrp.set_rx_freq(rf_freq, dsp_freq)
logger.debug(tune_result)
else:
tune_request = self.uhd.types.TuneRequest(rf_freq, dsp_freq)
tune_result = self.usrp.set_rx_freq(tune_request)
msg = "rf_freq: {}, dsp_freq: {}"
logger.debug(msg.format(rf_freq, dsp_freq))
self.lo_freq = rf_freq
self.dsp_freq = dsp_freq
@property
def gain(self):
""" Returns the currently configured gain setting in dB. """
return self.usrp.get_rx_gain()
@gain.setter
def gain(self, gain):
""" Sets the signal analyzer gain setting.
:type gain: float
:param gain: Gain in dB
"""
if gain not in VALID_GAINS:
err = "Requested invalid gain {}. ".format(gain)
err += "Choose one of {!r}.".format(VALID_GAINS)
logger.error(err)
return
self.usrp.set_rx_gain(gain)
msg = "set USRP gain: {:.1f} dB"
logger.debug(msg.format(self.usrp.get_rx_gain()))
def recompute_calibration_data(self):
"""Set the calibration data based on the currently tuning"""
# Try and get the sensor calibration data
self.sensor_calibration_data = DEFAULT_SENSOR_CALIBRATION.copy()
if self.sensor_calibration is not None:
self.sensor_calibration_data.update(
self.sensor_calibration.get_calibration_dict(
sample_rate=self.sample_rate,
lo_frequency=self.frequency,
gain=self.gain,
)
)
# Try and get the sigan calibration data
self.sigan_calibration_data = DEFAULT_SIGAN_CALIBRATION.copy()
if self.sigan_calibration is not None:
self.sigan_calibration_data.update(
self.sigan_calibration.get_calibration_dict(
sample_rate=self.sample_rate,
lo_frequency=self.frequency,
gain=self.gain,
)
)
# Catch any defaulting calibration values for the sigan
if self.sigan_calibration_data["gain_sigan"] is None:
self.sigan_calibration_data["gain_sigan"] = self.gain
if self.sigan_calibration_data["enbw_sigan"] is None:
self.sigan_calibration_data["enbw_sigan"] = self.sample_rate
# Catch any defaulting calibration values for the sensor
if self.sensor_calibration_data["gain_sensor"] is None:
self.sensor_calibration_data["gain_sensor"] = self.sigan_calibration_data[
"gain_sigan"
]
if self.sensor_calibration_data["enbw_sensor"] is None:
self.sensor_calibration_data["enbw_sensor"] = self.sigan_calibration_data[
"enbw_sigan"
]
if self.sensor_calibration_data["noise_figure_sensor"] is None:
self.sensor_calibration_data[
"noise_figure_sensor"
] = self.sigan_calibration_data["noise_figure_sigan"]
if self.sensor_calibration_data["1db_compression_sensor"] is None:
self.sensor_calibration_data["1db_compression_sensor"] = (
self.sensor_calibration_data["gain_preselector"]
+ self.sigan_calibration_data["1db_compression_sigan"]
)
def create_calibration_annotation(self):
""" Creates the SigMF calibration annotation. """
annotation_md = {
"ntia-core:annotation_type": "CalibrationAnnotation",
"ntia-sensor:gain_sigan": self.sigan_calibration_data["gain_sigan"],
"ntia-sensor:noise_figure_sigan": self.sigan_calibration_data[
"noise_figure_sigan"
],
"ntia-sensor:1db_compression_point_sigan": self.sigan_calibration_data[
"1db_compression_sigan"
],
"ntia-sensor:enbw_sigan": self.sigan_calibration_data["enbw_sigan"],
"ntia-sensor:gain_preselector": self.sensor_calibration_data[
"gain_preselector"
],
"ntia-sensor:noise_figure_sensor": self.sensor_calibration_data[
"noise_figure_sensor"
],
"ntia-sensor:1db_compression_point_sensor": self.sensor_calibration_data[
"1db_compression_sensor"
],
"ntia-sensor:enbw_sensor": self.sensor_calibration_data["enbw_sensor"],
}
return annotation_md
def check_sensor_overload(self, data):
""" Check for sensor overload in the measurement data. """
measured_data = data.astype(np.complex64)
time_domain_avg_power = 10 * np.log10(np.mean(np.abs(measured_data) ** 2))
time_domain_avg_power += (
10 * np.log10(1 / (2 * 50)) + 30
) # Convert log(V^2) to dBm
self._sensor_overload = False
# explicitly check is not None since 1db compression could be 0
if self.sensor_calibration_data["1db_compression_sensor"] is not None:
self._sensor_overload = (
time_domain_avg_power
> self.sensor_calibration_data["1db_compression_sensor"]
)
def acquire_time_domain_samples(self, num_samples, num_samples_skip=0, retries=5):
"""Acquire num_samples_skip+num_samples samples and return the last num_samples
:type num_samples: int
:param num_samples: Number of samples to acquire
:type num_samples_skip: int
:param num_samples_skip: Skip samples to allow signal analyzer DC offset and IQ imbalance algorithms to take effect
:type retries: int
:param retries: The number of retries to attempt when failing to acquire samples
:rtype: dictionary containing the following:
data - (list) measurement data
overload - (boolean) True if overload occurred, otherwise False
frequency - (float) Measurement center frequency in hertz
gain - (float) Measurement signal analyzer gain setting in dB
sample_rate - (float) Measurement sample rate in samples per second
capture_time - (string) Measurement capture time
calibration_annotation - (dict) SigMF calibration annotation
"""
self._sigan_overload = False
self._capture_time = None
# Get the calibration data for the acquisition
self.recompute_calibration_data()
nsamps = int(num_samples)
nskip = int(num_samples_skip)
# Compute the linear gain
db_gain = self.sensor_calibration_data["gain_sensor"]
linear_gain = 10 ** (db_gain / 20.0)
# Try to acquire the samples
max_retries = retries
while True:
# No need to skip initial samples when simulating the radio
if not settings.RUNNING_TESTS and not settings.MOCK_RADIO:
nsamps += nskip
self._capture_time = utils.get_datetime_str_now()
samples = self.usrp.recv_num_samps(
nsamps, # number of samples
self.frequency, # center frequency in Hz
self.sample_rate, # sample rate in samples per second
[0], # channel list
self.gain, # gain in dB
)
# usrp.recv_num_samps returns a numpy array of shape
# (n_channels, n_samples) and dtype complex64
assert samples.dtype == np.complex64
assert len(samples.shape) == 2 and samples.shape[0] == 1
data = samples[0] # isolate data for channel 0
data_len = len(data)
if not settings.RUNNING_TESTS and not settings.MOCK_RADIO:
data = data[nskip:]
if not len(data) == num_samples:
if retries > 0:
msg = "USRP error: requested {} samples, but got {}."
logger.warning(msg.format(num_samples + num_samples_skip, data_len))
logger.warning("Retrying {} more times.".format(retries))
retries = retries - 1
else:
err = "Failed to acquire correct number of samples "
err += "{} times in a row.".format(max_retries)
raise RuntimeError(err)
else:
logger.debug("Successfully acquired {} samples.".format(num_samples))
# Check IQ values versus ADC max for sigan compression
self._sigan_overload = False
i_samples = np.abs(np.real(data))
q_samples = np.abs(np.imag(data))
i_over_threshold = np.sum(i_samples > self.ADC_FULL_RANGE_THRESHOLD)
q_over_threshold = np.sum(q_samples > self.ADC_FULL_RANGE_THRESHOLD)
total_over_threshold = i_over_threshold + q_over_threshold
ratio_over_threshold = float(total_over_threshold) / num_samples
if ratio_over_threshold > self.ADC_OVERLOAD_THRESHOLD:
self._sigan_overload = True
# Scale the data back to RF power and return it
data /= linear_gain
self.check_sensor_overload(data)
measurement_result = {
"data": data,
"overload": self.overload,
"frequency": self.frequency,
"gain": self.gain,
"sample_rate": self.sample_rate,
"capture_time": self._capture_time,
"calibration_annotation": self.create_calibration_annotation(),
}
return measurement_result
@property
def healthy(self):
""" Check for ability to acquire samples from the signal analyzer. """
logger.debug("Performing USRP health check")
if not self.is_available:
return False
# arbitrary number of samples to acquire to check health of usrp
# keep above ~70k to catch previous errors seen at ~70k
requested_samples = 100000
try:
measurement_result = self.acquire_time_domain_samples(requested_samples)
data = measurement_result["data"]
except Exception as e:
logger.error("Unable to acquire samples from the USRP")
logger.error(e)
return False
if not len(data) == requested_samples:
logger.error("USRP data doesn't match request")
return False
return True
|
from kratos import *
from lake.modules.aggregator import Aggregator
from lake.attributes.config_reg_attr import ConfigRegAttr
from lake.passes.passes import lift_config_reg
import kratos as kts
class StorageConfigSeq(Generator):
'''
    Sequence the reads and writes to the storage unit. If dealing with
    a storage unit that has multiple r/w ports, only one of the ports
    should be used. If the storage unit is wider than the data width,
    this sequencer expects all X writes/reads for a word to arrive in
    order and to come from the same word.
'''
def __init__(self,
data_width,
config_addr_width,
addr_width,
fetch_width,
total_sets,
sets_per_macro):
super().__init__("storage_config_seq")
self.data_width = data_width
self.config_addr_width = config_addr_width
self.addr_width = addr_width
self.fetch_width = fetch_width
self.fw_int = int(self.fetch_width / self.data_width)
self.total_sets = total_sets
self.sets_per_macro = sets_per_macro
self.banks = int(self.total_sets / self.sets_per_macro)
self.set_addr_width = clog2(total_sets)
# self.storage_addr_width = self.
# Clock and Reset
self._clk = self.clock("clk")
self._rst_n = self.reset("rst_n")
# Inputs
# phases = [] TODO
# Take in the valid and data and attach an address + direct to a port
self._config_data_in = self.input("config_data_in",
self.data_width)
self._config_addr_in = self.input("config_addr_in",
self.config_addr_width)
self._config_wr = self.input("config_wr", 1)
self._config_rd = self.input("config_rd", 1)
self._config_en = self.input("config_en", self.total_sets)
self._clk_en = self.input("clk_en", 1)
self._rd_data_stg = self.input("rd_data_stg", self.data_width,
size=(self.banks,
self.fw_int),
explicit_array=True,
packed=True)
self._wr_data = self.output("wr_data",
self.data_width,
size=self.fw_int,
explicit_array=True,
packed=True)
self._rd_data_out = self.output("rd_data_out", self.data_width,
size=self.total_sets,
explicit_array=True,
packed=True)
self._addr_out = self.output("addr_out",
self.addr_width)
# One set per macro means we directly send the config address through
if self.sets_per_macro == 1:
width = self.addr_width - self.config_addr_width
if width > 0:
self.wire(self._addr_out, kts.concat(kts.const(0, width), self._config_addr_in))
else:
self.wire(self._addr_out, self._config_addr_in[self.addr_width - 1, 0])
else:
width = self.addr_width - self.config_addr_width - clog2(self.sets_per_macro)
self._set_to_addr = self.var("set_to_addr",
clog2(self.sets_per_macro))
self._reduce_en = self.var("reduce_en", self.sets_per_macro)
for i in range(self.sets_per_macro):
reduce_var = self._config_en[i]
for j in range(self.banks - 1):
reduce_var = kts.concat(reduce_var, self._config_en[i + (self.sets_per_macro * (j + 1))])
self.wire(self._reduce_en[i], reduce_var.r_or())
self.add_code(self.demux_set_addr)
if width > 0:
self.wire(self._addr_out, kts.concat(kts.const(0, width),
self._set_to_addr,
self._config_addr_in))
else:
self.wire(self._addr_out, kts.concat(self._set_to_addr, self._config_addr_in))
self._wen_out = self.output("wen_out", self.banks)
self._ren_out = self.output("ren_out", self.banks)
# Handle data passing
if self.fw_int == 1:
# If word width is same as data width, just pass everything through
self.wire(self._wr_data[0], self._config_data_in)
# self.wire(self._rd_data_out, self._rd_data_stg[0])
num = 0
for i in range(self.banks):
for j in range(self.sets_per_macro):
self.wire(self._rd_data_out[num], self._rd_data_stg[i])
num = num + 1
else:
self._data_wr_reg = self.var("data_wr_reg",
self.data_width,
size=self.fw_int - 1,
packed=True,
explicit_array=True)
# self._data_rd_reg = self.var("data_rd_reg",
# self.data_width,
# size=self.fw_int - 1,
# packed=True,
# explicit_array=True)
# Have word counter for repeated reads/writes
self._cnt = self.var("cnt", clog2(self.fw_int))
self._rd_cnt = self.var("rd_cnt", clog2(self.fw_int))
self.add_code(self.update_cnt)
self.add_code(self.update_rd_cnt)
# Gate wen if not about to finish the word
num = 0
for i in range(self.banks):
for j in range(self.sets_per_macro):
self.wire(self._rd_data_out[num], self._rd_data_stg[i][self._rd_cnt])
num = num + 1
# Deal with writing to the data buffer
self.add_code(self.write_buffer)
            # Wire the buffered words plus the incoming config data to the write data output
for i in range(self.fw_int - 1):
self.wire(self._wr_data[i], self._data_wr_reg[i])
self.wire(self._wr_data[self.fw_int - 1], self._config_data_in)
# If we have one bank, we can just always rd/wr from that one
if self.banks == 1:
if self.fw_int == 1:
self.wire(self._wen_out, self._config_wr)
else:
self.wire(self._wen_out,
self._config_wr & (self._cnt == (self.fw_int - 1)))
self.wire(self._ren_out, self._config_rd)
# Otherwise we need to extract the bank from the set
else:
if self.fw_int == 1:
for i in range(self.banks):
width = self.sets_per_macro
self.wire(self._wen_out[i], self._config_wr &
self._config_en[(i + 1) * width - 1, i * width].r_or())
else:
for i in range(self.banks):
width = self.sets_per_macro
self.wire(self._wen_out[i],
self._config_wr & self._config_en[(i + 1) * width - 1, i * width].r_or() &
(self._cnt == (self.fw_int - 1)))
for i in range(self.banks):
width = self.sets_per_macro
self.wire(self._ren_out[i],
self._config_rd & self._config_en[(i + 1) * width - 1, i * width].r_or())
@always_comb
def demux_set_addr(self):
self._set_to_addr = 0
for i in range(self.sets_per_macro):
if self._reduce_en[i]:
self._set_to_addr = i
@always_ff((posedge, "clk"), (negedge, "rst_n"))
def update_cnt(self):
if ~self._rst_n:
self._cnt = 0
# Increment when reading/writing - making sure
# that the sequencing is correct from app level!
elif (self._config_wr | self._config_rd) & self._config_en.r_or():
self._cnt = self._cnt + 1
@always_ff((posedge, "clk"), (negedge, "rst_n"))
def update_rd_cnt(self):
if ~self._rst_n:
self._rd_cnt = 0
# Increment when reading/writing - making sure
# that the sequencing is correct from app level!
else:
self._rd_cnt = self._cnt
@always_ff((posedge, "clk"), (negedge, "rst_n"))
def write_buffer(self):
if ~self._rst_n:
self._data_wr_reg = 0
# Increment when reading/writing - making sure
# that the sequencing is correct from app level!
elif self._config_wr & (self._cnt < self.fw_int - 1):
self._data_wr_reg[self._cnt] = self._config_data_in
if __name__ == "__main__":
db_dut = StorageConfigSeq(data_width=16,
config_addr_width=8,
addr_width=16,
fetch_width=64,
total_sets=2,
sets_per_macro=2)
verilog(db_dut, filename="storage_config_seq.sv",
additional_passes={"lift config regs": lift_config_reg})
|
from dis_snek import InteractionContext, slash_command
from ElevatorBot.commands.base import BaseScale
from ElevatorBot.core.destiny.dayOneRace import DayOneRace
from Shared.functions.readSettingsFile import get_setting
# =============
# Descend Only!
# =============
class DayOneRaceCommand(BaseScale):
# todo perms
@slash_command(
name="day_one_raid_race",
description="Starts the Day One raid completion announcer",
scopes=get_setting("COMMAND_GUILD_SCOPE"),
)
async def day_one_raid_race(self, ctx: InteractionContext):
if ctx.author.id != 238388130581839872:
await ctx.send(
"This is blocked for now, since it it waiting for a vital unreleased discord feature", ephemeral=True
)
return
racer = DayOneRace(ctx=ctx)
await racer.start()
def setup(client):
DayOneRaceCommand(client)
|
import os
from unittest import TestCase
from symbol.symbol_maker.reader import Reader
from django.conf import settings
class TestElement(TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def read_by_name(self, name):
path = os.path.join(settings.TEST_MML_HTML_DIR, name + ".html")
path = path.replace('\\', '/')
reader = Reader()
reader.read(path)
return reader
@staticmethod
def filter_by_type(elements, typename):
return [e for e in elements if e.type() == typename]
|
#!/usr/bin/env python3
import pygame
import sys
import time
import os
from pygame.locals import *
from .button import Button
from .globals import (screen_width, screen_height, FPS, fps_clock,
fontPath, assetsPath)
from .tool import blit_on, game_quit, remove_plant, show_plant, show_obstacle, set_water
class MenuFetch:
"""Menu for the fetch part representation.
Contains the information related to menu when fetching information from the robot.
Attributes:
surface: Surface on which the menu is blitted.
plant_im: Zone where the picture of the plant picture is printed.
title: Title of the Menu.
text_input: Text input object in which the user can write coordinates.
buttons: List of buttons object.
time_passed: Time passed between two update function call.
"""
# surface should be a surface of the size of 350x950
def __init__(self, surface_e, screen):
"""Initialiaze the menu with a surface and the main screen pygame object"""
# get a surface
self.surface = surface_e
self.surface.fill((255, 255, 255))
self.plant_im = None
self.title = "Fetch"
self.text_input = None
self.buttons = []
#self.init_buttons()
self.init_menu(screen)
self.time_passed = 0
self.is_color_chosing = False
def draw_on(self, screen):
"""Draw the menu on the screen"""
# Render background
        if self.plant_im is not None:
self.surface.blit(self.plant_im, (5,50))
screen.blit(self.surface, (950, 0))
# Render buttons
        if self.plant_im is not None:
for b in self.buttons:
b.draw(screen)
else:
for i in range(0, len(self.buttons) - 3):
self.buttons[i].draw(screen)
        if self.text_input is not None:
screen.blit(self.text_input.get_surface(), self.text_input.init_pos)
def init_menu(self, screen):
"""Set menu graphics at initialization."""
# Render texture background
brickTexture = os.path.join(assetsPath, 'textures/brick.jpg')
blit_on(self.surface, brickTexture)
# Render title
myFontPath = os.path.join(fontPath, 'Capture_it.ttf')
myfont = pygame.font.Font(myFontPath, 20)
title_surface = myfont.render(self.title, False, (255, 255, 255))
self.surface.blit(title_surface, (8, 0))
# Render instructions
# msg_surface = myfont.render("min: 0 | max: 899 | unit: cm/px",
# False, (0, 255, 55))
# self.surface.blit(msg_surface, (8, 48))
# msg_surface = myfont.render("Color: red, blue, green, yellow, purple",
# False, (255, 155, 255))
# self.surface.blit(msg_surface, (8, 65))
# msg_surface = myfont.render(" orange, white",
# False, (255, 155, 255))
# self.surface.blit(msg_surface, (8, 79))
# msg_surface = myfont.render("coordinate:",
# False, (255, 255, 255))
# self.surface.blit(msg_surface, (55, 110))
# msg_surface = myfont.render("color:",
# False, (255, 255, 255))
# self.surface.blit(msg_surface, (210, 110))
# Render buttons
#for b in self.buttons:
# b.draw(screen)
def update(self, screen, game_engine, game_map, events):
"""Update core of the menu.
Args:
self: The current menu object.
game_engine: game Object that contains everything related to the core application.
game_map: Map object that represents the map part of the application.
events: List of externals events
"""
# update text
        if self.text_input is not None:
self.text_input.update(events)
self.time_passed += fps_clock.tick(FPS)
# update buttons
if self.time_passed >= 150:
            if self.plant_im is not None:
for b in self.buttons:
b.update(screen, self, game_engine, game_map)
else:
for i in range(0, len(self.buttons) - 3):
self.buttons[i].update(screen, self, game_engine, game_map)
fps_clock.tick(30)
def init_buttons(self, game_engine):
"""Initialize the list of the buttons with the game engine."""
for p in game_engine.plants:
buttonPlantA = os.path.join(assetsPath, 'misc/plant_a.png')
buttonPlant = os.path.join(assetsPath, 'misc/plant.png')
button_plant = Button(p.pos_x, p.pos_y, p.width, p.height,
buttonPlantA, buttonPlant, show_plant, p)
self.buttons.append(button_plant)
for p in game_engine.obstacles:
buttonObA = os.path.join(assetsPath, 'misc/box_a.png')
buttonOb = os.path.join(assetsPath, 'misc/box.png')
button_ob = Button(p.pos_x, p.pos_y, p.width, p.height,
buttonObA, buttonOb, show_obstacle, p)
self.buttons.append(button_ob)
buttonQuitA = os.path.join(assetsPath, 'buttons/button_quit_a.png')
buttonQuit = os.path.join(assetsPath, 'buttons/button_quit.png')
button_quit = Button(1000, 800, 232, 93, buttonQuitA, buttonQuit,
game_quit)
buttonFinishA = os.path.join(assetsPath, 'buttons/button_finish_a.png')
buttonFinish = os.path.join(assetsPath, 'buttons/button_finish.png')
button_finish = Button(1000, 700, 232, 93,
buttonFinishA, buttonFinish, game_quit)
text_input_button = Button(969, 323, 95, 30,
None,
None,
None)
buttonSetA = os.path.join(assetsPath, 'buttons/button_set_a.png')
buttonSet = os.path.join(assetsPath, 'buttons/button_set.png')
button_set = Button(1000, 400, 232, 93,
buttonSetA, buttonSet, set_water)
buttonRemoveA = os.path.join(assetsPath, 'buttons/button_remove_a.png')
buttonRemove = os.path.join(assetsPath, 'buttons/button_remove.png')
button_remove = Button(1000, 500, 232, 93,
buttonRemoveA, buttonRemove, remove_plant)
self.buttons.append(button_quit)
self.buttons.append(button_finish)
self.buttons.append(text_input_button)
self.buttons.append(button_set)
self.buttons.append(button_remove)
|
needs_sphinx = '1.1'
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.imgmath', 'numpydoc',
'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
'sphinx.ext.autosummary', 'matplotlib.sphinxext.plot_directive']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'scipy-sphinx-theme'
copyright = u'2013, Surya Kasturi and Pauli Virtanen'
version = '0.1'
release = '0.1'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
# -- Options for HTML output ---------------------------------------------------
html_theme = 'scipy'
html_theme_path = ['_theme']
#html_logo = '_static/scipyshiny_small.png'
html_static_path = ['_static']
html_theme_options = {
"edit_link": "true",
"sidebar": "right",
"scipy_org_logo": "true",
"rootlinks": [("http://scipy.org/", "Scipy.org"),
("http://docs.scipy.org/", "Docs")]
}
#------------------------------------------------------------------------------
# Plot style
#------------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
import scipy as sp
np.random.seed(123)
"""
plot_include_source = True
plot_formats = [('png', 96), 'pdf']
plot_html_show_formats = False
import math
phi = (math.sqrt(5) + 1)/2
font_size = 13*72/96.0 # 13 px
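# Note: 13 px at 96 dpi is 13 * 72 / 96 = 9.75 pt, and the (3*phi, 3) figure
# size below gives a golden-ratio aspect of roughly 4.85 x 3 inches.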
plot_rcparams = {
'font.size': font_size,
'axes.titlesize': font_size,
'axes.labelsize': font_size,
'xtick.labelsize': font_size,
'ytick.labelsize': font_size,
'legend.fontsize': font_size,
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
}
|
import matplotlib.pyplot as plt
import csv
import numpy as np
import os
import re
def getJobHatch(job):
    if job == "Foraging":
        return '/'
    elif job == "FeedLarvae":
        return '\\'
    else:
        return ''
larvaTask = "LarvaTask"
nurseTask = "FeedLarvae"
foragerTask = "Foraging"
giveFoodTask = "GiveFood"
askFoodTask = "AskingFood"
otherTask = "Other"
folderName = "expe/bees"
path = '../' + folderName + '/'
files = []
for r, d, f in os.walk(path):
for file in f:
if '.csv' in file:
files.append(os.path.join(r, file))
fileIndex = 1
bees = {}
for f in files:
print(f + " " + str(fileIndex) + "/" + str(len(files)));
beeName = f.split("/bees")[1].split(".")[0].split("_")[7];
bees[beeName] = {}
bees[beeName]["EO"] = []
bees[beeName]["HJ"] = []
bees[beeName]["Tasks"] = []
fileIndex += 1
with open(f) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
        line_count = 0
        lastTask = ""
for row in csv_reader:
if(len(row)>2 and row[2] != ""):
beeTask = row[0]
beeHJ = float(row[1])
beeEO = float(row[2])
bees[beeName]["EO"].append(beeEO);
bees[beeName]["HJ"].append(beeHJ);
if(lastTask == beeTask):
bees[beeName]["Tasks"][-1][1] += 1
else:
#print(bees[beeName]["Tasks"])
bees[beeName]["Tasks"].append([beeTask,1])
lastTask = beeTask
#print(bees[beeName]["Tasks"])
#print(bees)
plt.figure(0, figsize=(25,15))
index = 1
for beeName in bees:
# row, col
    ax = plt.subplot(3, 8, index, title=beeName)
    incr = 0
    for task in bees[beeName]["Tasks"]:
        #plt.bar(theValue + tt,-25, width=-theValue,align='edge', color='white',hatch = getJobHatch(theKey), label=theKey);
        plt.bar(task[1] + incr, 1, width=-task[1], align='edge', color='white', hatch=getJobHatch(task[0]), label=task[0])
        incr += task[1]
size = len(bees[beeName]["EO"])
plt.plot(range(size), bees[beeName]["EO"], label="EO")
plt.plot(range(size), bees[beeName]["HJ"], label="HJ")
    newHandles = []
    newLabels = []
    handles, labels = ax.get_legend_handles_labels()
    for li in range(len(labels)):
        l = labels[li]
        if l not in newLabels:
            newLabels.append(l)
            newHandles.append(handles[li])
    plt.legend(handles=newHandles, labels=newLabels)
index+=1
plt.savefig("beeLives.png")
|
"""
Client creation action.
~~~~~~~~~~~~~~~~~~~~~~~
"""
from .action import Action
from .action import ActionExecutionException
from .action import InvalidActionConfigurationException
from .utils import get_user_roles
from .utils import InvalidUserResponse
from .utils import process_user_roles
import requests
import urllib
class CreateClientAction(Action):
@staticmethod
def valid_deploy_env(deploy_env):
"""
Returns True if the provided deployment environment is valid for this action, False otherwise
:param deploy_env: The target deployment environment.
:return: True always, as this action is valid for all environments.
"""
return True
def __init__(self, name, config_file_dir, action_config_json, *args, **kwargs):
"""
Constructor.
:param name: The action name.
:param config_file_dir: The directory containing the configuration file
:param action_config_json: The JSON configuration for this action
"""
super(CreateClientAction, self).__init__(name, *args, **kwargs)
self.action_config_json = action_config_json
if 'realmName' not in action_config_json:
raise InvalidActionConfigurationException('Configuration "{0}" missing property "realmName"'.format(name))
self.realm_name = action_config_json['realmName']
if 'client' not in action_config_json:
raise InvalidActionConfigurationException('Configuration "{0}" missing property "client"'.format(name))
self.client_data = action_config_json['client']
self.client_id = self.client_data.get('clientId', None)
if not self.client_id:
raise InvalidActionConfigurationException('Client configuration for "{0}" missing property "clientId"'.format(name))
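        # Illustrative shape of action_config_json (example values are assumptions;
        # only realmName, client and client.clientId are validated here, the other
        # keys are read later if present):
        #
        #   {
        #       "realmName": "example-realm",
        #       "client": {
        #           "clientId": "example-client",
        #           "protocolMappers": [...],    # consumed by update_protocol_mappers
        #           "secret": "..."              # currently skipped in execute()
        #       },
        #       "roles": [...]                   # service account roles
        #   }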
def execute(self, keycloak_client):
"""
Execute this action. In this case, attempt to create a client.
:param keycloak_client: The client to use when interacting with Keycloak
"""
# Process the client data.
print('==== Creating client "{0}" in realm "{1}"...'.format(self.client_id, self.realm_name))
existing_client_data = self.get_client_by_client_id(self.realm_name, self.client_id, keycloak_client)
if not existing_client_data:
print('==== Client "{0}" does not exist, creating...'.format(self.client_id))
client_creation_path = '/admin/realms/{0}/clients'.format(urllib.parse.quote(self.realm_name))
create_response = keycloak_client.post(client_creation_path, json=self.client_data)
if create_response.status_code == requests.codes.created:
print('==== Client "{0}" created.'.format(self.client_id))
existing_client_data = self.get_client_by_client_id(self.realm_name, self.client_id, keycloak_client)
client_uuid = existing_client_data['id']
else:
raise ActionExecutionException('Unexpected response for client creation request ({0})'.format(create_response.status_code))
else:
print('==== Client "{0}" exists, updating...'.format(self.client_id))
client_uuid = existing_client_data['id']
client_update_path = '/admin/realms/{0}/clients/{1}'.format(
urllib.parse.quote(self.realm_name),
urllib.parse.quote(client_uuid)
)
update_response = keycloak_client.put(client_update_path, json=self.client_data)
if update_response.status_code == requests.codes.no_content:
print('==== Client "{0}" updated.'.format(self.client_id))
else:
raise ActionExecutionException('Unexpected response for client update request ({0})'.format(update_response.status_code))
# Now update the secret.
if 'secret' in self.client_data:
print('==== NOT updating client "{0}" secret, as it is currently broken...'.format(self.client_id))
# NOTE: the following code is disabled because it requires a custom keycloak extension, which is not currently working
if False and 'secret' in self.client_data:
print('==== Updating client "{0}" secret...'.format(self.client_id))
client_secret_update_path = '/realms/{0}/clients-custom/{1}/client-secret'.format(
urllib.parse.quote(self.realm_name),
urllib.parse.quote(client_uuid)
)
client_secret_update_response = keycloak_client.put(
client_secret_update_path, json={'secret': self.client_data['secret']}
)
if client_secret_update_response.status_code == requests.codes.no_content:
print('==== Client "{0}" secret updated.'.format(self.client_id))
else:
raise ActionExecutionException('Unexpected response for client secret update request ({0})'.format(
client_secret_update_response.status_code
))
# We always need to process mappers, as Keycloak adds default mappers on client creation calls.
self.update_protocol_mappers(existing_client_data, keycloak_client)
# Process the service account roles.
self.process_service_account_roles(client_uuid, self.action_config_json.get('roles', []), keycloak_client)
def update_protocol_mappers(self, existing_client_data, keycloak_client):
"""
Update the protocol mappers for the client.
:param existing_client_data: The existing client data
:param keycloak_client: The client to use when interacting with Keycloak
"""
print('==== Processing client "{0}" protocol mappers...'.format(self.client_id))
client_uuid = existing_client_data['id']
existing_mappers = existing_client_data['protocolMappers']
new_mappers = self.client_data['protocolMappers']
# Mapper names are unique, so we can use that field to see what needs to be updated, created, or deleted.
existing_mappers_by_name = self.mapper_list_to_map_by_name(existing_mappers)
new_mappers_by_name = self.mapper_list_to_map_by_name(new_mappers)
# See what needs to be created or updated.
for name, config in new_mappers_by_name.items():
if name in existing_mappers_by_name:
self.update_protocol_mapper(client_uuid, existing_mappers_by_name[name]['id'], config, keycloak_client)
else:
self.create_protocol_mapper(client_uuid, config, keycloak_client)
# See what needs to be deleted.
for name, config in existing_mappers_by_name.items():
if name not in new_mappers_by_name:
self.delete_protocol_mapper(client_uuid, existing_mappers_by_name[name]['id'], name, keycloak_client)
print('==== Processed client "{0}" protocol mappers.'.format(self.client_id))
@staticmethod
def mapper_list_to_map_by_name(mapper_list):
"""
        Convert a list of protocol mappers to a map of mappers keyed by name.
:param mapper_list: The list to convert
:return: The resulting map
"""
by_name = {}
for mapper in mapper_list:
by_name[mapper['name']] = mapper
return by_name
def update_protocol_mapper(self, client_uuid, mapper_id, mapper_config, keycloak_client):
"""
Update a protocol mapper.
:param client_uuid: The UUID of the client
:param mapper_id: the UUID of the mapper
:param mapper_config: The mapper config to use in the update request
:param keycloak_client: The client to use when interacting with Keycloak
"""
print('==== Updating client "{0}" protocol mapper "{1}".'.format(self.client_id, mapper_config['name']))
path = '/admin/realms/{0}/clients/{1}/protocol-mappers/models/{2}'.format(
urllib.parse.quote(self.realm_name),
urllib.parse.quote(client_uuid),
urllib.parse.quote(mapper_id)
)
mapper_config['id'] = mapper_id
update_response = keycloak_client.put(path, json=mapper_config)
if update_response.status_code != requests.codes.no_content:
raise ActionExecutionException('Unexpected response for client protocol mapper update request ({0})'.format(update_response.status_code))
def create_protocol_mapper(self, client_uuid, mapper_config, keycloak_client):
"""
Create a protocol mapper.
:param client_uuid: The UUID of the client
:param mapper_config: The mapper config to use in the create request
:param keycloak_client: The client to use when interacting with Keycloak.
:return:
"""
print('==== Creating client "{0}" protocol mapper "{1}".'.format(self.client_id, mapper_config['name']))
path = '/admin/realms/{0}/clients/{1}/protocol-mappers/models'.format(
urllib.parse.quote(self.realm_name),
urllib.parse.quote(client_uuid)
)
create_response = keycloak_client.post(path, json=mapper_config)
if create_response.status_code != requests.codes.created:
raise ActionExecutionException('Unexpected response for client protocol mapper create request ({0})'.format(create_response.status_code))
def delete_protocol_mapper(self, client_uuid, mapper_id, mapper_name, keycloak_client):
"""
Delete a protocol mapper.
:param client_uuid: The UUID of the client
:param mapper_id: the UUID of the mapper
:param mapper_name: The name of the mapper
:param keycloak_client: The client to use when interacting with Keycloak
"""
print('==== Deleting client "{0}" protocol mapper "{1}".'.format(self.client_id, mapper_name))
path = '/admin/realms/{0}/clients/{1}/protocol-mappers/models/{2}'.format(
urllib.parse.quote(self.realm_name),
urllib.parse.quote(client_uuid),
urllib.parse.quote(mapper_id)
)
delete_response = keycloak_client.delete(path)
if delete_response.status_code != requests.codes.no_content:
raise ActionExecutionException('Unexpected response for client protocol mapper delete request ({0})'.format(delete_response.status_code))
def get_service_account_user(self, client_uuid, keycloak_client):
"""
Get the service account user for the client.
:param client_uuid: The client UUID
:param keycloak_client: The client to use when interacting with Keycloak
:return: The service account user configuration
"""
path = '/admin/realms/{0}/clients/{1}/service-account-user'.format(self.realm_name, client_uuid)
get_response = keycloak_client.get(path)
if get_response.status_code == requests.codes.ok:
return get_response.json()
if get_response.status_code == requests.codes.not_found:
return None
raise InvalidUserResponse('Unexpected user get response ({0})'.format(get_response.status_code))
def process_service_account_roles(self, client_uuid, service_account_roles, keycloak_client):
"""
Process the service account roles for the client.
:param client_uuid: The client UUID
:param service_account_roles: The roles to assign to the service account
:param keycloak_client: The client to use when interacting with Keycloak
"""
print('==== Processing client "{0}" service account roles...'.format(self.client_id))
user_config = self.get_service_account_user(client_uuid, keycloak_client)
        if not user_config:
            if len(service_account_roles) > 0:
                raise ActionExecutionException('No service account user found for client "{0}"'.format(self.client_id))
            return
        user_id = user_config['id']
existing_roles = get_user_roles(self.realm_name, user_id, keycloak_client)
process_user_roles(self.realm_name, user_id, existing_roles, service_account_roles, keycloak_client)
print('==== Processed client "{0}" service account roles.'.format(self.client_id))
|
# coding=utf-8
"""
"""
from typing import List, Optional
from modelscript.megamodels.dependencies.metamodels import (
MetamodelDependency
)
from modelscript.megamodels.metamodels import Metamodel
from modelscript.base.metrics import Metrics
# ---------------------------------------------------------------
# Abstract syntax
# ---------------------------------------------------------------
from modelscript.metamodels.permissions.gpermissions import (
Permission,
PermissionSet,
PermissionModel,
PermissionRule
)
from modelscript.metamodels.permissions.sar import (
    Subject,
    Resource,
    Action,
    SAR,
)
from modelscript.metamodels.usecases import (
UsecaseModel
)
__all__=(
# Concrete actions
'CreateAction',
'ReadAction',
'UpdateAction',
'DeleteAction',
'ExecuteAction',
# Concrete models and rule
'UCPermissionModel',
'FactorizedPermissionRule'
)
ClassModel='ClassModel'
#------------------------------------------------------------------------------
# Usecases/Classes specific
#------------------------------------------------------------------------------
CreateAction = Action('create', None)
ReadAction = Action('read', None)
UpdateAction = Action('update', None)
DeleteAction = Action('delete', None)
ExecuteAction = Action('execute', None)
class FactorizedPermissionRule(PermissionRule):
def __init__(self, model, subjects, actions, resources, astNode=None, lineNo=None):
#type: (UCPermissionModel, List[Subject], List[Action], List[Resource], Optional['ASTNode'], Optional[int])->None
"""
        A concrete rule that represents several permissions
        at once, thanks to the factorisation of
        subjects, actions and resources.
For instance the rule
((S1,S2), (A1,A2,A3), (R1))
represents 6 permissions.
"""
super(FactorizedPermissionRule, self).__init__(
model=model,
lineNo=lineNo,
astNode=astNode)
self.subjects=subjects
#type: List[Subject]
self.actions=actions
#type: List[Action]
self.resources=resources
#type: List[Resource]
def __str__(self):
return '%s %s %s' % (
','.join([s.subjectLabel for s in self.subjects]),
','.join([s.actionLabel for s in self.actions]),
','.join([s.resourceLabel for s in self.resources]),
)
class UCPermissionModel(PermissionModel):
"""
A usecase-class permission model
"""
def __init__(self):
super(UCPermissionModel, self).__init__()
self.usecaseModel=None
#type: Optional[UsecaseModel]
#set later
self.classModel=None
#type: Optional[ClassModel]
self.rules=[]
#type: List[FactorizedPermissionRule]
# The list of rules in the model. Actually the order
# in this list is not important.
# Note that "rules" is already defined in the superclass
# but defining it again here allow to have better typing
self._permissionSet=None
#type: Optional[PermissionSet]
# The permission set, computed on demand.
# see permissionSet property.
        # The permission set is just the expansion of the rules
        # into many individual permissions.
@property
def metamodel(self):
#type: () -> Metamodel
return METAMODEL
@property
def permissionSet(self):
#type: ()->PermissionSet
if self._permissionSet is None:
self._interpret()
# noinspection PyTypeChecker
return self._permissionSet
@property
def metrics(self):
#type: () -> Metrics
ms=super(UCPermissionModel, self).metrics
ms.addList((
('rule', len(self.rules)),
('permission', len(self.permissionSet.permissions) ),
))
return ms
def _interpret(self):
#type: ()->None
self._permissionSet= PermissionSet()
for rule in self.rules:
for s in rule.subjects:
for r in rule.resources:
for a in rule.actions:
p= Permission(s, a, r, rule)
self._permissionSet.permissions.add(p)
rule.permissions.append(p)
def __str__(self):
return '\n'.join([str(r) for r in self.rules])
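# Illustrative expansion performed by UCPermissionModel._interpret() (example
# values only): a FactorizedPermissionRule with subjects (S1, S2), actions
# (A1, A2, A3) and resources (R1,) produces 2 * 3 * 1 = 6 Permission objects
# in the resulting PermissionSet.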
METAMODEL=Metamodel(
id='pe', # other model could be registered
label='permission',
extension='.pes',
modelClass=UCPermissionModel
)
MetamodelDependency(
sourceId='pe',
targetId='gl',
optional=True,
multiple=True,
)
MetamodelDependency(
sourceId='pe',
targetId='us',
optional=False,
multiple=False,
)
MetamodelDependency(
sourceId='pe',
targetId='cl',
optional=False,
multiple=False,
)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import csv
import decimal
import json
import os
import re
import subprocess
import sys
import urllib.error
import urllib.parse
import urllib.request
import webbrowser
import http.server
import socketserver
from datetime import datetime
# from pprint import pprint
import requests
from math import log10, ceil
import numpy as np
import numpy.ma as ma
from fpdf import FPDF
from currency_converter import CurrencyConverter
from json2html import *
from K2PKConfig import *
from mysql.connector import Error, MySQLConnection
# NOTE: Set precision to cope with nano, pico & giga multipliers.
ctx = decimal.Context()
ctx.prec = 12
# Set colourscheme (this could go into preferences.ini)
# Colour seems to be most effective when used against a white background
adequate = 'rgb(253,246,227)'
# adequate = 'rgba(0, 60, 0, 0.15)'
lowstock = '#f6f6f6'
# lowstock = 'rgba(255, 255, 255, 0)'
nopkstock = '#c5c5c5'
# nopkstock = 'rgba(0, 60, 60, 0.3)'
multistock = '#e5e5e5'
# multistock = 'rgba(255, 255, 255, 0)'
minPriceCol = 'rgb(133,153,0)'
try:
currencyConfig = read_currency_config()
baseCurrency = (currencyConfig['currency'])
except KeyError:
print("No currency configured in config.ini")
assert sys.version_info >= (3, 4)
file_name = sys.argv[1]
projectName, ext = file_name.split(".")
print(projectName)
numBoards = 0
try:
while numBoards < 1:
qty = input("How many boards? (Enter 1 or more) > ")
numBoards = int(qty)
print("Calculations for ", numBoards, " board(s)")
except ValueError:
print("Integer values only, >= 1. Quitting now")
raise SystemExit
# Make baseline barcodes and web directories
try:
os.makedirs('./assets/barcodes')
except OSError:
pass
try:
os.makedirs('./assets/web')
except OSError:
pass
invalidate_BOM_Cost = False
try:
distribConfig = read_distributors_config()
preferred = (distribConfig['preferred'])
except KeyError:
print('No preferred distributors in config.ini')
pass
# Initialise empty cost and coverage matrix
prefCount = preferred.count(",") + 1
costMatrix = [0] * prefCount
coverageMatrix = [0] * prefCount
countMatrix = [0] * prefCount
voidMatrix = [0] * prefCount
def float_to_str(f):
d1 = ctx.create_decimal(repr(f))
return format(d1, 'f')
def convert_units(num):
'''
    Converts a value with a metric multiplier into a plain decimal string.
    Takes one input, e.g. 12.5m, and returns the decimal (0.0125) as a string. Also supports
    using the multiplier as the decimal marker, e.g. 4k7.
'''
factors = ["G", "M", "K", "k", "R", "", ".", "m", "u", "n", "p"]
conversion = {
'G': '1000000000',
'M': '1000000',
'K': '1000',
'k': '1000',
'R': '1',
'.': '1',
'': '1',
'm': '0.001',
"u": '0.000001',
'n': '0.000000001',
'p': '0.000000000001'
}
val = ""
mult = ""
for i in range(len(num)):
if num[i] == ".":
mult = num[i]
if num[i] in factors:
mult = num[i]
val = val + "."
else:
if num[i].isdigit():
val = val + (num[i])
else:
print("Invalid multiplier")
return "0"
if val.endswith("."):
val = val[:-1]
m = float(conversion[mult])
v = float(val)
r = float_to_str(m * v)
r = r.rstrip("0")
r = r.rstrip(".")
return r
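# Illustrative behaviour of convert_units() (values follow the conversion table above):
#   convert_units("4k7")   -> "4700"
#   convert_units("12.5m") -> "0.0125"
#   convert_units("100n")  -> "0.0000001"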
def limit(num, minimum=10, maximum=11):
'''
Limits input 'num' between minimum and maximum values.
Default minimum value is 10 and maximum value is 11.
'''
return max(min(num, maximum), minimum)
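# Illustrative behaviour of limit() with the default bounds:
#   limit(5)    -> 10    (clamped up to the minimum)
#   limit(10.5) -> 10.5  (already in range)
#   limit(42)   -> 11    (clamped down to the maximum)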
def partStatus(partID, parameter):
dbconfig = read_db_config()
try:
conn = MySQLConnection(**dbconfig)
cursor = conn.cursor()
sql = "SELECT DISTINCT R.stringValue FROM PartParameter R WHERE (R.name = '{}') AND (R.part_id = {})".format(
parameter, partID)
cursor.execute(sql)
partStatus = cursor.fetchall()
if partStatus == []:
part = "Unknown"
else:
part = str(partStatus[0])[2:-3]
return part
except UnicodeEncodeError as err:
print(err)
finally:
cursor.close()
conn.close()
def getDistrib(partID):
dbconfig = read_db_config()
try:
conn = MySQLConnection(**dbconfig)
cursor = conn.cursor()
sql = """SELECT D.name, PD.sku, D.skuurl FROM Distributor D
LEFT JOIN PartDistributor PD on D.id = PD.distributor_id
WHERE PD.part_id = {}""".format(partID)
cursor.execute(sql)
distrbs = cursor.fetchall()
unique = []
d = []
distributor = []
for distributor in distrbs:
if distributor[0] not in unique and distributor[0] in preferred:
unique.append(distributor[0])
d.append(distributor)
return d
except UnicodeEncodeError as err:
print(err)
finally:
cursor.close()
conn.close()
def labelsetup():
pdf = FPDF(orientation='P', unit='mm', format='A4')
rows = 4
cols = 3
margin = 4 # In mm
labelWidth = (210 - 2 * margin) / cols
labelHeight = (297 - 2 * margin) / rows
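    # With A4 (210 x 297 mm) and a 4 mm margin this gives a 3 x 4 grid of
    # labels, each roughly 67.3 x 72.25 mm.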
pdf.add_page()
return (labelWidth, labelHeight, pdf)
def picksetup(BOMname, dateBOM, timeBOM):
pdf2 = FPDF(orientation='L', unit='mm', format='A4')
margin = 10 # In mm
pdf2.add_page()
pdf2.set_auto_page_break(1, 4.0)
pdf2.set_font('Courier', 'B', 9)
pdf2.multi_cell(80, 10, BOMname, align="L", border=0)
pdf2.set_auto_page_break(1, 4.0)
pdf2.set_font('Courier', 'B', 9)
pdf2.set_xy(90, 10)
pdf2.multi_cell(30, 10, dateBOM, align="L", border=0)
pdf2.set_xy(120, 10)
pdf2.multi_cell(30, 10, timeBOM, align="L", border=0)
pdf2.set_font('Courier', 'B', 7)
pdf2.set_xy(5, 20)
pdf2.multi_cell(10, 10, "Line", border=1)
pdf2.set_xy(15, 20)
pdf2.multi_cell(40, 10, "Ref", border=1)
pdf2.set_xy(55, 20)
pdf2.multi_cell(95, 10, "Part", border=1)
pdf2.set_xy(150, 20)
pdf2.multi_cell(10, 10, "Stock", border=1)
pdf2.set_xy(160, 20)
pdf2.multi_cell(50, 10, "P/N", border=1)
pdf2.set_xy(210, 20)
pdf2.multi_cell(50, 10, "Location", border=1)
pdf2.set_xy(260, 20)
pdf2.multi_cell(10, 10, "Qty", border=1)
pdf2.set_xy(270, 20)
pdf2.multi_cell(10, 10, "Pick", border=1)
return (pdf2)
def makepick(line, pdf2, pos):
index = ((pos - 1) % 16) + 1
pdf2.set_font('Courier', 'B', 6)
pdf2.set_xy(5, 20 + 10 * index) # Line Number
pdf2.multi_cell(10, 10, str(pos), align="C", border=1)
pdf2.set_xy(15, 20 + 10 * index) # Blank RefDes box
pdf2.multi_cell(40, 10, "", align="L", border=1)
pdf2.set_xy(15, 20 + 10 * index) # RefDes
pdf2.multi_cell(40, 5, line[6], align="L", border=0)
pdf2.set_xy(55, 20 + 10 * index) # Blank Part box
pdf2.multi_cell(95, 10, '', align="L", border=1)
pdf2.set_font('Courier', 'B', 8)
pdf2.set_xy(55, 20 + 10 * index) # Part name
pdf2.multi_cell(95, 5, line[1], align="L", border=0)
pdf2.set_font('Courier', '', 6)
pdf2.set_xy(55, 24 + 10 * index) # Part Description
pdf2.multi_cell(95, 5, line[0][:73], align="L", border=0)
pdf2.set_xy(150, 20 + 10 * index)
pdf2.multi_cell(10, 10, str(line[5]), align="C", border=1) # Stock
pdf2.set_xy(160, 20 + 10 * index)
pdf2.multi_cell(50, 10, '', align="C", border=1) # Blank cell
pdf2.set_xy(160, 23.5 + 10 * index)
pdf2.multi_cell(50, 10, line[2], align="C", border=0) # PartNum
pdf2.set_xy(172, 21 + 10 * index)
if line[2] != "":
pdf2.image('assets/barcodes/' + line[2] + '.png', h=6) # PartNum BC
pdf2.set_xy(210, 20 + 10 * index)
pdf2.multi_cell(50, 10, '', align="C", border=1) # Blank cell
pdf2.set_xy(210, 23.5 + 10 * index)
pdf2.multi_cell(50, 10, line[3], align="C", border=0) # Location
pdf2.set_xy(223, 21 + 10 * index)
if line[3] != "":
pdf2.image(
'assets/barcodes/' + line[3][1:] + '.png', h=6) # Location BC
pdf2.set_font('Courier', 'B', 8)
pdf2.set_xy(260, 20 + 10 * index)
pdf2.multi_cell(10, 10, line[4], align="C", border=1) # Qty
pdf2.set_xy(270, 20 + 10 * index)
pdf2.multi_cell(10, 10, "", align="L", border=1)
pdf2.set_xy(273, 23 + 10 * index)
if line[3] != "":
pdf2.multi_cell(4, 4, "", align="L", border=1)
if index % 16 == 0:
pdf2.add_page()
def makelabel(label, labelCol, labelRow, lblwidth, lblheight, pdf):
'''
Take label info and make a label at position defined by row & column
'''
lineHeight = 3
intMargin = 7
labelx = int((lblwidth * (labelCol % 3)) + intMargin)
labely = int((lblheight * (labelRow % 4)) + intMargin)
pdf.set_auto_page_break(1, 4.0)
pdf.set_font('Courier', 'B', 9)
pdf.set_xy(labelx, labely)
pdf.multi_cell(
lblwidth - intMargin, lineHeight, label[0], align="L", border=0)
pdf.set_font('Courier', '', 8)
pdf.set_xy(labelx, labely + 10)
pdf.cell(lblwidth, lineHeight, label[1], align="L", border=0)
pdf.image('assets/barcodes/' + label[1] + '.png', labelx, labely + 13, 62,
10)
pdf.set_xy(labelx, labely + 25)
pdf.cell(lblwidth, lineHeight, 'Part no: ' + label[2], align="L", border=0)
pdf.set_xy(labelx + 32, labely + 25)
pdf.cell(
lblwidth, lineHeight, 'Location: ' + label[3], align="L", border=0)
pdf.image('assets/barcodes/' + label[2] + '.png', labelx, labely + 28, 28,
10)
pdf.image('assets/barcodes/' + label[3][1:] + '.png', labelx + 30,
labely + 28, 32, 10)
pdf.set_xy(labelx, labely + 40)
pdf.cell(
lblwidth, lineHeight, 'Quantity: ' + label[4], align="L", border=0)
pdf.image('assets/barcodes/' + label[4] + '.png', labelx, labely + 43, 20,
10)
pdf.set_font('Courier', 'B', 8)
pdf.set_xy(labelx + 25, labely + 46)
pdf.multi_cell(35, lineHeight, label[9], align="L", border=0)
pdf.set_xy(labelx, labely + 56)
pdf.multi_cell(
lblwidth - intMargin, lineHeight, label[6], align="L", border=0)
if (labelCol == 2) & ((labelRow + 1) % 4 == 0):
pdf.add_page()
def getTable(partID, q, bcolour, row):
'''
    There are 8 columns of manufacturer data / availability; pricing starts at column 9.
    Pricing is in quantities of 1, 10, 100, ... so a log function selects the column.
    The background colour is already set, so it is ignored here.
'''
index = int(log10(q)) + 9
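    # Column selection example: the per-part CSV written by octopartLookup()
    # stores price breaks for 1/10/100/1000/10000 in columns 9-13, so
    # q=1 -> column 9, q=10 -> column 10, q=100 -> column 11; intermediate
    # quantities round down to the nearest lower break.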
tbl = ""
minPrice = 999999
classtype = ''
pricingExists = False
fn = "./assets/web/" + str(partID) + ".csv"
# If file is empty st_size should = 0 BUT file always contains exactly 1 byte ...
if os.stat(fn).st_size == 1: # File (almost) empty ...
tbl = "<td colspan = " + str(
len(preferred)
) + " class ='lineno' '><b>No data found from preferred providers</b></td>"
return tbl, voidMatrix, voidMatrix, voidMatrix
try:
minData = np.genfromtxt(fn, delimiter=",")
if np.ndim(minData) == 1: # If only one line, nanmin fails
minPrice = minData[index]
else:
minPrice = np.nanmin(minData[:, index])
except (UserWarning, ValueError, IndexError) as error:
print(
"ATTENTION ", error
) # Just fails when empty file or any other error, returning no data
tbl = "<td colspan = " + str(
len(preferred)
) + " class ='lineno'><b>No data found from preferred providers</b></td>"
return tbl, voidMatrix, voidMatrix, voidMatrix
csvFiles = open(fn, "r")
compPrefPrices = list(csv.reader(csvFiles))
line = ""
line2 = ""
n = len(preferred)
_costRow = [0] * n
_coverageRow = [0] * n
_countRow = [0] * n
# line += "<form>"
for d, dist in enumerate(preferred):
line += "<td"
terminated = False
low = 0
magnitude = 0
i = 0
for _comp in compPrefPrices:
price = ""
priceLine = ""
try:
if _comp[0] in dist:
try:
price = str("{0:2.2f}".format(float(_comp[index])))
dispPrice = price
                    except (ValueError, IndexError):
price = "-" # DEBUG "-"
try:
priceLine = str("{0:2.2f}".format(
q * float(_comp[index])))
calcPL = priceLine
                    except (ValueError, IndexError):
priceLine = "-" # DEBUG "-"
calcPL = "0.0"
if i == 0: # 1st row only being considered
try:
_costRow[d] = q * float(_comp[index])
                        except (ValueError, IndexError):
_costRow[d] = 0.0
_coverageRow[d] = 1
_countRow[d] = q
try:
_moq = int(_comp[3])
                        except (ValueError, IndexError):
_moq = 999999
if bcolour == 'rgb(238, 232, 213)':
classtype = 'ambig'
else:
classtype = 'mid'
if _comp[index].strip() == str(minPrice):
pricingExists = True
classtype = 'min'
line += " class = '" + classtype + "'>"
line += " <input id = '" + str(d) + "-"+str(row)+"' type='radio' name='" + str(row) + "' value='" + \
calcPL + "' checked >"
else:
pricingExists = True
line += " class = '" + classtype + "'>"
line += " <input id='" + str(d) + "-"+str(row)+"' type='radio' name='" + str(row) + "' value='" + \
calcPL + "'>"
line += "<label for=" + str(d) + "></label>"
line += " <b><a href = '" + _comp[4] + "'>" + _comp[1] + "</a></b><br>"
if price == "-":
line += "<p class ='null' style= 'padding:5px;'> Ea:"
line += "<span style='float: right; text-align: right;'>"
line += "<b >" + price + "</b> "
line += _comp[8]
price = "0"
else:
line += "<p style= 'padding:5px;'> Ea:"
line += "<span style='float: right; text-align: right;'>"
line += "<b>" + price + "</b> "
line += _comp[8]
line += "</span>"
if priceLine == "-":
line += "<p class ='null' style= 'padding:5px;'> Line:"
line += "<span style='float: right; text-align: right;'>"
line += "<b >" + priceLine + "</b> "
line += _comp[8]
priceline = "0"
else:
line += "<p style= 'padding:5px;'> Line:"
line += "<span style='float: right; text-align: right;'>"
line += "<b>" + priceLine + "</b> "
line += _comp[8]
line += "</span>"
line += "<p style= 'padding:5px;'> MOQ: "
line += "<span style='float: right; text-align: right;'><b>" + _comp[3] + "</b>"
if int(q) >= _moq: # MOQ satisfied
line += " <span class = 'icon'>🔹 </span></span><p>"
else:
line += " <span class = 'icon'>🔺 </span></span><p>"
line += "<p style= 'padding:5px;'> Stock:"
line += "<span style='float: right; text-align: right;'><b>"
line += _comp[2] + "</b>"
if int(q) <= int(_comp[2]): # Stock satisfied
line += " <span class = 'icon'>🔹 </span></span><p>"
else:
line += " <span class = 'icon'>🔸 </span></span><p>"
P1 = ""
P2 = ""
magnitude = 10**(index - 9)
if _moq == 999999:
low = q
next = 10 * magnitude
column = int(log10(low) + 9)
elif _moq > magnitude:
low = _moq
next = 10**(ceil(log10(low)))
column = int(log10(low) + 10)
else:
low = magnitude
next = 10 * magnitude
column = int(log10(low) + 9)
try:
if float(_comp[column]) > 1:
P1 = str("{0:2.2f}".format(
float(_comp[column])))
else:
P1 = str("{0:3.3f}".format(
float(_comp[column])))
                        except (ValueError, IndexError):
P1 = "-"
line += "<p style='text-align: left; padding:5px;'>" + str(
low) + " +"
line += "<span style='float: right; text-align: right;'><b>" + P1 + "</b> " + _comp[8] + "</span>"
if column <= 12:
try:
if float(_comp[column]) > 1:
P2 = str("{0:2.2f}".format(
float(_comp[column + 1])))
else:
P2 = str("{0:3.3f}".format(
float(_comp[column + 1])))
                            except (ValueError, IndexError):
P2 = "-"
line += "<p style='text-align: left; padding:5px;'>" + str(
next) + " +"
line += "<span style='float: right; text-align: right;'><b>" + P2 + "</b> " + _comp[8] + "</span>"
                else:  # Nasty kludge - really need to iterate through these to get the best deal
if i == 1:
line += "<br><br><br><p style= 'padding:5px;'><b> Alternatives</b><br>"
try:
if _comp[index].strip() == str(minPrice):
if classtype != 'min':
line += "<div class = 'min'>"
price = str("{0:2.2f}".format(float(_comp[index])))
priceLine = str("{0:2.2f}".format(
q * float(_comp[index])))
line += "<p style= 'padding:5px;'><b><a href = '" + _comp[4] + "' > " + _comp[1] + " </b></a><br>"
line += "<p style= 'padding:5px;'> Ea: <b>"
line += price + "</b> " + _comp[8]
                    except (ValueError, IndexError):
line += "<p style= 'padding:5px;'><b><a href = '" + _comp[4] + "' > " + _comp[1] + " </b></a><br>"
line += " Pricing N/A"
i += 1
# FIXME This needs to count number of instances
if i <= 3:
terminated = True
else:
terminated = False
except IndexError:
pass
if not terminated:
line += ">"
_costRow[d] = 0
_countRow[d] = 0
_coverageRow[d] = 0
if pricingExists:
line += "<p style='padding:5px;'>"
pricingExists = False
line += "</td>"
# line += "</form>"
tbl += line
return tbl, _costRow, _coverageRow, _countRow
def octopartLookup(partIn, bean):
try:
octoConfig = read_octopart_config()
apikey = (octoConfig['apikey'])
    except KeyError:
print('No Octopart API key in config.ini')
return (2)
try:
currencyConfig = read_currency_config()
locale = (currencyConfig['currency'])
    except KeyError:
print("No currency configured in config.ini")
return (4)
# Get currency rates from European Central Bank
    # Fall back on cached rates
try:
c = CurrencyConverter(
'http://www.ecb.europa.eu/stats/eurofxref/eurofxref.zip')
    except urllib.error.URLError:
c = CurrencyConverter()
return (8)
# Remove invalid characters
partIn = partIn.replace("/", "-")
path = partIn.replace(" ", "")
web = str("./assets/web/" + path + ".html")
Part = partIn
webpage = open(web, "w")
combo = False
if " " in partIn:
# Possible Manufacturer/Partnumber combo. The Octopart mpn search does not include manufacturer
# Split on space and assume that left part is Manufacturer and right is partnumber.
# Mark as comboPart.
combo = True
comboManf, comboPart = partIn.split(" ")
aside = open("./assets/web/tmp.html", "w")
htmlHeader = """
<!DOCTYPE html>
<html lang = 'en'>
<meta charset="utf-8">
<head>
<html lang="en">
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<title>Octopart Lookup</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="Description" lang="en" content="Kicad2PartKeepr">
<meta name="author" content="jpateman@gmail.com">
<meta name="robots" content="index, follow">
<!-- icons -->
<link rel="apple-touch-icon" href="assets/img/apple-touch-icon.png">
<link rel="shortcut icon" href="favicon.ico">
<link rel="stylesheet" href="../css/octopart.css">
</head>
<body>
<div class="header">
<h1 class="header-heading">Kicad2PartKeepr</h1>
</div>
<div class="nav-bar">
<div class="container">
<ul class="nav">
</ul>
</div>
</div>
"""
webpage.write(htmlHeader)
##################
bean = False
##################
if bean:
#
url = "https://octopart.com/api/v4/rest/parts/search"
url += '?apikey=' + apikey
url += '&q="' + Part + '"'
url += '&include[]=descriptions'
url += '&include[]=imagesets'
# url += '&include[]=specs'
# url += '&include[]=datasheets'
url += '&country=GB'
elif combo:
#
url = "https://octopart.com/api/v4/rest/parts/match"
url += '?apikey=' + apikey
url += '&queries=[{"brand":"' + comboManf + \
'","mpn":"' + comboPart + '"}]'
url += '&include[]=descriptions'
url += '&include[]=imagesets'
url += '&include[]=specs'
url += '&include[]=datasheets'
url += '&country=GB'
else:
url = "https://octopart.com/api/v4/rest/parts/match"
url += '?apikey=' + apikey
url += '&queries=[{"mpn":"' + Part + '"}]'
url += '&include[]=descriptions'
url += '&include[]=imagesets'
url += '&include[]=specs'
url += '&include[]=datasheets'
url += '&country=GB'
data = urllib.request.urlopen(url).read()
response = json.loads(data.decode('utf8'))
loop = False
for result in response['results']:
for item in result['items']:
if loop:
break
loop = True
partNum = item['mpn']
try:
description = str(item['descriptions'][0].get('value', None))
            except (IndexError, KeyError):
description = ""
try:
brand = str(item['brand']['name'])
            except (KeyError, TypeError):
brand = ""
# Get image (if present). Also need to get attribution for Octopart licensing
try:
# image = str(item['imagesets'][0]['medium_image'].get('url', None))
image = item['imagesets'][0]['large_image']['url']
            except (IndexError, KeyError, TypeError):
image = ""
try:
credit = item['imagesets'][0]['credit_string']
crediturl = item['imagesets'][0]['credit_url']
            except (IndexError, KeyError, TypeError):
credit = ""
crediturl = ""
webpage.write(
"<div class='content' id = 'thumbnail'><table class = 'table2'><tr><td style = 'width:100px;'><img src='"
+ image + "' alt='thumbnail'></td><td><h2>" + brand + " " + partNum + "</h2><h4>" +
description + "</h4></td></tr><tr><td style = 'color:#aaa;'>Image: " + credit +"</td><td></td></tr></table></div>")
specfile = open("./assets/web/" + path, 'w')
specfile.write(image)
aside.write(
"<div class = 'aside'><table class='table table-striped'><thead>")
aside.write("<th>Characteristic</th><th>Value</th></thead><tbody>")
for spec in item['specs']:
parm = item['specs'][spec]['metadata']['name']
try:
val = str(item['specs'][spec]['value'][0])
                except (IndexError, KeyError, TypeError):
val = "Not Listed by Manufacturer"
parameter = (("{:34} ").format(parm))
value = (("{:40}").format(val))
print(("| {:30} : {:120} |").format(parameter, value))
aside.write("<tr><td>" + parameter + "</td><td>" + value +
"</td></tr>")
print(('{:_<162}').format(""))
aside.write("</tbody></table><table class='table table-striped'>")
aside.write(
"<thead><th>Datasheets</th><th>Date</th><th>Pages</th></thead><tbody>"
)
for d, datasheet in enumerate(item['datasheets']):
if d == 1:
specfile.write(',' + datasheet['url'])
try:
if (datasheet['metadata']['date_created']):
dateUpdated = (
datasheet['metadata']['date_created'])[:10]
else:
dateUpdated = "Unknown"
                except (KeyError, TypeError):
dateUpdated = "Unknown"
if datasheet['attribution']['sources'] is None:
source = "Unknown"
else:
source = datasheet['attribution']['sources'][0]['name']
try:
numPages = str(datasheet['metadata']['num_pages'])
                except (KeyError, TypeError):
numPages = "-"
documents = ((
"| {:30.30} {:11} {:12} {:7} {:7} {:1} {:84.84} |").format(
source, " Updated: ", dateUpdated, "Pages: ", numPages,
"", datasheet['url']))
print(documents)
aside.write("<tr><td><a href='" + datasheet['url'] + "'> " +
source + " </a></td><td>" + dateUpdated +
"</td><td>" + numPages + "</td></tr>")
# if loop:
# webpage.write("<table class='table table-striped'>")
# else:
# webpage.write("<p> No Octopart results found </>")
# Header row here
webpage.write(
"<div class ='main'><table><thead><th>Seller</th><th>SKU</th><th>Stock</th><th>MOQ</th><th>Package</th><th>Currency</th><th>1</th><th>10</th><th>100</th><th>1000</th><th>10000</th></thead><tbody>"
)
count = 0
for result in response['results']:
stockfile = open("./assets/web/" + path + '.csv', 'w')
for item in result['items']:
if count == 0:
print(('{:_<162}').format(""))
print(
("| {:24} | {:19} | {:>9} | {:>7} | {:11} | {:5} ").format(
"Seller", "SKU", "Stock", "MOQ", "Package",
"Currency"),
end="")
print(
("| {:>10}| {:>10}| {:>10}| {:>10}| {:>10}|").format(
"1", "10", "100", "1000", "10000"))
print(('{:-<162}').format(""), end="")
count += 1
# Breaks at 1, 10, 100, 1000, 10000
for offer in item['offers']:
loop = 0
_seller = offer['seller']['name']
_sku = (offer['sku'])[:19]
_stock = offer['in_stock_quantity']
_moq = str(offer['moq'])
_productURL = str(offer['product_url'])
_onOrderQuant = offer['on_order_quantity']
# _onOrderETA = offer['on_order_eta']
_factoryLead = offer['factory_lead_days']
_package = str(offer['packaging'])
_currency = str(offer['prices'])
if _moq == "None":
_moq = '-'
if _package == "None":
_package = "-"
# if not _factoryLead or _factoryLead == "None":
# _factoryLead = "-"
# else:
# _factoryLead = int(int(_factoryLead) / 7)
if _seller in preferred:
data = str(_seller) + ", " + str(_sku) + ", " + \
str(_stock) + ", " + str(_moq) + ", " + str(_productURL) + ", " +\
str(_factoryLead) + ", " + str("_onOrderETA") + ", " + str(_onOrderQuant) +\
", " + str(locale)
stockfile.write(data)
print()
print(
("| {:24.24} | {:19} | {:>9} | {:>7} | {:11} |").format(
_seller, _sku, _stock, _moq, _package),
end="")
line = "<tr><td>" + _seller + "</td><td><a target='_blank' href=" + str(
offer['product_url']) + ">" + str(
offer['sku']) + "</a></td><td>" + str(
_stock) + "</td><td>" + str(
_moq) + "</td><td>" + _package + "</td>"
webpage.write(line)
valid = False
points = ['-', '-', '-', '-', '-']
for currency in offer['prices']:
# Some Sellers don't have currency so use this to fill the line
valid = True
if currency == locale:
# Base currency is local
loop += 1
if loop == 1:
print((" {:3} |").format(currency), end="")
webpage.write("<td>" + currency + "</td>")
else:
# Only try and convert first currency
loop += 1
if loop == 1:
print((" {:3}* |").format(locale), end="")
webpage.write("<td>" + locale + "*</td>")
if loop == 1:
for breaks in offer['prices'][currency]:
_moqv = offer['moq']
if _moqv is None:
_moqv = 1
_moqv = int(_moqv)
i = 0
# Break 0 - 9
if breaks[0] < 10:
points[0] = round(
c.convert(breaks[1], currency, locale), 2)
for i in range(0, 4):
points[i + 1] = points[i]
# Break 10 to 99
if breaks[0] >= 10 and breaks[0] < 100:
points[1] = round(
c.convert(breaks[1], currency, locale), 3)
# if _moqv >= breaks[0]:
for i in range(1, 4):
points[i + 1] = points[i]
# Break 100 to 999
if breaks[0] >= 100 and breaks[0] < 1000:
points[2] = round(
c.convert(breaks[1], currency, locale), 4)
# if _moqv >= breaks[0]:
for i in range(2, 4):
points[i + 1] = points[i]
# Break 1000 to 9999
if breaks[0] >= 1000 and breaks[0] < 10000:
points[3] = round(
c.convert(breaks[1], currency, locale), 5)
# if _moqv >= breaks[0]:
points[4] = points[3]
# Break 10000+
if breaks[0] >= 10000:
points[4] = round(
c.convert(breaks[1], currency, locale), 6)
else:
points[4] = points[3]
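                            # Illustrative propagation (example prices, assuming
                            # ascending break order): a break at qty 1 of 0.10
                            # fills points[0..4] with 0.10; a later break at qty
                            # 100 of 0.08 then overwrites points[2..4], giving
                            # [0.10, 0.10, 0.08, 0.08, 0.08].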
for i in range(0, 5):
print((" {:>10.5}|").format(points[i]), end="")
if points[i] == '-':
webpage.write("<td>-</td>")
elif float(points[i]) >= 1:
pp = str("{0:.2f}".format(points[i]))
webpage.write("<td>" + pp + "</td>")
else:
webpage.write("<td>" + str(points[i]) +
"</td>")
if _seller in preferred:
stockfile.write(", " + str(points[i]))
webpage.write("</tr>")
if _seller in preferred:
stockfile.write("\n")
if not valid:
print(" |", end="")
webpage.write("<td></td>")
for i in range(0, 5):
print((" {:>10.5}|").format(points[i]), end="")
webpage.write("<td>" + str(points[i]) + "</td>")
if _seller in preferred:
stockfile.write(", " + str(points[i]))
webpage.write("</tr>")
stockfile.write("\n")
valid = False
try:
if _seller in preferred:
stockfile.write("\n")
            except UnboundLocalError:
stockfile.write("\n")
webpage.write("</tbody></table></div>")
print()
print(('{:=<162}').format(""))
aside.close()
side = open("./assets/web/tmp.html", "r")
aside = side.read()
webpage.write(aside + "</tbody></table>")
webpage.write("</div></body></html>")
return
################################################################################
# Further setup or web configuration here
#
#
################################################################################
compliance = {
'Compliant': 'assets/img/rohs-logo.png',
'Non-Compliant': 'assets/img/ROHS_RED.png',
'Unknown': 'assets/img/ROHS_BLACK.png'
}
manufacturing = {
'Obsolete': 'assets/img/FACTORY_RED.png',
'Not Recommended for New Designs': 'assets/img/FACTORY_YELLOW.png',
'Unknown': 'assets/img/FACTORY_BLUE.png',
'Active': 'assets/img/FACTORY_GREEN.png',
'Not Listed by Manufacturer': 'assets/img/FACTORY_PURPLE.png'
}
dateBOM = datetime.now().strftime('%Y-%m-%d')
timeBOM = datetime.now().strftime('%H:%M:%S')
lblwidth, lblheight, pdf = labelsetup()
pdf2 = picksetup(projectName, dateBOM, timeBOM)
labelRow = 0
labelCol = 0
run = 0
web = open("assets/web/temp.html", "w")
accounting = open("assets/web/accounting.html", "w")
missing = open("assets/web/missing.csv", "w")
under = open("assets/web/under.csv", "w")
under.write('Name,Description,Quantity,Stock,Min Stock\n')
htmlHeader = """
<!DOCTYPE html>
<meta charset="utf-8">
<html lang = 'en'>
<head>
<link rel="stylesheet" href="assets/css/web.css">
<title>Kicad 2 PartKeepr</title>
<script src="https://code.jquery.com/jquery-3.3.1.min.js" integrity="sha256-FgpCb/KJQlLNfOu91ta32o/NMZxltwRo8QtmkMRdAu8=" crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/Chart.js/2.4.0/Chart.min.js"></script>
</head>
<body>
<div class="header">
<h1 class="header-heading">Kicad2PartKeepr</h1>
</div>
<div class="nav-bar">
<ul class="nav">
<li><a href='labels.pdf' download='labels.pdf'>Labels</a></li>
<li><a href='picklist.pdf' download='picklist.pdf'>Pick List</a></li>
<li><a href='assets/web/missing.csv' download='missing.csv'>Missing parts list</a></li>
<li><a href='assets/web/under.csv' download='under.csv'>Understock list</a></li>
<li><a href='assets/web/""" + projectName + """_PK.csv'> PartKeepr import </a></li>
</ul>
</div>
<h2>Project name: """ + projectName + \
"</h2><h3>Date: " + dateBOM + \
"</h3><h3>Time: " + timeBOM + "</h3>"
htmlBodyHeader = '''
<table class='main' id='main'>
'''
labelCol = 0
htmlAccountingHeader = '''
<table class = 'accounting'>
'''
resistors = ["R_1206", "R_0805", "R_0603", "R_0402"]
capacitors = ["C_1206", "C_0805", "C_0603", "C_0402"]
def punctuate(value):
if "." in value:
multiplier = (value.strip()[-1])
new_string = "_" + (re.sub("\.", multiplier, value))[:-1]
else:
new_string = "_" + value
return (new_string)
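# Illustrative behaviour of punctuate() (matches the substitution above):
#   punctuate("4.7K") -> "_4K7"   (the multiplier replaces the decimal point)
#   punctuate("100")  -> "_100"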
def get_choice(possible):
print(
"More than one component in the PartKeepr database meets the criteria:"
)
i = 1
for name, description, stockLevel, minStockLevel, averagePrice, partNum, storage_locn, PKid, Manufacturer in possible:
# print(i, " : ", name, " : ", description,
# " [Location] ", storage_locn, " [Part Number] ", partNum, " [Stock] ", stockLevel)
# print(("{:3} {:25} {:50.50} Location {:10} Manf {:12} HPN {:5} Stock {:5} Price(av) {:3s} {:6}").format(i, name, description, storage_locn, Manufacturer, partNum, stockLevel, baseCurrency, averagePrice))
print(i, name, description )
# subprocess.call(['/usr/local/bin/pyparts', 'specs', name])
i = i + 1
print("Choose which component to add to BOM (or 0 to defer)")
while True:
choice = int(input('> '))
if choice == 0:
return (possible)
if choice < 0 or choice > len(possible):
continue
break
i = 1
for name, description, stockLevel, minStockLevel, averagePrice, partNum, storage_locn, PKid, Manufacturer in possible:
possible = (name, description, stockLevel, minStockLevel, averagePrice,
partNum, storage_locn, PKid, Manufacturer)
if i == choice:
possible = (name, description, stockLevel, minStockLevel,
averagePrice, partNum, storage_locn, PKid,
Manufacturer)
print("Selected :")
print(possible[0], " : ", possible[1])
return [possible]
i = i + 1
def find_part(part_num):
dbconfig = read_db_config()
try:
conn = MySQLConnection(**dbconfig)
cursor = conn.cursor()
bean = False
if (part_num[:6]) in resistors:
quality = "Resistance"
variant = "Resistance Tolerance"
bean = True
if (part_num[:6]) in capacitors:
quality = "Capacitance"
variant = "Dielectric Characteristic"
bean = True
if (bean):
component = part_num.split('_')
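            # Illustrative split for a part name of the assumed form
            # "R_0805_100K_±5%": ["R", "0805", "100K", "±5%"] -> case "0805",
            # value 100000 (via convert_units) and tolerance "±5%".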
if (len(component)) <= 2:
print(
"Insufficient parameters (Needs 3 or 4) e.g. R_0805_100K(_±5%)"
)
return ("0")
c_case = component[1]
c_value = convert_units(component[2])
if (len(component)) == 4:
c_characteristics = component[3]
# A fully specified 'bean'
sql = """SELECT DISTINCT P.name, P.description, P.stockLevel, P.minStockLevel, P.averagePrice, P.internalPartNumber, S.name, P.id, M.name
FROM Part P
JOIN PartParameter R ON R.part_id = P.id
JOIN StorageLocation S ON S.id = P.storageLocation_id
LEFT JOIN PartManufacturer PM on PM.part_id = P.id
LEFT JOIN Manufacturer M on M.id = PM.manufacturer_id
WHERE
(R.name = 'Case/Package' AND R.stringValue='{}') OR
(R.name = '{}' AND R.normalizedValue = '{}') OR
(R.name = '{}' AND R.stringValue = '%{}')
GROUP BY P.id, M.id, S.id
HAVING
COUNT(DISTINCT R.name)=3""".format(
c_case, quality, c_value, variant, c_characteristics)
else:
# A partially specified 'bean'
sql = """SELECT DISTINCT P.name, P.description, P.stockLevel, P.minStockLevel, P.averagePrice, P.internalPartNumber, S.name, P.id, M.name
FROM Part P
JOIN PartParameter R ON R.part_id = P.id
JOIN StorageLocation S ON S.id = P.storageLocation_id
LEFT JOIN PartManufacturer PM on PM.part_id = P.id
LEFT JOIN Manufacturer M on M.id = PM.manufacturer_id
WHERE
(R.name = 'Case/Package' AND R.stringValue='{}') OR
(R.name = '{}' AND R.normalizedValue = '{}')
GROUP BY P.id, M.id, S.id
HAVING
COUNT(DISTINCT R.name)=2""".format(
c_case, quality, c_value)
else:
sql = """SELECT DISTINCT P.name, P.description, P.stockLevel, P.minStockLevel, P.averagePrice, P.internalPartNumber, S.name, P.id, M.name
FROM Part P
JOIN StorageLocation S ON S.id = P.storageLocation_id
LEFT JOIN PartManufacturer PM on PM.part_id = P.id
LEFT JOIN Manufacturer M on M.id = PM.manufacturer_id
WHERE P.name LIKE '%{}%'
GROUP BY P.id, M.id, S.id""".format(part_num)
cursor.execute(sql)
components = cursor.fetchall()
return (components, bean)
except UnicodeEncodeError as err:
print(err)
finally:
cursor.close()
conn.close()
###############################################################################
#
# Main part of program follows
#
#
###############################################################################
with open(file_name, newline='', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile, delimiter=',')
headers = reader.fieldnames
filename, file_extension = os.path.splitext(file_name)
outfile = open(
"./assets/web/" + filename + '_PK.csv',
'w',
newline='',
encoding='utf-8')
writeCSV = csv.writer(outfile, delimiter=',', lineterminator='\n')
labelpdf = labelsetup()
# Initialise accounting values
countParts = 0
count_BOMLine = 0
count_NPKP = 0
count_PKP = 0
count_LowStockLines = 0
count_PWP = 0
bomCost = 0
for row in reader:
if not row:
break
new_string = ""
part = row['Part#']
value = row['Value']
footprint = row['Footprint']
datasheet = row['Datasheet']
characteristics = row['Characteristics']
references = row['References']
quantity = row['Quantity Per PCB']
# Need sufficient info to process. Some .csv reprocessing adds in lines
# of NULL placeholders where there was a blank line.
if part == "" and value == "" and footprint == "":
break
count_BOMLine = count_BOMLine + 1
if footprint in resistors:
if value.endswith("Ω"): # Remove trailing 'Ω' (Ohms)
value = (value[:-1])
new_string = punctuate(value)
if footprint in capacitors:
if value.endswith("F"): # Remove trailing 'F' (Farads)
value = (value[:-1])
new_string = punctuate(value)
        if characteristics is not None:
            if characteristics != "-":
                new_string = new_string + "_" + str(characteristics)
if part == "-":
part = (str(footprint) + new_string)
if references is None:
break
component_info, species = find_part(part)
n_components = len(component_info)
quantity = int(quantity)
# Print to screen
print(('{:=<162}').format(""))
print(("| BOM Line number : {:3} {:136} |").format(count_BOMLine, ""))
print(('{:_<162}').format(""))
print(("| {:100} | {:13.13} | | Req = {:5}|"
).format(references, part, quantity))
print(('{:_<162}').format(""))
uniqueNames = []
if n_components == 0:
print(("| {:100} | {:21} | {:16} | {:12} |").format(
"No matching parts in database", "", "", ""))
print(('{:_<162}').format(""))
octopartLookup(part, species)
print('\n\n')
else:
for (name, description, stockLevel, minStockLevel, averagePrice,
partNum, storage_locn, PKid, Manufacturer) in component_info:
ROHS = partStatus(PKid, 'RoHS')
Lifecycle = partStatus(PKid, 'Lifecycle Status')
# Can get rid of loop as never reset now
print((
"| {:100} | Location = {:10} | Part no = {:6} | Stock = {:5}|"
).format(description, storage_locn, partNum, stockLevel))
print(('{:_<162}').format(""))
print(("| Manufacturing status: {} {:<136}|").format(
"", Lifecycle))
print(("| RoHS: {}{:<153}|").format("", ROHS))
print(("| Name: {}{:<153}|").format("", name))
getDistrib(PKid)
print(('{:_<162}').format(""))
octopartLookup(name, species)
print('\n\n')
# More than one matching component exists - prompt user to choose
if len(component_info) >= 2:
component_info = get_choice(component_info)
for (name, description, stockLevel, minStockLevel, averagePrice,
partNum, storage_locn, PKid, Manufacturer) in component_info:
ROHS = partStatus(PKid, 'RoHS')
Lifecycle = partStatus(PKid, 'Lifecycle Status')
if n_components != 0 and (quantity > stockLevel):
count_LowStockLines = count_LowStockLines + 1
background = lowstock # Insufficient stock : pinkish
under.write(name + ',' + description + ',' + str(quantity) + ',' +
str(stockLevel) + ',' + str(minStockLevel) + '\n')
else:
background = adequate # Adequate stock : greenish
countParts = countParts + quantity
quantity = str(quantity)
if run == 0:
preferred = preferred.split(",")
web.write("<th class = 'thmain' colspan = '6'> Component details</th>")
web.write("<th class = 'thmain' colspan ='" + str(len(preferred) + 2) +
"'> Supplier details</th>")
web.write(
"<th class = 'stock' colspan ='5'> PartKeepr Stock</th></tr>")
web.write("""
<tr>
<th class = 'thmain' style = 'width: 2%;'>No</th>
<th class = 'thmain' style = 'width: 5%;'>Part</th>
<th class = 'thmain' style = 'width : 13%;'>Description</th>
<th class = 'thmain' style = 'width : 5%;'>References</th>
<th class = 'thmain' style = 'width : 4%;'>RoHS</th>
<th class = 'thmain' style = 'width : 4%;'>Lifecycle</th>
""")
i = 0
while i < len(preferred):
web.write("<th class = 'thmain' style = 'width : 10%;'>" + preferred[i] + "</th>")
i += 1
web.write("""<th class = 'thmain' style = 'width : 3%;'>Exclude</th>
<th class = 'thmain' style = 'width : 3%;'>Qty</th>
<th class = 'stock' style = 'width : 3%;'>Each</th>
<th class = 'stock' style = 'width : 3%;'>Line</th>
<th class = 'stock' style = 'width : 3%;'>Stock</th>
<th class = 'stock' style = 'width : 3%;'>PAR</th>
<th class = 'stock' style = 'width : 3%;'>Net</th>
              </tr>""")
run = 1
        # No PartKeepr components fit the search criteria. Handle that case here
        # and drop out before the loop. Not ideal, but simpler.
if n_components == 0:
count_NPKP = count_NPKP + 1
averagePrice = 0
background = nopkstock
nopk = [
'*** NON-PARTKEEPR PART ***', part, "", "", "", "", references,
"", "", ""
]
web.write("<tr style = 'background-color : " + background + ";'>")
web.write("<td class = 'lineno'>" + str(count_BOMLine) + " </td>")
web.write(
"<td style = 'background-color : #fff; vertical-align:middle;'><img src = 'assets/img/noimg.png' alt='none'></td>"
)
web.write("<td class = 'partname' ><b>" + part + "</b>")
web.write("<p class = 'descr'>Non PartKeepr component</td>")
web.write("<td class = 'refdes'" + references + "</td>")
web.write("<td class = 'ROHS'>NA</td>")
web.write("<td class = 'ROHS'>NA</td>")
for i, d in enumerate(preferred):
web.write("<td></td>")
web.write("<td class = 'stck'>-</td>")
web.write("<td class = 'stck'> " + quantity + "</td>")
web.write("<td class = 'stck'>-</td>")
web.write("<td class = 'stck'>-</td>")
web.write("<td class = 'stck'>-</td>")
web.write("<td class = 'stck'>-</td>")
web.write("<td class = 'stck'>-</td></tr>")
makepick(nopk, pdf2, count_BOMLine)
missing.write(part + ',' + quantity + ',' + references + '\n')
name = "-"
        if n_components > 1:  # Multiple components fit the search criteria - set brown background
# TODO Make colors configurable in ini
background = multistock
columns = str(len(preferred) + 13)
if len(component_info) == 1:
# More than one line exists but has been disambiguated at runtime
web.write(
"<tr><td colspan='" + columns +
"' style = 'font-weight : bold; background-color : " +
background + ";'><nbsp><nbsp><nbsp><nbsp><nbsp>" +
str(n_components) +
" components meet the selection criteria but the following line was selected at runtime</td></tr>"
)
else:
web.write(
"<tr style = 'background-color : " + background +
";'><td colspan='" + columns +
"' style = 'font-weight : bold;'> <nbsp><nbsp><nbsp><nbsp><nbsp>The following "
+ str(n_components) +
" components meet the selection criteria *** Use only ONE line *** </td></tr>"
)
i = 0
for (name, description, stockLevel, minStockLevel, averagePrice,
partNum, storage_locn, PKid, Manufacturer) in component_info:
if n_components > 1:
web.write("<tr style = 'background-color : " + background + ";'>")
else:
web.write("<tr>")
web.write("<td class = 'lineno'>" + str(count_BOMLine) + " </td>")
if i == 0: # 1st line where multiple components fit search showing RefDes
                name_safe = name.replace("/", "-")
                name_safe = name_safe.replace(" ", "")
                try:
                    f = open("./assets/web/" + name_safe, 'r')
                    imageref = f.read()
                except FileNotFoundError:
                    imageref = ""
try:
imageref, datasheet = imageref.split(',')
except ValueError:
datasheet = ''
web.write("<td class = 'center'>")
if imageref:
web.write("<img src = '" + imageref + "' alt='thumbnail' >")
else:
web.write(
"<img src = 'assets/img/noimg.png' alt='none'>")
imageref = ""
web.write("</td>")
web.write("<td class = 'partname'><a href = 'assets/web/" + name_safe +
".html'>" + name + "</a>")
if Manufacturer:
web.write("<p class='manf'>" + Manufacturer + "</p>")
i = i + 1
count_PKP = count_PKP + 1
else: # 2nd and subsequent lines where multiple components fit search showing RefDes
web.write("<td>")
                name_safe = name.replace("/", "-").replace(" ", "")
                try:
                    f = open("./assets/web/" + name_safe, 'r')
                    imageref = f.read()
                except FileNotFoundError:
                    imageref = ""
if imageref:
web.write("<img src = '" + imageref + "' alt='thumbnail'>")
web.write("<td><a href = 'assets/web/" + name_safe +
".html'>" + name + "</a>")
if Manufacturer:
web.write("<p class = 'manf'" + Manufacturer + "</>")
invalidate_BOM_Cost = True
lineCost = float(averagePrice) * int(quantity)
if lineCost == 0:
count_PWP += 1
web.write("<p class = 'descr'>" + description + "</p>")
if datasheet:
web.write("<a href='" + datasheet +
"'><img class = 'partname' src = 'assets/img/pdf.png' alt='datasheet' ></a>")
web.write("</td>")
web.write("<td class = 'refdes'>" + references + "</td>")
# TODO Probably could be more elegant
if ROHS == "Compliant":
ROHSClass = 'compliant'
_cIndicator = "C_cp"
elif ROHS == "Non-Compliant":
ROHSClass = 'uncompliant'
_cIndicator = "C_nc"
else:
ROHS = "Unknown"
ROHSClass = 'unknown'
_cIndicator = "C_nk"
web.write("<td id ='" + _cIndicator + str(count_BOMLine) +
"' class = '" + ROHSClass + "'>" +
ROHS)
if ROHS == "Compliant":
web.write("<br><br><img src = 'assets/img/rohs-logo.png' alt='compliant'>")
web.write("</td>")
if Lifecycle == "Active":
LifeClass = 'active'
_pIndicator = "P_ac"
elif Lifecycle == "EOL":
LifeClass = 'eol'
_pIndicator = "P_el"
elif Lifecycle == "NA":
Lifecycle = "Unknown"
LifeClass = 'unknown'
_pIndicator = "P_nk"
else:
LifeClass = 'unknown'
_pIndicator = "P_nk"
web.write("<td id = '" + _pIndicator + str(count_BOMLine) +
"' class = '" + LifeClass + "'>" + Lifecycle +
"</td>")
# Part number exists, therefore generate bar code
# Requires Zint >1.4 - doesn't seem to like to write to another directory.
# Ugly hack - write to current directory and move into place.
if partNum:
part_no = partNum
# part_no = (partNum[1:])
subprocess.call([
'/usr/local/bin/zint', '--filetype=png', '--notext', '-w',
'10', '--height', '20', '-o', part_no, '-d', partNum
])
os.rename(part_no + '.png',
'assets/barcodes/' + part_no + '.png')
# Storage location exists, therefore generate bar code. Ugly hack - my location codes start with
# a '$' which causes problems. Name the file without the leading character.
if storage_locn != "":
locn_trim = ""
locn_trim = (storage_locn[1:])
subprocess.call([
'/usr/local/bin/zint', '--filetype=png', '--notext', '-w',
'10', '--height', '20', '-o', locn_trim, '-d', storage_locn
])
os.rename(locn_trim + '.png',
'assets/barcodes/' + locn_trim + '.png')
table, priceRow, coverage, items = getTable(
name_safe, int(quantity), background, count_BOMLine)
web.write(str(table))
costMatrix = [sum(x) for x in zip(priceRow, costMatrix)]
coverageMatrix = [
sum(x) for x in zip(coverage, coverageMatrix)
]
countMatrix = [sum(x) for x in zip(items, countMatrix)]
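                # Element-wise accumulation: zip() pairs this line's per-supplier
                # figures with the running totals and sum() adds each pair, so the
                # cost/coverage/count matrices keep one running total per preferred
                # supplier column.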
# NOTE Do I need to think about building matrix by APPENDING lines.
avPriceFMT = str(('{:3s} {:0,.2f}').format(
baseCurrency, averagePrice))
linePriceFMT = str(('{:3s} {:0,.2f}').format(
baseCurrency, lineCost))
bomCost = bomCost + lineCost
averagePrice = str('{:0,.2f}').format(averagePrice)
lineCost = str('{:0,.2f}').format(lineCost)
web.write("<td class = 'lineno'><input type='checkbox' name='ln"+str(count_BOMLine)+"' value='"+str(count_BOMLine)+"'></td>")
web.write("<td class = 'stck'> " + quantity + "</td>")
web.write("<td class = 'stck'><b>" + averagePrice + "</b> " + baseCurrency + "</td>")
web.write("<td class = 'stck'><b>" + lineCost + "</b> " + baseCurrency + "</td>")
if int(quantity) >= stockLevel:
qtyCol = '#dc322f'
else:
qtyCol = "#859900"
if (int(stockLevel) - int(quantity)) <= int(minStockLevel):
minLevelCol = '#dc322f'
elif (int(stockLevel) -
int(quantity)) <= int(minStockLevel) * 1.2:
minLevelCol = "#cb4b16"
else:
minLevelCol = "#859900"
web.write(
"<td class = 'stck' style = 'font-weight : bold; text-align: right;'>"
+ str(stockLevel) + "</td>")
web.write(
"<td class = 'stck' style = 'font-weight : bold; text-align: right;'>"
+ str(minStockLevel) + "</td>")
web.write(
"<td class = 'stck' style = 'font-weight : bold; text-align: right; color:"
+ minLevelCol + "'>" +
str(int(stockLevel) - int(quantity)) + "</td>")
else:
# No storage location
web.write("<td class = 'stck'>NA</td>")
web.write('</tr>\n')
# Make labels for packets (need extra barcodes here)
name = name.replace("/", "-") # Deal with / in part names
subprocess.call([
'/usr/local/bin/zint', '--filetype=png', '--notext', '-w',
'10', '--height', '20', '-o', name, '-d', name
])
os.rename(name + '.png', 'assets/barcodes/' + name + '.png')
subprocess.call([
'/usr/local/bin/zint', '--filetype=png', '--notext', '-w',
'10', '--height', '20', '-o', quantity, '-d', quantity
])
os.rename(quantity + '.png',
'assets/barcodes/' + quantity + '.png')
# Write out label pdf
lb2 = []
lb2.append(description[:86].upper())
lb2.append(name)
lb2.append(part_no)
lb2.append(storage_locn)
lb2.append(quantity)
lb2.append(stockLevel)
lb2.append(references)
lb2.append(dateBOM)
lb2.append(timeBOM)
lb2.append(filename)
makelabel(lb2, labelCol, labelRow, lblwidth, lblheight, pdf)
makepick(lb2, pdf2, count_BOMLine)
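                # Labels are laid out three across: after filling the third column,
                # wrap to the next label row and restart at column 0.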
labelCol = labelCol + 1
if labelCol == 3:
labelRow += 1
labelCol = 0
# Prevent variables from being recycled
storage_locn = ""
partNum = ""
part_no = ""
qty = str(int(quantity) / numBoards)
writeCSV.writerow([references, name, qty])
references = ""
name = ""
quantity = ""
# Write out footer for webpage
web.write("<tr id = 'supplierTotal'>")
web.write("<td style = 'border: none;'></td>" * 4)
web.write("<td colspan ='2'>Total for Supplier</td>")
for p, d in enumerate(preferred):
web.write("<td style='text-align: right;'><b><span id='t" + str(p) +
"'></span></b><span style='text-align: right;'> " +
baseCurrency + "</span></td>")
web.write("<td style = 'border: none;'></td>" * 7)
web.write("</tr>")
web.write("<tr id = 'linesCoverage'>")
web.write("<td style = 'border: none;'></td>" * 4)
web.write("<td colspan ='2'>Coverage by Lines</td>")
for p, d in enumerate(preferred):
coveragePC = str(int(round((coverageMatrix[p] / count_BOMLine) * 100)))
web.write("<td style='text-align: right;'><span class ='value'>" +
coveragePC + "</span>%</td>")
web.write("<td style = 'border: none;'></td>" * 7)
web.write("</tr>")
web.write("<tr id = 'itemsCoverage'>")
web.write("<td style = 'border: none;'></td>" * 4)
web.write("<td colspan ='2'>Coverage by Items</td>")
for p, d in enumerate(preferred):
countPC = str(int(round((countMatrix[p] / countParts) * 100)))
web.write("<td style='text-align: right;'>" + countPC + "%</td>")
web.write("<td style = 'border: none;'></td>" * 7)
web.write("</tr>")
web.write("<tr id = 'linesSelected'>")
web.write("<td style = 'border: none;'></td>" * 4)
web.write("<td colspan ='2'>Lines selected</td>")
for p, d in enumerate(preferred):
web.write("<td style='text-align: right;'><span id ='cz" + str(p) +
"'></span></td>")
web.write("<td style = 'border: none;'></td>" * 7)
web.write("</tr>")
web.write("<tr id = 'linesPercent'>")
web.write("<td style = 'border: none;'></td>" * 4)
web.write("<td colspan ='2'>Percent selected (by lines)</td>")
for p, d in enumerate(preferred):
web.write("<td style='text-align: right;'><span id ='cc" + str(p) +
"'></span>%</td>")
web.write("<td style = 'border: none;'></td>" * 7)
web.write("</tr>")
web.write("<tr id = 'pricingSelected'>")
web.write("<td style = 'border: none;'></td>" * 4)
web.write("<td colspan ='2'>Total for Selected</td>")
for p, d in enumerate(preferred):
web.write("<td style='text-align: right;'><b><span id='c" + str(p) +
"'></span></b><span style='text-align: right;'> " +
baseCurrency + "</span></td>")
web.write("<td style = 'border: none;'></td>" * 7)
web.write("</tr>")
# I am really struggling with using js arrays .....
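    # A possible array-based refactor of the per-supplier tallies in the script
    # below (sketch only, not applied here): keep one array per metric and loop
    # over the supplier index instead of hand-writing t0..t4 / c0..c4 / cz0..cz4,
    # e.g.
    #   var totals = [0, 0, 0, 0, 0];
    #   for (var s = 0; s < totals.length; s++) {
    #     $("[id*=" + s + "-]:radio").each(function() { totals[s] += Number(this.value); });
    #   }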
script = '''
<script>
$(document).ready(function() {
bomlines = $('#BOMLines').text();
bomlines = Number(bomlines);
numboards = $('#numboards').text();
numboards = Number(numboards);
excluded = $(':checkbox:checked').length;
excluded = Number(excluded);
bomlines = bomlines - excluded;
providers = Number($("#providers th").length) - 15;
var compliance_cp = $("[id*=C_cp]").length;
var compliance_nc = $("[id*=C_nc]").length;
var compliance_nk = $("[id*=C_nk]").length;
var production_ip = $("[id*=P_ac]").length;
var production_el = $("[id*=P_el]").length;
var production_nk = $("[id*=P_nk]").length;
line0 = $('#linesCoverage').find("td:eq(5)").find('span.value').text();
line1 = $('#linesCoverage').find("td:eq(6)").find('span.value').text();
line2 = $('#linesCoverage').find("td:eq(7)").find('span.value').text();
line3 = $('#linesCoverage').find("td:eq(8)").find('span.value').text();
line4 = $('#linesCoverage').find("td:eq(9)").find('span.value').text();
var total = 0;
var totalDisp = 0;
var totalPerBoard = 0;
var t0 = 0;
var t1 = 0;
var t2 = 0;
var t3 = 0;
var t4 = 0;
var c0 = 0;
var c1 = 0;
var c2 = 0;
var c3 = 0;
var c4 = 0;
var cz0 = $("[id*=0-]:radio:checked:not(:hidden)").length;
var cz1 = $("[id*=1-]:radio:checked:not(:hidden)").length;
var cz2 = $("[id*=2-]:radio:checked:not(:hidden)").length;
var cz3 = $("[id*=3-]:radio:checked:not(:hidden)").length;
var cz4 = $("[id*=4-]:radio:checked:not(:hidden)").length;
var cc0 = Math.round(Number(cz0) / bomlines * 100);
var cc1 = Math.round(Number(cz1) / bomlines * 100);
var cc2 = Math.round(Number(cz2) / bomlines * 100);
var cc3 = Math.round(Number(cz3) / bomlines * 100);
var cc4 = Math.round(Number(cz4) / bomlines * 100);
$(":radio:checked").each(function() {
total += Number(this.value);
total = Math.round(total * 100) / 100;
totalDisp = total.toFixed(2);
totalPerBoard = (total/numboards).toFixed(2);
});
$("[id*=0-]:radio:checked").each(function() {
c0 += Number(this.value);
c0 = Math.round(c0 * 100) / 100;
});
$("[id*=0-]:radio").each(function() {
t0 += Number(this.value);
t0 = Math.round(t0 * 100) / 100;
});
$("[id*=1-]:radio:checked").each(function() {
c1 += Number(this.value);
c1 = Math.round(c1 * 100) / 100;
});
$("[id*=1-]:radio").each(function() {
t1 += Number(this.value);
t1 = Math.round(t1 * 100) / 100;
});
$("[id*=2-]:radio:checked").each(function() {
c2 += Number(this.value);
c2 = Math.round(c2 * 100) / 100;
});
$("[id*=2-]:radio").each(function() {
t2 += Number(this.value);
t2 = Math.round(t2 * 100) / 100;
});
$("[id*=3-]:radio:checked").each(function() {
c3 += Number(this.value);
c3 = Math.round(c3 * 100) / 100;
});
$("[id*=3-]:radio:checked").each(function() {
t3 += Number(this.value);
t3 = Math.round(t3 * 100) / 100;
});
$("[id*=4-]:radio:checked").each(function() {
c4 += Number(this.value);
c4 = Math.round(c4 * 100) / 100;
});
$("[id*=4-]:radio:checked").each(function() {
t4 += Number(this.value);
t4 = Math.round(t4 * 100) / 100;
});
var t0Disp = t0.toFixed(2);
var t1Disp = t1.toFixed(2);
var t2Disp = t2.toFixed(2);
var t3Disp = t3.toFixed(2);
var t4Disp = t4.toFixed(2);
var c0Disp = c0.toFixed(2);
var c1Disp = c1.toFixed(2);
var c2Disp = c2.toFixed(2);
var c3Disp = c3.toFixed(2);
var c4Disp = c4.toFixed(2);
$("#total").text(totalDisp);
$("#totalPerBoard").text(totalPerBoard);
$("#c0").text(c0Disp);
$("#c1").text(c1Disp);
$("#c2").text(c2Disp);
$("#c3").text(c3Disp);
$("#c4").text(c4Disp);
$("#cz0").text(cz0);
$("#cz1").text(cz1);
$("#cz2").text(cz2);
$("#cz3").text(cz3);
$("#cz4").text(cz4);
$("#cc0").text(cc0);
$("#cc1").text(cc1);
$("#cc2").text(cc2);
$("#cc3").text(cc3);
$("#cc4").text(cc4);
$("#t0").text(t0Disp);
$("#t1").text(t1Disp);
$("#t2").text(t2Disp);
$("#t3").text(t3Disp);
$("#t4").text(t4Disp);
var options =
{
responsive: false,
maintainAspectRatio: false,
scales: {
yAxes: [{
ticks: {
beginAtZero:true
}
}]
}
};
var ctx = document.getElementById('pricing').getContext('2d');
var datapoints = [t0, t1, t2, t3, t4];
var chart = new Chart(ctx, {
// The type of chart we want to create
type: 'bar',
// The data for our dataset
data: {
labels: ["Newark", "Farnell", "DigiKey", "RS Components", "Mouser"],
datasets: [{
label: "Pricing",
backgroundColor: [
'rgb(133, 153, 0)',
'rgb(42, 161, 152)',
'rgb(38, 139, 210)',
'rgb(108, 113, 196)',
'rgb(211, 54, 130)',
'rgb(220, 50, 47)'
],
borderColor: 'rgb(88, 110, 117)',
data: datapoints,
}]
},
options: {
legend: {
display: false
},
title: {
display: true,
text: 'Cost of BOM by provider'
}
}
});
var ctx2 = document.getElementById('coverage').getContext('2d');
var coverage = [line0, line1, line2, line3, line4];
var chart = new Chart(ctx2, {
type: 'bar',
data: {
labels: ["Newark", "Farnell", "DigiKey", "RS Components", "Mouser"],
datasets: [{
label: "Coverage",
backgroundColor: [
'rgb(133, 153, 0)',
'rgb(42, 161, 152)',
'rgb(38, 139, 210)',
'rgb(108, 113, 196)',
'rgb(211, 54, 130)',
'rgb(220, 50, 47)'
],
borderColor: 'rgb(88, 110, 117)',
data: coverage,
}]
},
options: {
legend: {
display: false
},
title: {
display: true,
text: 'Coverage of BOM by Lines (%)'
}
}
});
var ctx3 = document.getElementById('compliance').getContext('2d');
var compliant = [compliance_cp, compliance_nc, compliance_nk]
var data1 = {
datasets: [{
backgroundColor: [
'#2aa198',
'#586e75',
'#073642'],
data: compliant,
}],
labels: [
'Compliant',
'Non-Compliant',
'Unknown'
]
};
var compliance = new Chart(ctx3, {
type: 'doughnut',
data: data1,
options: {
title: {
display: true,
text: 'ROHS Compliance'
}
}
});
var ctx4 = document.getElementById('lifecycle').getContext('2d');
var life = [production_ip, production_el, production_nk]
var data2 = {
datasets: [{
backgroundColor: [
'#2aa198',
'#d33682',
'#073642'],
data: life
}],
labels: [
'Active',
'EOL',
'Unknown'
]
};
var lifecycle = new Chart(ctx4, {
type: 'doughnut',
data: data2,
options: {
title: {
display: true,
text: 'Production Status'
}
}
});
});
$(":radio, :checkbox").on("change", function() {
var t0 = 0;
var t1 = 0;
var t2 = 0;
var t3 = 0;
var t4 = 0;
var c0 = 0;
var c1 = 0;
var c2 = 0;
var c3 = 0;
var c4 = 0;
$('#main tr').filter(':has(:checkbox)').find('radio,.td,.min,.mid,.radbut,.min a,.min p,.mid a,.mid p, .ambig, .ambig a, .ambig p, .null p').removeClass("deselected");
$('#main tr').filter(':has(:checkbox)').find('.icon').show();
$('#main tr').filter(':has(:checkbox)').find(':radio').show();
$('#main tr').filter(':has(:checkbox:checked)').find('radio,.td,.min,.mid,.radbut,.min a,.min p,.mid a,.mid p,.ambig,.ambig a,.ambig p,.null p').addClass("deselected");
$('#main tr').filter(':has(:checkbox:checked)').find('.icon').hide();
$('#main tr').filter(':has(:checkbox:checked)').find(':radio').hide();
var compliance_cp = $("[id*=C_cp]").length;
var compliance_nc = $("[id*=C_nc]").length;
var compliance_nk = $("[id*=C_nk]").length;
var production_ip = $("[id*=P_ac]").length;
var production_el = $("[id*=P_el]").length;
var production_nk = $("[id*=P_nk]").length;
line0 = $('#linesCoverage').find("td:eq(5)").find('span.value').text();
line1 = $('#linesCoverage').find("td:eq(6)").find('span.value').text();
line2 = $('#linesCoverage').find("td:eq(7)").find('span.value').text();
line3 = $('#linesCoverage').find("td:eq(8)").find('span.value').text();
line4 = $('#linesCoverage').find("td:eq(9)").find('span.value').text();
bomlines = $('#BOMLines').text();
bomlines = Number(bomlines);
numboards = $('#numboards').text();
numboards = Number(numboards);
excluded = $(':checkbox:checked').length;
excluded = Number(excluded);
bomlines = bomlines - excluded;
var total = 0;
var totalDisp = 0;
var totalPerBoard = 0;
var cz0 = $("[id*=0-]:radio:checked:not(:hidden)").length;
var cz1 = $("[id*=1-]:radio:checked:not(:hidden)").length;
var cz2 = $("[id*=2-]:radio:checked:not(:hidden)").length;
var cz3 = $("[id*=3-]:radio:checked:not(:hidden)").length;
var cz4 = $("[id*=4-]:radio:checked:not(:hidden)").length;
var cc0 = Math.round(Number(cz0) / bomlines * 100);
var cc1 = Math.round(Number(cz1) / bomlines * 100);
var cc2 = Math.round(Number(cz2) / bomlines * 100);
var cc3 = Math.round(Number(cz3) / bomlines * 100);
var cc4 = Math.round(Number(cz4) / bomlines * 100);
$(":radio:checked:not(:hidden)").each(function() {
total += Number(this.value);
total = Math.round(total * 100) / 100;
totalDisp = total.toFixed(2);
totalPerBoard = (total/numboards).toFixed(2);
});
$("[id*=0-]:radio:checked:not(:hidden)").each(function() {
c0 += Number(this.value);
c0 = Math.round(c0 * 100) / 100;
});
$("[id*=0-]:radio:not(:hidden)").each(function() {
t0 += Number(this.value);
t0 = Math.round(t0 * 100) / 100;
});
$("[id*=1-]:radio:checked:not(:hidden)").each(function() {
c1 += Number(this.value);
c1 = Math.round(c1 * 100) / 100;
});
$("[id*=1-]:radio:not(:hidden)").each(function() {
t1 += Number(this.value);
t1 = Math.round(t1 * 100) / 100;
});
$("[id*=2-]:radio:checked:not(:hidden)").each(function() {
c2 += Number(this.value);
c2 = Math.round(c2 * 100) / 100;
});
$("[id*=2-]:radio:not(:hidden)").each(function() {
t2 += Number(this.value);
t2 = Math.round(t2 * 100) / 100;
});
$("[id*=3-]:radio:checked:not(:hidden)").each(function() {
c3 += Number(this.value);
c3 = Math.round(c3 * 100) / 100;
});
$("[id*=3-]:radio:checked:not(:hidden)").each(function() {
t3 += Number(this.value);
t3 = Math.round(t3 * 100) / 100;
});
$("[id*=4-]:radio:checked:not(:hidden)").each(function() {
c4 += Number(this.value);
c4 = Math.round(c4 * 100) / 100;
});
$("[id*=4-]:radio:checked:not(:hidden)").each(function() {
t4 += Number(this.value);
t4 = Math.round(t4 * 100) / 100;
});
var t0Disp = t0.toFixed(2);
var t1Disp = t1.toFixed(2);
var t2Disp = t2.toFixed(2);
var t3Disp = t3.toFixed(2);
var t4Disp = t4.toFixed(2);
var c0Disp = c0.toFixed(2);
var c1Disp = c1.toFixed(2);
var c2Disp = c2.toFixed(2);
var c3Disp = c3.toFixed(2);
var c4Disp = c4.toFixed(2);
$("#total").text(totalDisp);
$("#totalPerBoard").text(totalPerBoard);
$("#c0").text(c0Disp);
$("#c1").text(c1Disp);
$("#c2").text(c2Disp);
$("#c3").text(c3Disp);
$("#c4").text(c4Disp);
$("#cz0").text(cz0);
$("#cz1").text(cz1);
$("#cz2").text(cz2);
$("#cz3").text(cz3);
$("#cz4").text(cz4);
$("#cc0").text(cc0);
$("#cc1").text(cc1);
$("#cc2").text(cc2);
$("#cc3").text(cc3);
$("#cc4").text(cc4);
$("#t0").text(t0Disp);
$("#t1").text(t1Disp);
$("#t2").text(t2Disp);
$("#t3").text(t3Disp);
$("#t4").text(t4Disp);
var options = {};
var ctx = document.getElementById('pricing').getContext('2d');
var datapoints = [t0, t1, t2, t3, t4];
var chart = new Chart(ctx, {
// The type of chart we want to create
type: 'bar',
// The data for our dataset
data: {
labels: ["Newark", "Farnell", "DigiKey", "RS Components", "Mouser"],
datasets: [{
label: "Pricing",
backgroundColor: [
'rgb(133, 153, 0)',
'rgb(42, 161, 152)',
'rgb(38, 139, 210)',
'rgb(108, 113, 196)',
'rgb(211, 54, 130)',
'rgb(220, 50, 47)'
],
borderColor: 'rgb(88, 110, 117)',
data: datapoints,
}]
},
options: {
legend: {
display: false
},
title: {
display: true,
text: 'Cost of BOM by provider'
}
}
});
var ctx2 = document.getElementById('coverage').getContext('2d');
var coverage = [line0, line1, line2, line3, line4];
var chart = new Chart(ctx2, {
type: 'bar',
data: {
labels: ["Newark", "Farnell", "DigiKey", "RS Components", "Mouser"],
datasets: [{
label: "Coverage",
backgroundColor: [
'rgb(133, 153, 0)',
'rgb(42, 161, 152)',
'rgb(38, 139, 210)',
'rgb(108, 113, 196)',
'rgb(211, 54, 130)',
'rgb(220, 50, 47)'
],
borderColor: 'rgb(88, 110, 117)',
data: coverage,
}]
},
options: {
legend: {
display: false
},
title: {
display: true,
text: 'Coverage of BOM by Lines (%)'
}
}
});
});
</script>'''
footer = '''
<br><p>
<a href="http://jigsaw.w3.org/css-validator/check/referer">
<img style="border:0;width:88px;height:31px"
src="http://jigsaw.w3.org/css-validator/images/vcss"
alt="Valid CSS!" />
</a>
</p>'''
web.write("</table></body>" + script + "</html>")
# Now script has run, construct table with part counts & costs etc.
bomCostDisp = str(('{:3s} {:0,.2f}').format(baseCurrency, bomCost))
bomCostBoardDisp = str(('{:3s} {:0,.2f}').format(baseCurrency, bomCost/numBoards))
currency = str(('{:3s}').format(baseCurrency))
accounting.write("<tr class = 'accounting'>")
accounting.write("<td colspan = 2> Total number of boards </td>")
accounting.write("<td style='text-align:right;' id = 'numboards'>" + str(numBoards) +
"</td>")
accounting.write("</tr> <tr class = 'accounting'>")
accounting.write("<td colspan = 2> Total parts </td>")
accounting.write("<td style='text-align:right;'>" + str(countParts) +
"</td>")
accounting.write("</tr> <tr class = 'accounting'>")
accounting.write("<td colspan = 2>BOM Lines </td>")
accounting.write("<td style='text-align:right;'><span id='BOMLines'>" +
str(count_BOMLine) + "</span></td>")
accounting.write("</tr> <tr class = 'accounting'>")
accounting.write("<td colspan = 2> Non-PartKeepr Parts </td>")
accounting.write("<td style='text-align:right;' id='npkParts'>" + str(count_NPKP) +
"</td>")
accounting.write("</tr> <tr class = 'accounting'>")
accounting.write("<td colspan = 2> PartKeepr Parts</td>")
accounting.write("<td style='text-align:right;' id='pkParts'>" + str(count_PKP) +
"</td>")
accounting.write("</tr> <tr class = 'accounting'>")
accounting.write("<td colspan = 2> Parts without pricing info </td>")
accounting.write("<td style='text-align:right;'>" + str(count_PWP) +
"</td>")
accounting.write("</tr> <tr class = 'accounting'>")
accounting.write("<td colspan = 2> Low Stock </td>")
accounting.write("<td style='text-align:right;'>" +
str(count_LowStockLines) + "</td></tr>")
accounting.write("<tr class = 'accounting'><td></td><td></td><td></td></tr>")
accounting.write("<tr class = 'accounting'><td></td>")
accounting.write("<td style='text-align:right; font-weight: bold;'> Total </td>")
accounting.write("<td style='text-align:right; font-weight: bold;'> Per board</td>")
accounting.write("</tr> <tr class = 'accounting'>")
accounting.write("<td> Inventory prices </td>")
if not invalidate_BOM_Cost:
accounting.write("<td style='text-align:right'>" + bomCostDisp +
" </td>")
accounting.write("<td style='text-align:right' >" + bomCostBoardDisp +
"</td>")
else:
accounting.write("<td class = 'accounting'>BOM price not calculated </td> ")
accounting.write("</tr> <tr class = 'accounting'>")
accounting.write("<td>Price from selected </td><td style='text-align:right'>" +
currency + " <span id='total'></span></td>")
accounting.write("<td style='text-align:right'>" + currency + " <span id='totalPerBoard'></span></td></table>")
accounting.write(
"<div class = 'canvas'><canvas id='pricing'></canvas></div>")
accounting.write(
"<div class = 'canvas'><canvas id='coverage'></canvas></div>")
accounting.write(
"<div class = 'canvas2'><canvas id='compliance'></canvas></div>")
accounting.write(
"<div class = 'canvas2'><canvas id='lifecycle'></canvas></div>")
# Assemble webpage
pdf.output('labels.pdf', 'F')
pdf2.output('picklist.pdf', 'F')
web = open("assets/web/temp.html", "r")
web_out = open("webpage.html", "w")
accounting = open("assets/web/accounting.html", "r")
accounting = accounting.read()
htmlBody = web.read()
web_out.write(htmlHeader)
web_out.write(htmlAccountingHeader + accounting)
web_out.write(htmlBodyHeader + htmlBody)
print('Starting server...')
PORT = 8109
Handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(("", PORT), Handler, bind_and_activate=False)
httpd.allow_reuse_address = True
httpd.server_bind()
httpd.server_activate()
print('Running server...')
    print('Type Ctrl+C to halt')
try:
webbrowser.open('http://localhost:'+str(PORT)+'/webpage.html')
httpd.serve_forever()
except KeyboardInterrupt:
        httpd.server_close()
|
def fact(n):
    """Return n! computed recursively."""
    return 1 if n == 0 else n * fact(n - 1)


def p(k, l):
    """Poisson probability mass function: P(X = k) = l**k * e**(-l) / k!."""
    e = 2.71828  # Approximation of e, kept as a literal to preserve the original rounding behaviour
    return (l ** k) * (e ** -l) / fact(k)


l = float(input())  # Mean rate (lambda)
k = int(input())  # Number of occurrences
print(round(p(k, l), 3))
|
#!/usr/bin/env python3
import logging
try:
import discord
from discord.ext import commands
except ImportError:
print("Discord.py is required. See the README for instructions on installing it.")
exit(1)
from cogs import get_extensions
from constants import colors, info
from utils import l, LOG_SEP
import utils
LOG_LEVEL_API = logging.WARNING
LOG_LEVEL_BOT = logging.INFO
LOG_FMT = "[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s"
if info.DAEMON:
logging.basicConfig(format=LOG_FMT, filename='bot.log')
else:
logging.basicConfig(format=LOG_FMT)
logging.getLogger('discord').setLevel(LOG_LEVEL_API)
l.setLevel(LOG_LEVEL_BOT)
class Bot(commands.Bot):
def __init__(self, **kwargs):
super().__init__(
command_prefix=info.COMMAND_PREFIX,
case_insensitive=True,
description=kwargs.pop('description'),
status=discord.Status.dnd
)
self.app_info = None
self.cogs_loaded = set()
async def ready_status(self):
await self.change_presence(status=discord.Status.online)
async def on_connect(self):
l.info(f"Connected as {self.user}")
await self.change_presence(status=discord.Status.idle)
async def on_ready(self):
self.app_info = await self.application_info()
l.info(LOG_SEP)
l.info(f"Logged in as: {self.user.name}")
l.info(f"discord.py: {discord.__version__}")
l.info(f"Owner: {self.app_info.owner}")
l.info(LOG_SEP)
await self.load_all_extensions()
await self.ready_status()
async def on_resumed(self):
l.info("Resumed session.")
await self.ready_status()
async def load_all_extensions(self, reload=False):
"""Attempt to load all .py files in cogs/ as cog extensions.
Return a dictionary which maps cog names to a boolean value (True =
successfully loaded; False = not successfully loaded).
"""
succeeded = {}
disabled_extensions = set()
if not info.DEV:
disabled_extensions.add('tests')
for extension in get_extensions(disabled=disabled_extensions):
try:
if reload or extension not in self.cogs_loaded:
self.load_extension(f'cogs.{extension}')
l.info(f"Loaded extension '{extension}'")
self.cogs_loaded.add(extension)
succeeded[extension] = True
except Exception as exc:
l.error(f"Failed to load extension {extension!r} due to {type(exc).__name__}: {exc}")
if hasattr(exc, 'original'):
l.error(f"More details: {type(exc.original).__name__}: {exc.original}")
succeeded[extension] = False
if succeeded:
l.info(LOG_SEP)
return succeeded
async def on_guild_join(self, guild):
"""This event triggers when the bot joins a guild."""
l.info(f"Joined {guild.name} with {guild.member_count} users!")
async def on_message(self, message):
"""This event triggers on every message received by the bot, including
ones that it sent itself.
"""
if message.author.bot:
return # Ignore all bots.
if message.content.startswith(self.user.mention):
prefix = info.COMMAND_PREFIX
description = f"Hi! I'm {self.user.mention}, {info.DESCRIPTION[0].lower()}{info.DESCRIPTION[1:]}."
description += f" Type `{prefix}help` to get general bot help, `{prefix}help <command>` to get help for a specific command, and `!about` for general info about me."
await message.channel.send(embed=discord.Embed(
color=colors.INFO,
description=description,
))
else:
await self.process_commands(message)
async def on_command_error(self, exc, *args, **kwargs):
await utils.error_handling.on_command_error(exc, *args, **kwargs)
if __name__ == '__main__':
bot = Bot(description=info.DESCRIPTION)
try:
bot.run(info.TOKEN)
except discord.errors.LoginFailure:
print(f"Please specify a proper bot token in {info.CONFIG.filepath}.")
exit(1)
|