content stringlengths 5 1.05M |
|---|
from selenium.webdriver.common.by import By
from pom.base import Base
from time import sleep
class HomePage(Base):
    """Page object for the Baidu home page."""

    # Locator for the search input box.
    input_locator = (By.ID, 'kw')
    # Locator for the search button.
    btn_locator = (By.ID, 'su')
    # Locator for the first search-result link.
    item_locator = (By.XPATH, '//*[@id="1"]/h3/a')

    def search(self, text):
        """Search Baidu for *text* and open the first result.

        :param text: the text to search for
        :return: None
        """
        # Open the browser on the Baidu home page.
        self.open_browser('http://www.baidu.com')
        self.wait(20)
        # Type the query into the search box.
        self.input_text(*self.input_locator, text=text)
        # Submit the search.
        self.click(*self.btn_locator)
        # Give the result page time to render before clicking through.
        sleep(2)
        self.click(*self.item_locator)
|
#!/usr/bin/env python
'''XML Namespace Constants'''
class Namespaces():
    '''Static data on XML namespaces used by the media documents.'''

    # Prefix -> namespace URI mapping.
    ns = {
        "media": "http://vectortron.com/xml/media/media",
        "movie": "http://vectortron.com/xml/media/movie"
    }

    @classmethod
    def nsf(cls, in_tag):
        '''Return the fully qualified '{uri}' form for a namespace prefix.

        :param in_tag: prefix key present in ``cls.ns`` (e.g. ``"media"``)
        :raises KeyError: if the prefix is unknown
        '''
        # Use cls (not the hard-coded class name) so subclasses that
        # override `ns` are honoured.
        return '{' + cls.ns[in_tag] + '}'

    @classmethod
    def ns_strip(cls, in_tag):
        '''Strip a '{uri}' namespace qualifier from an element tag.

        Tags without a namespace are returned unchanged (the original
        raised IndexError for them).
        '''
        prefix, sep, local = in_tag.partition("}")
        return local if sep else in_tag
|
from subprocess import Popen, PIPE
import os
import shlex
import sys
import kosh
import hashlib
import numpy
from .wrapper import KoshScriptWrapper # noqa
def compute_fast_sha(uri, n_samples=10):
    """Compute a fast 'almost' unique identifier for a given uri

    Assumes the uri is a path to a file, otherwise simply return hexdigest
    of sha256 on the uri string.

    If uri path is valid the 'fast' sha is used by creating an hashlib from
    * file size
    * file first 2kb
    * file last 2kb
    * 2k samples read from `n_samples` evenly spaced in the file

    Warning if size is unchanged and data is changed somewhere else than
    those samples the sha will be identical.

    :param uri: URI to compute fast_sha on
    :type uri: str
    :param n_samples: Number of samples to extract from uri (in addition to beg and end of file)
    :type n_samples: int
    :return sha: hexdigested sha
    :rtype: str
    """
    if not os.path.exists(uri):
        # Not a file on disk: hash the uri string itself.
        sha = hashlib.sha256(uri.encode())
        return sha.hexdigest()
    with open(uri, "rb") as f:
        stats = os.fstat(f.fileno())
        size = stats.st_size
        # Seed the hash with the file size so same-content files of
        # different length never collide.
        sha = hashlib.sha256("{}".format(size).encode())
        # Create list of start-read positions, evenly spaced, clamped at 0
        # so small files do not produce negative seek offsets.
        positions = [int(max(x, 0))
                     for x in numpy.linspace(0, size - 2048, n_samples + 2)]
        prev = -1
        for pos in positions:
            # Small file will have multiple times the same bit to read;
            # skip duplicated positions.
            if pos != prev:
                # Go there
                f.seek(pos)
                # read some small chunk
                st = f.read(2048)
                prev = pos
                sha.update(st)
    return sha.hexdigest()
def compute_long_sha(uri, buff_size=65536):
    """Compute the full sha256 digest of the file at `uri`, streaming it
    in fixed-size chunks so memory use stays constant.

    :param uri: URI to compute fast_sha on
    :type uri: str
    :param buff_size: How much data to read at once
    :type buff_size: int
    :return sha: hexdigested sha
    :rtype: str
    """
    hasher = hashlib.sha256()
    with open(uri, "rb") as stream:
        # iter() with a b"" sentinel stops cleanly at end-of-file.
        for chunk in iter(lambda: stream.read(buff_size), b""):
            hasher.update(chunk)
    return hasher.hexdigest()
def create_new_db(name, engine='sina', db='sql',
                  token="", keyspace=None, cluster=None):
    """create_new_db creates a new Kosh database, adds a single user

    :param name: name of database
    :type name: str
    :param engine: engine to use, defaults to 'sina'
    :type engine: str, optional
    :param db: type of database for engine, defaults to 'sql', can be 'cass'
    :type db: str, optional
    :param token: for cassandra connection, token to use, defaults to "" means try to retrieve from user home dir
    :type token: str, optional
    :param keyspace: for cassandra keyspace to use, defaults to None means [user]_k
    :type keyspace: str, optional
    :param cluster: list of Casandra clusters to use
    :type cluster: list of str
    :return store: An handle to the Kosh store created
    :rtype: KoshStoreClass
    """
    user = os.environ["USER"]
    # Make sure sql databases carry the .sql suffix.
    if db == 'sql' and name[-4:].lower() != ".sql":
        name += ".sql"
    if engine == "sina":
        cmd = "{}/init_sina.py --user={} --sina={} --sina_db={}".format(
            sys.prefix + "/bin",
            user,
            db,
            name)
    elif engine == 'cassandra':
        if keyspace is None:
            keyspace = user + "_k"
        # Bug fix: the implicit string concatenation used to fuse
        # "--token=X" and "--keyspace=..." into one argument; a space is
        # required between them.
        cmd = "{}/init_cassandra.py --user={} --token={} " \
              "--keyspace={} --tables_root={} --cluster={}".format(
                  sys.prefix + "/bin",
                  user,
                  token,
                  keyspace,
                  db,
                  cluster)
    p = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
    o, e = p.communicate()
    if engine == "sina":
        return kosh.KoshStore(engine="sina", db_uri=name)
    # NOTE(review): non-sina engines fall through and return None —
    # presumably a store handle should be returned here too; confirm
    # upstream before changing.
|
#!/usr/bin/env python
#
# To start off, we just want to make a map out of tiles.
#
# Simon Heath
# 15/9/2005
import os, pygame
from pygame.locals import *
if not pygame.font: print 'Warning, fonts disabled'
if not pygame.mixer: print 'Warning, sound disabled'
# Test tile image
if pygame.image.get_extended():
IMAGE = pygame.image.load( "images/tiletest2.png" )
else:
raise "Waaah! No extended images!"
# We want an object to build a big map.
class Map:
    """A grid of tile images, x columns by y rows, all initialised to the
    shared test tile."""
    def __init__( s ):
        s.tile = IMAGE
        # Tile dimensions in pixels.
        s.w, s.h = s.tile.get_size()
        s.x = 24
        s.y = 24
        # grid[row][col] holds the tile image at that cell.
        s.grid = [[s.tile for i in range( s.x )] for j in range( s.y )]

    def setTile( s, x, y, t ):
        """Place tile image t at column x, row y."""
        s.grid[y][x] = t

    def getTile( s, x, y ):
        """Return the tile image at column x, row y."""
        # Bug fix: the original body evaluated s.grid[y][x] but never
        # returned it, so getTile always yielded None.
        return s.grid[y][x]
class MapDrawer:
    """Blits every tile of a Map onto a target surface."""
    def __init__( s, map, surf ):
        s.map = map
        s.surf = surf
        # The rect it blits to
        s.screenRect = ()
        # The window of tiles it's blitting
        s.screenWindow = ()

    def draw( s ):
        # Walk the grid row by row. Tiles step w - 1 pixels horizontally
        # (adjacent columns overlap by one pixel); rows are stacked at the
        # full tile height.
        tile_y = 0
        for row in s.map.grid:
            tile_x = 0
            for tile in row:
                s.surf.blit( tile, (tile_x, tile_y) )
                tile_x += s.map.w - 1
            tile_y += s.map.h
def main():
    """Set up pygame, then redraw the map every frame until the window is
    closed or Escape/Q is pressed."""
    pygame.init()
    screen = pygame.display.set_mode( (640, 480) )
    clock = pygame.time.Clock()
    m = Map()
    md = MapDrawer( m, screen )
    while 1:
        # Cap the loop at 60 frames per second.
        clock.tick(60)
        md.draw()
        pygame.display.flip()
        print "Looped!"
        # Exit on window close, Escape, or Q.
        for event in pygame.event.get():
            if event.type == QUIT:
                return
            elif event.type == KEYDOWN and (event.key == K_ESCAPE \
                                            or event.key == K_q):
                return

if __name__ == '__main__': main()
|
from datetime import timedelta
from hypothesis import settings, Verbosity
# Hypothesis test profiles: "default" for local runs, "ci" with a longer
# deadline for slower shared machines, and "debug" for verbose
# single-example investigation. Registration order matches the original.
_PROFILES = {
    "default": dict(max_examples=10,
                    deadline=timedelta(milliseconds=1000),
                    database=None),
    "ci": dict(max_examples=10,
               deadline=timedelta(milliseconds=10000),
               database=None),
    "debug": dict(max_examples=1,
                  verbosity=Verbosity.verbose,
                  deadline=None,
                  database=None),
}
for _name, _kwargs in _PROFILES.items():
    settings.register_profile(_name, **_kwargs)
|
from dtc.enums.message_types import MessageTypes
from lib.base_message_type import BaseMessageType
class AccountBalanceAdjustmentReject(BaseMessageType):
    """DTC message sent when an account-balance adjustment request is
    rejected."""

    def __init__(self,
                 request_id=None,
                 reject_text=None):
        self.Type = MessageTypes.ACCOUNT_BALANCE_ADJUSTMENT_REJECT
        self.RequestID = request_id
        self.RejectText = reject_text

    @staticmethod
    def from_message_short(message_obj):
        """Build from the compact wire form: fields arrive positionally
        under the 'F' key."""
        fields = message_obj.get('F')
        return AccountBalanceAdjustmentReject(
            request_id=fields[0],
            reject_text=fields[1],
        )

    @staticmethod
    def from_message_long(message_obj):
        """Build from the verbose wire form with named fields."""
        return AccountBalanceAdjustmentReject(
            request_id=message_obj.get('RequestID'),
            reject_text=message_obj.get('RejectText'),
        )

    @staticmethod
    def from_message(message_obj):
        """Dispatch to the short or long decoder based on the 'F' key."""
        decoder = (AccountBalanceAdjustmentReject.from_message_short
                   if 'F' in message_obj
                   else AccountBalanceAdjustmentReject.from_message_long)
        return decoder(message_obj)

    @staticmethod
    def get_message_type_name():
        """Name identifying this message type on the wire."""
        return "AccountBalanceAdjustmentReject"
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.framework.tensor import Tensor
from oneflow.nn.module import Module
class Gather_nd(Module):
    """Module wrapper around the functional gather_nd operation."""

    def __init__(self) -> None:
        super().__init__()

    def forward(self, input, index):
        # Delegate directly to the functional implementation.
        return flow.F.gather_nd(input, index)
def gather_nd_op(input, index):
    """This operator is a high-dimensional extension of `gather`, `index` is a K-dimensional
    tensor, which is regarded as an index of the input Tensor `input`.

    Each element defines a slice of `input`:

    .. math::

        output[i_{0},i_{1},...,i_{K-2}] = input[index(i_{0},i_{1},...,i_{K-2})]

    Args:
        input: The input Tensor.
        index: The slice indices.

    For example:

    .. code-block:: python

        >>> import oneflow as flow
        >>> import numpy as np
        >>> input = flow.Tensor(np.array([[1, 2,3], [4, 5,6],[7,8,9]]), dtype=flow.float)
        >>> index_1 = flow.Tensor(np.array([[0], [2]]), dtype=flow.int)
        >>> out_1 = flow.gather_nd(input,index_1)
        >>> print(out_1.shape)
        flow.Size([2, 3])
        >>> out_1
        tensor([[1., 2., 3.],
                [7., 8., 9.]], dtype=oneflow.float32)
        >>> index_2 = flow.Tensor(np.array([[0,2], [2,1]]), dtype=flow.int)
        >>> out_2 = flow.gather_nd(input,index_2)
        >>> out_2
        tensor([3., 8.], dtype=oneflow.float32)
    """
    # Instantiate the module wrapper and apply it immediately.
    op = Gather_nd()
    return op(input, index)
if __name__ == "__main__":
    import doctest

    # Run the docstring examples above as tests; fail fast on any error.
    doctest.testmod(raise_on_error=True)
|
# coding=utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metrics.classification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tf_slim.metrics import classification
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def setUpModule():
  # These tests rely on placeholders and sessions, which require TF1
  # graph mode rather than eager execution.
  tf.disable_eager_execution()
class ClassificationTest(test.TestCase):
  """Tests for classification.accuracy across dtypes and weighting."""

  def testAccuracy1D(self):
    with self.cached_session() as session:
      pred = array_ops.placeholder(dtypes.int32, shape=[None])
      labels = array_ops.placeholder(dtypes.int32, shape=[None])
      acc = classification.accuracy(pred, labels)
      result = session.run(acc,
                           feed_dict={pred: [1, 0, 1, 0],
                                      labels: [1, 1, 0, 0]})
      self.assertEqual(result, 0.5)

  def testAccuracy1DBool(self):
    with self.cached_session() as session:
      pred = array_ops.placeholder(dtypes.bool, shape=[None])
      labels = array_ops.placeholder(dtypes.bool, shape=[None])
      acc = classification.accuracy(pred, labels)
      result = session.run(acc,
                           feed_dict={pred: [1, 0, 1, 0],
                                      labels: [1, 1, 0, 0]})
      self.assertEqual(result, 0.5)

  def testAccuracy1DInt64(self):
    with self.cached_session() as session:
      pred = array_ops.placeholder(dtypes.int64, shape=[None])
      labels = array_ops.placeholder(dtypes.int64, shape=[None])
      acc = classification.accuracy(pred, labels)
      result = session.run(acc,
                           feed_dict={pred: [1, 0, 1, 0],
                                      labels: [1, 1, 0, 0]})
      self.assertEqual(result, 0.5)

  def testAccuracy1DString(self):
    with self.cached_session() as session:
      pred = array_ops.placeholder(dtypes.string, shape=[None])
      labels = array_ops.placeholder(dtypes.string, shape=[None])
      acc = classification.accuracy(pred, labels)
      result = session.run(
          acc,
          feed_dict={pred: ['a', 'b', 'a', 'c'],
                     labels: ['a', 'c', 'b', 'c']})
      self.assertEqual(result, 0.5)

  def testAccuracyDtypeMismatch(self):
    # Predictions and labels of different integer dtypes must be rejected.
    with self.assertRaises(ValueError):
      pred = array_ops.placeholder(dtypes.int32, shape=[None])
      labels = array_ops.placeholder(dtypes.int64, shape=[None])
      classification.accuracy(pred, labels)

  def testAccuracyFloatLabels(self):
    # Float labels are not valid for accuracy.
    with self.assertRaises(ValueError):
      pred = array_ops.placeholder(dtypes.int32, shape=[None])
      labels = array_ops.placeholder(dtypes.float32, shape=[None])
      classification.accuracy(pred, labels)

  def testAccuracy1DWeighted(self):
    with self.cached_session() as session:
      pred = array_ops.placeholder(dtypes.int32, shape=[None])
      labels = array_ops.placeholder(dtypes.int32, shape=[None])
      weights = array_ops.placeholder(dtypes.float32, shape=[None])
      # Bug fix: `weights` was built and fed but never passed to
      # accuracy(), so the weighting was silently ignored.
      acc = classification.accuracy(pred, labels, weights)
      result = session.run(acc,
                           feed_dict={
                               pred: [1, 0, 1, 1],
                               labels: [1, 1, 0, 1],
                               weights: [3.0, 1.0, 2.0, 0.0]
                           })
      # Correct predictions carry weights 3.0 and 0.0 out of 6.0 total.
      self.assertEqual(result, 0.5)

  def testAccuracy1DWeightedBroadcast(self):
    with self.cached_session() as session:
      pred = array_ops.placeholder(dtypes.int32, shape=[None])
      labels = array_ops.placeholder(dtypes.int32, shape=[None])
      weights = array_ops.placeholder(dtypes.float32, shape=[])
      # Bug fix: pass the scalar weights so broadcasting is exercised.
      acc = classification.accuracy(pred, labels, weights)
      result = session.run(acc,
                           feed_dict={
                               pred: [1, 0, 1, 0],
                               labels: [1, 1, 0, 0],
                               weights: 3.0,
                           })
      self.assertEqual(result, 0.5)
class F1ScoreTest(test.TestCase):
  """Tests for classification.f1_score."""

  def setUp(self):
    super(F1ScoreTest, self).setUp()
    # Fixed seed so the random-input tests below are reproducible.
    np.random.seed(1)

  def testVars(self):
    # Creating the metric should register exactly these local variables.
    classification.f1_score(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        num_thresholds=3)
    expected = {'f1/true_positives:0', 'f1/false_positives:0',
                'f1/false_negatives:0'}
    self.assertEqual(
        expected, set(v.name for v in variables.local_variables()))
    # NOTE(review): this assertion duplicates the previous one
    # (`expected` is already a set) — looks redundant; confirm intent.
    self.assertEqual(
        set(expected), set(v.name for v in variables.local_variables()))
    self.assertEqual(
        set(expected),
        set(v.name for v in ops.get_collection(ops.GraphKeys.METRIC_VARIABLES)))

  def testMetricsCollection(self):
    # The value tensor must land in the requested metrics collection.
    my_collection_name = '__metrics__'
    f1, _ = classification.f1_score(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        num_thresholds=3,
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [f1])

  def testUpdatesCollection(self):
    # The update op must land in the requested updates collection.
    my_collection_name = '__updates__'
    _, f1_op = classification.f1_score(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        num_thresholds=3,
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [f1_op])

  def testValueTensorIsIdempotent(self):
    # Evaluating the value tensor must not change accumulated state.
    predictions = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes.float32, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=2, dtype=dtypes.int64, seed=2)
    f1, f1_op = classification.f1_score(predictions, labels, num_thresholds=3)
    with self.cached_session() as sess:
      sess.run(variables.local_variables_initializer())
      # Run several updates.
      for _ in range(10):
        sess.run([f1_op])
      # Then verify idempotency.
      initial_f1 = f1.eval()
      for _ in range(10):
        self.assertAllClose(initial_f1, f1.eval())

  def testAllCorrect(self):
    inputs = np.random.randint(0, 2, size=(100, 1))
    with self.cached_session() as sess:
      predictions = constant_op.constant(inputs, dtype=dtypes.float32)
      labels = constant_op.constant(inputs)
      f1, f1_op = classification.f1_score(predictions, labels, num_thresholds=3)
      sess.run(variables.local_variables_initializer())
      sess.run([f1_op])
      # Predictions equal labels exactly, so F1 is perfect.
      self.assertEqual(1, f1.eval())

  def testSomeCorrect(self):
    predictions = constant_op.constant(
        [1, 0, 1, 0], shape=(1, 4), dtype=dtypes.float32)
    labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
    f1, f1_op = classification.f1_score(predictions, labels, num_thresholds=1)
    with self.cached_session() as sess:
      sess.run(variables.local_variables_initializer())
      sess.run([f1_op])
      # Threshold 0 will have around 0.5 precision and 1 recall yielding an F1
      # score of 2 * 0.5 * 1 / (1 + 0.5).
      self.assertAlmostEqual(2 * 0.5 * 1 / (1 + 0.5), f1.eval())

  def testAllIncorrect(self):
    inputs = np.random.randint(0, 2, size=(10000, 1))
    with self.cached_session() as sess:
      predictions = constant_op.constant(inputs, dtype=dtypes.float32)
      labels = constant_op.constant(1 - inputs, dtype=dtypes.float32)
      f1, f1_op = classification.f1_score(predictions, labels, num_thresholds=3)
      sess.run(variables.local_variables_initializer())
      sess.run([f1_op])
      # Threshold 0 will have around 0.5 precision and 1 recall yielding an F1
      # score of 2 * 0.5 * 1 / (1 + 0.5).
      self.assertAlmostEqual(2 * 0.5 * 1 / (1 + 0.5), f1.eval(), places=2)

  def testWeights1d(self):
    with self.cached_session() as sess:
      predictions = constant_op.constant(
          [[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes.float32)
      labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
      # Zero weight masks the first (all-wrong) row out entirely.
      weights = constant_op.constant(
          [[0], [1]], shape=(2, 1), dtype=dtypes.float32)
      f1, f1_op = classification.f1_score(predictions, labels, weights,
                                          num_thresholds=3)
      sess.run(variables.local_variables_initializer())
      sess.run([f1_op])
      self.assertAlmostEqual(1.0, f1.eval(), places=5)

  def testWeights2d(self):
    with self.cached_session() as sess:
      predictions = constant_op.constant(
          [[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes.float32)
      labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
      # Per-element weights; the first (all-wrong) row is masked out.
      weights = constant_op.constant(
          [[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes.float32)
      f1, f1_op = classification.f1_score(predictions, labels, weights,
                                          num_thresholds=3)
      sess.run(variables.local_variables_initializer())
      sess.run([f1_op])
      self.assertAlmostEqual(1.0, f1.eval(), places=5)

  def testZeroLabelsPredictions(self):
    with self.cached_session() as sess:
      predictions = array_ops.zeros([4], dtype=dtypes.float32)
      labels = array_ops.zeros([4])
      f1, f1_op = classification.f1_score(predictions, labels, num_thresholds=3)
      sess.run(variables.local_variables_initializer())
      sess.run([f1_op])
      # No positives at all: F1 degenerates to 0.
      self.assertAlmostEqual(0.0, f1.eval(), places=5)

  def testWithMultipleUpdates(self):
    num_samples = 1000
    batch_size = 10
    num_batches = int(num_samples / batch_size)
    # Create the labels and data.
    labels = np.random.randint(0, 2, size=(num_samples, 1))
    noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
    predictions = 0.4 + 0.2 * labels + noise
    predictions[predictions > 1] = 1
    predictions[predictions < 0] = 0
    thresholds = [-0.01, 0.5, 1.01]
    # Brute-force the expected maximum F1 over the thresholds with a
    # plain confusion-matrix count.
    expected_max_f1 = -1.0
    for threshold in thresholds:
      tp = 0
      fp = 0
      fn = 0
      tn = 0
      for i in range(num_samples):
        if predictions[i] >= threshold:
          if labels[i] == 1:
            tp += 1
          else:
            fp += 1
        else:
          if labels[i] == 1:
            fn += 1
          else:
            tn += 1
      epsilon = 1e-7
      expected_prec = tp / (epsilon + tp + fp)
      expected_rec = tp / (epsilon + tp + fn)
      expected_f1 = (2 * expected_prec * expected_rec /
                     (epsilon + expected_prec + expected_rec))
      if expected_f1 > expected_max_f1:
        expected_max_f1 = expected_f1
    labels = labels.astype(np.float32)
    predictions = predictions.astype(np.float32)
    tf_predictions, tf_labels = tf.data.Dataset.from_tensor_slices(
        (predictions, labels)).repeat().batch(
            batch_size).make_one_shot_iterator().get_next()
    # NOTE(review): arguments are passed as (tf_labels, tf_predictions)
    # while other tests in this class pass (predictions, labels)
    # positionally — confirm against f1_score's actual signature.
    f1, f1_op = classification.f1_score(tf_labels, tf_predictions,
                                        num_thresholds=3)
    with self.cached_session() as sess:
      sess.run(variables.local_variables_initializer())
      for _ in range(num_batches):
        sess.run([f1_op])
      # Since this is only approximate, we can't expect a 6 digits match.
      # Although with higher number of samples/thresholds we should see the
      # accuracy improving
      self.assertAlmostEqual(expected_max_f1, f1.eval(), 2)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
""" Admin for channels """
from django.contrib import admin
from moira_lists.models import MoiraList
class MoiraListAdmin(admin.ModelAdmin):
    """Admin for Moira Lists"""

    model = MoiraList
    # Allow searching by list name or by member email address.
    search_fields = ("name", "users__email")
    # Fields are shown but never editable in the admin.
    readonly_fields = ("users", "name")

    def has_change_permission(self, request, obj=None):
        # Lists are read-only in the admin for everyone.
        return False

    def has_add_permission(self, request):
        # Lists cannot be created manually through the admin.
        return False
# Expose MoiraList in the Django admin with the read-only ModelAdmin.
admin.site.register(MoiraList, MoiraListAdmin)
|
import math
from typing import Tuple
from utils.points import generate_square
import numpy as np
import torch
class PatchesProcessor:
    """Builds a bank of randomly perturbed square patches and scores point
    sets by the pairwise cosine distance between their triangle normals."""

    def __init__(self, points_per_side: int, total_patches: int, seed: int = 0xCAFFE) -> None:
        self.total_patches = total_patches
        self.rng = np.random.RandomState(seed)
        patch_vertices = []
        patch_edges = []
        # Half a cell width: caps how far a vertex may be displaced.
        unit = 0.5 / points_per_side
        base_vertices, base_edges = generate_square(points_per_side)
        for _ in range(total_patches):
            offsets = self.rng.rand(*base_vertices.shape)
            offsets_abs = np.abs(offsets)
            # Slope of each random offset; (new_x, new_y) is where a ray of
            # that slope hits the x + y = unit boundary.
            a_coeff = offsets_abs[:, 1] / offsets_abs[:, 0]
            new_x = unit / (a_coeff + 1)
            new_y = new_x * a_coeff
            lengths = np.linalg.norm(np.stack((new_x, new_y), axis=-1), axis=-1, keepdims=True)
            # Scale the boundary length by the offset magnitude.
            new_lengths = np.linalg.norm(offsets, axis=-1, keepdims=True) * lengths
            direction = self.rng.randn(*base_vertices.shape)
            # NOTE(review): `direction` is normalized by the norm of
            # `offsets`, not of `direction` itself — confirm this is the
            # intended behaviour.
            new_offsets = (direction / np.linalg.norm(offsets, axis=-1, keepdims=True) * new_lengths)
            new_vertices = base_vertices + new_offsets
            patch_vertices.append(new_vertices)
            patch_edges.append(base_edges)
        # Stacked banks: vertices as float, edge (triangle) indices as long.
        self.patch_vertices = torch.stack(patch_vertices, dim=0).float()
        self.patch_edges = torch.stack(patch_edges, dim=0).long()

    def sample_patches_vertices(self, num: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Draw `num` patches with replacement; returns (vertices, indices)."""
        indices = self.rng.randint(0, self.total_patches, size=num)
        return self.patch_vertices[indices], torch.from_numpy(indices).to(self.patch_vertices).long()

    def calculate_total_cosine_distance_of_normals(self, points: torch.Tensor, indices: torch.Tensor,
                                                   device: torch.device) -> torch.Tensor:
        """Per batch element, sum the squared (1 - |cos|) values over all
        unordered pairs of triangle normals of the indexed patches."""
        patch_edges = self.patch_edges[indices]
        # Gather the three corner points of every triangle:
        # (batch, n_triangles, 3 corners, 3 coords).
        triangle_points = (
            points.unsqueeze(dim=1)
            .repeat((1, patch_edges.shape[1], 1, 1))
            .gather(
                index=patch_edges.unsqueeze(dim=-1).repeat((1, 1, 1, 3)).to(device),
                dim=-2,
            )
        )
        # Face normal of each triangle from the cross product of two edges.
        normals = torch.cross(
            triangle_points[:, :, 1] - triangle_points[:, :, 0],
            triangle_points[:, :, 2] - triangle_points[:, :, 0],
        )
        # Epsilon guards against division by zero for degenerate triangles.
        normalized_normals = normals / (normals.norm(dim=-1, keepdim=True) + 1e-5)
        # abs() makes the measure orientation-independent; tril(-1) keeps
        # each unordered pair exactly once.
        distances = (1 - torch.bmm(normalized_normals, normalized_normals.permute((0, 2, 1))).abs()).tril(-1)
        total_cosine_distance = distances.pow(2).sum(dim=(1, 2))
        return total_cosine_distance
|
######## Image Object Detection Using Tensorflow-trained Classifier #########
#
# Author: Evan Juras
# Date: 1/15/18
# Description:
# This program uses a TensorFlow-trained neural network to perform object detection.
# It loads the classifier and uses it to perform object detection on an image.
# It draws boxes, scores, and labels around the objects of interest in the image.
## Some of the code is copied from Google's example at
## https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
## and some is copied from Dat Tran's example at
## https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py
## but we changed it to make it more understandable to us.
# Import packages
import os
import cv2
import numpy as np
import tensorflow as tf
import json
from .DetectedObjects import DetectedObjects
# All paths are resolved relative to the current working directory.
CWD_PATH = os.getcwd()
# Frozen TensorFlow inference graph.
PATH_TO_CKPT = os.path.join(
    CWD_PATH,
    "neural_network",
    "graph.pb",
)
# Class-id -> label mapping used to name detections.
PATH_TO_LABELS = os.path.join(
    CWD_PATH,
    "neural_network",
    "labels.json",
)
class ObjectDetection:
    """Runs the frozen TF1 detection graph on a single image file."""

    def __init__(self, path: str, json: bool = True):
        # NOTE(review): the `json` parameter shadows the imported `json`
        # module inside __init__ (harmless here but easy to trip over);
        # renaming would break keyword callers, so it is kept.
        self._path = path
        # When True, run() returns a JSON string; otherwise the
        # DetectedObjects instance itself.
        self._json = json
        self._load_category_index()

    def _load_category_index(self):
        # Load the class-id -> label mapping from disk.
        with open(PATH_TO_LABELS, "r") as f:
            self.category_index = json.load(f)

    def _load_tensorflow(self):
        # Load the Tensorflow model into memory.
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(PATH_TO_CKPT, "rb") as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name="")
            self.sess = tf.Session(graph=detection_graph)
        # Input tensor is the image
        self.image_tensor = detection_graph.get_tensor_by_name("image_tensor:0")
        # Output tensors are the detection boxes, scores, and classes
        # Each box represents a part of the image where a particular object was detected
        self.detection_boxes = detection_graph.get_tensor_by_name("detection_boxes:0")
        # Each score represents level of confidence for each of the objects.
        # The score is shown on the result image, together with the class label.
        self.detection_scores = detection_graph.get_tensor_by_name("detection_scores:0")
        self.detection_classes = detection_graph.get_tensor_by_name(
            "detection_classes:0"
        )
        # Number of objects detected
        self.num_detections = detection_graph.get_tensor_by_name("num_detections:0")

    def run(self):
        """Run detection on the image at self._path and return the results
        (JSON string or DetectedObjects, depending on the `json` flag)."""
        self._load_tensorflow()
        # expand image dimensions to have shape: [1, None, None, 3]
        # i.e. a single-column array, where each item in the column has the pixel RGB value
        image = cv2.imread(self._path)
        image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image_expanded = np.expand_dims(image_rgb, axis=0)
        (boxes, scores, classes, num) = self.sess.run(
            [
                self.detection_boxes,
                self.detection_scores,
                self.detection_classes,
                self.num_detections,
            ],
            feed_dict={self.image_tensor: image_expanded},
        )
        # One-shot usage: the session is closed after a single inference.
        self.sess.close()
        objects = DetectedObjects(
            boxes, scores, classes, num, self.category_index, self._path, image.shape
        )
        return objects.to_json() if self._json else objects
|
#%%
# imports
import urllib
from bs4 import BeautifulSoup
from pprint import pprint
import pandas as pd
# %%
# Notes
# The websites that were scraped:
# https://www.eventbrite.com/blog/70-event-ideas-and-formats-to-inspire-your-next-great-event-ds00/
# https://www.updater.com/blog/resident-event-ideas
# %%
# Defining functions
def extract_event_names_descriptions_eventbrite(results):
    """
    Takes in a list of events from the eventbrite page.
    Returns: Appropriate event names stripped of extraneous text and html tags,
    zipped with the event descriptions
    """
    # Headings hold the event names; the final <h2> is page boilerplate.
    headings = results.find_all('h2')[:-1]
    names = [h.text.replace('\xa0', ' ').split(':')[-1] for h in headings]
    # The first two and the last <p> tags are not event descriptions.
    paragraphs = results.find_all('p')[2:-1]
    blurbs = [p.text.replace('\xa0', ' ') for p in paragraphs]
    return zip(names, blurbs)
def extract_event_names_descriptions_updater(results):
    """
    Takes in a list of events from the updater page.
    Returns: Appropriate event names stripped of extraneous text and html tags,
    zipped with the event descriptions.
    """
    # Headings are numbered like "12. Name"; keep only the name part.
    names = [h.text.split('. ')[-1] for h in results.find_all('h3')]
    # Skip the four intro paragraphs and the two trailing ones.
    blurbs = [p.text for p in results.find_all('p')[4:-2]]
    return zip(names, blurbs)
#%%
# Loading the html file and calling functions
# Parse the saved Eventbrite article and pull out {name: description}.
with open('./data/eventbrite_ideas.html', 'r', encoding='utf8') as filehandle:
    page_eb = filehandle.read()
soup_eb = BeautifulSoup(page_eb, 'html.parser')
results_eventbrite = soup_eb.find('div',
    class_='post-content context-content-single context-content--eb-helpers')
eventnames_descriptions_eb = dict(extract_event_names_descriptions_eventbrite(
    results_eventbrite))
# Same for the saved Updater article.
with open('./data/updater_ideas.html', 'r', encoding='utf8') as filehandle:
    page_up = filehandle.read()
soup_up = BeautifulSoup(page_up, 'html.parser')
results_updater = soup_up.find('div', class_='col sqs-col-12 span-12')
eventnames_descriptions_up = dict(extract_event_names_descriptions_updater(
    results_updater))
# Merge both sources into one mapping (Updater wins on name clashes).
eventnames_descriptions_eb.update(
    eventnames_descriptions_up)
eventnames_descriptions = eventnames_descriptions_eb
# %%
# export to file so that I can further edit the wording and the verbiage
df = pd.DataFrame(eventnames_descriptions.items(),
                  columns=['Event Name', 'Event Description'])
df.to_csv('./data/all_events_names_descriptions.csv', index=False)
# %%
|
from moodle.base.general import GeneralStatus
class AgreeSitePolicyResponse(GeneralStatus):
    """Response returned after agreeing the site policy for the current user.

    Args:
        status (int): Status: true only if we set the policyagreed to 1 for the user
        warnings (List[Warning]): list of warnings
    """
|
from datetime import timedelta
import os
import jwt
from dotenv import load_dotenv
from rpc.gen.file.download.errors.ttypes import TDownloadError, TDownloadErrorCode
from .general_key import GeneralKeyModel
# MODELS MUST ONLY USE THRIFT ENUM AND EXCEPTIONS
# MODELS MAY NOT USE THRIFT STRUCTS
load_dotenv()

# Key files and JWT parameters for download tokens, all read from the
# environment (populated by .env via load_dotenv above).
ENC_FILE = os.getenv("DOWNLOAD_ENC")
DEC_FILE = os.getenv("DOWNLOAD_DEC")
SECRET = os.getenv("DOWNLOAD_SECRET")
# Token lifetime; defaults to 30 minutes when the variable is unset.
EXPIRATION = timedelta(minutes=int(os.getenv("DOWNLOAD_EXPIRATION_MINUTE") or "30"))
class DownloadModel(GeneralKeyModel):
    """Issues and validates download tokens, translating JWT failures into
    download-specific thrift errors."""

    def __init__(
        self,
        secret=SECRET,
        expiration=EXPIRATION,
        enc=None,
        dec=None
    ):
        # Delegate to the generic key model with the download key files
        # and token parameters from the environment.
        super(DownloadModel, self).__init__(
            ENC_FILE,
            DEC_FILE,
            secret,
            expiration,
            enc=enc,
            dec=dec
        )

    def decode(self, func, ip, token):
        """Decode `token`, mapping JWT errors onto TDownloadError codes."""
        try:
            return super(DownloadModel, self).decode(func, ip, token)
        except jwt.ExpiredSignatureError:
            raise TDownloadError(TDownloadErrorCode.DOWNLOAD_TOKEN_EXPIRED)
        except (jwt.InvalidIssuerError, jwt.InvalidAudienceError, jwt.DecodeError):
            raise TDownloadError(TDownloadErrorCode.DOWNLOAD_TOKEN_INVALID)
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Created by Mingfei Chen (lasiafly@gmail.com)
# Created On: 2020-2-20
# ------------------------------------------------------------------------------
import cv2
import json
import numpy as np
import os
import os.path as osp
import torch
from torch.utils.data import Dataset
class ObjtrackDataset(Dataset):
def __init__(self,
data_root,
transform=None,
):
"""
info
{
'filename': 'a.jpg', # abs path for the image
'frame_id': 1 # <int> start from 0
'width': 1280,
'height': 720,
'ann': {
'track_id': <np.ndarray> (n, ), # start from 0, -1 DontCare
'bboxes': <np.ndarray> (n, 4), # 0-based, xmin, ymin, xmax, ymax
'labels': <np.ndarray> (n, ), # DontCare(ignore), car, pedestrain, cyclist(optioanl)
'occluded': <np.ndarray> (n, ), # optional
'truncated': <np.ndarray> (n, ), # optional
'alpha': <np.ndarray> (n, ), # 2D optional
'dimensions': <np.ndarray> (n, 3), # 2D optional
'location': <np.ndarray> (n, 3), # 2D optional
'rotation_y': <np.ndarray> (n, ), # 2D optional
}
}
Args:
data_root: absolute root path for train or val data folder
transform: train_transform or eval_transform or prediction_transform
"""
self.class_dict = {
'DontCare': -1,
'Pedestrian': 0,
'Car': 1,
'Cyclist': 2
}
self.images_dir = os.path.join(data_root, 'images')
self.labels_dir = os.path.join(data_root, 'labels')
self.transform = transform
self.track_infos = []
img_infos = {}
#scan the images_dir
images_dir_list = os.listdir(self.images_dir)
for frames_dir in images_dir_list:
img_infos[frames_dir] = []
frames_dir_path = os.path.join(self.images_dir, frames_dir)
gt_path = os.path.join(self.labels_dir, frames_dir+'.txt')
with open(gt_path, 'r') as f:
lines = f.readlines()
for line in lines:
labels = line.split()
if labels[1] == '-1': # DontCare
continue
frame_id = labels[0]
# already has info for this frame
if len(img_infos[frames_dir]) >= int(frame_id) + 1:
info = img_infos[frames_dir][int(frame_id)]
info['ann']['track_id'] = np.append(info['ann']['track_id'], int(labels[1]))
info['ann']['bboxes'] = np.vstack((info['ann']['bboxes'], np.array(labels[6:10], dtype=np.float32)))
info['ann']['labels'] = np.append(info['ann']['labels'], int(self.class_dict[labels[2]]+1))
info['ann']['truncated'] = np.append(info['ann']['truncated'], int(labels[3]))
info['ann']['occluded'] = np.append(info['ann']['occluded'], int(labels[4]))
info['ann']['alpha'] = np.append(info['ann']['alpha'], float(labels[5]))
info['ann']['dimensions'] = np.vstack((info['ann']['dimensions'], np.array(labels[10:13], dtype=np.float32)))
info['ann']['location'] = np.vstack((info['ann']['location'], np.array(labels[13:16], dtype=np.float32)))
info['ann']['rotation_y'] = np.append(info['ann']['rotation_y'], float(labels[16]))
else:
info = {}
info['frame_id'] = int(frame_id)
info['filename'] = os.path.join(frames_dir_path, frame_id.zfill(6)+'.png')
info['ann'] = dict(
track_id=np.array(labels[1], dtype=np.int64),
bboxes=np.array(labels[6:10], dtype=np.float32),
labels=np.array(self.class_dict[labels[2]]+1, dtype=np.int64),
truncated=np.array(labels[3], dtype=np.int64),
occluded=np.array(labels[4], dtype=np.int64),
alpha=np.array(labels[5], dtype=np.float32),
dimensions=np.array(labels[10:13], dtype=np.float32),
location=np.array(labels[13:16], dtype=np.float32),
rotation_y=np.array(labels[16], dtype=np.float32),
)
img_infos[frames_dir].append(info)
for frames_dir in img_infos.keys():
self.track_infos += [frame_info for frame_info in img_infos[frames_dir]]
    def __len__(self):
        # Dataset length == number of annotated frames collected at load time.
        return len(self.track_infos)
    def __getitem__(self, index):
        """
        Load one frame and its ground-truth annotations.

        Return:
            img: the image (possibly transformed by ``self.transform``)
            bboxes (tensor): shape ``(num_object, 6)`` after hstack —
                box = bboxes[:, :4], label = bboxes[:, 4], track_id = bboxes[:, 5]
            frame_id (int): frame number within its sequence
            index (int): dataset index of this sample
        """
        img_path = self.track_infos[index]['filename']
        if not osp.exists(img_path):
            logging.error("Cannot found image data: " + img_path)
            raise FileNotFoundError
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
        # Record the actual image size back into the info dict for later use.
        self.track_infos[index]['height'], self.track_infos[index]['width'] = img.shape[:2]
        frame_id = self.track_infos[index]['frame_id']
        track_id = self.track_infos[index]['ann']['track_id']
        bboxes = self.track_infos[index]['ann']['bboxes']
        labels = self.track_infos[index]['ann']['labels']
        num_object = len(bboxes)
        no_object = False
        if num_object == 0:
            # no gt boxes: substitute one dummy box/label/track so the
            # downstream shapes stay valid; flag it via no_object.
            no_object = True
            bboxes = np.array([0, 0, 0, 0]).reshape(-1, 4)
            labels = np.array([0]).reshape(-1, 1)
            track_id = np.array([0]).reshape(-1, 1)
        else:
            bboxes = bboxes.reshape(-1, 4)
            labels = labels.reshape(-1, 1)
            track_id = track_id.reshape(-1, 1)
        if self.transform is not None:
            img, bboxes, labels = self.transform(
                img, bboxes, labels, no_object
            )
        bboxes = np.hstack((bboxes, labels, track_id))  # labels, track_id right after bboxes
        bboxes = torch.from_numpy(bboxes.astype(np.float32))
        return img, bboxes, frame_id, index
if __name__ == "__main__":
    # Smoke test: load one sample and print it.
    dataset = ObjtrackDataset('/Users/chenmingfei/Downloads/Hwang_Papers/TNT_pytorch/data/training')
    sample = dataset[154]
    print(sample)
# encoding=utf-8
from rest_framework.views import APIView
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import viewsets, mixins
from rest_framework.routers import DefaultRouter
from lepus.models import *
from lepus.internal.serializers import AttackPointSerializer, UserSerializer
# Router collecting the internal API endpoints defined in this module.
router = DefaultRouter()


class AttackPointViewSet(mixins.CreateModelMixin,
                         viewsets.GenericViewSet):
    # Create-only endpoint: attack points can be submitted but not listed.
    serializer_class = AttackPointSerializer
    queryset = AttackPoint.objects.all()


router.register("attackpoints", AttackPointViewSet)
class UserViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only user listing, optionally filtered by IP address."""
    serializer_class = UserSerializer
    queryset = User.objects.all()

    def get_queryset(self):
        # ``?ip=<addr>`` narrows the listing to users seen from that IP;
        # otherwise fall back to the full queryset.
        ip = self.request.GET.get("ip")
        if ip:
            queryset = User.objects.by_ip(ip)
        else:
            queryset = self.queryset
        return queryset


router.register("users", UserViewSet)
|
from setuptools import find_packages
from distutils.core import setup, Extension
# Read the long description from the README so it can be rendered on PyPI.
with open('README.rst') as readme_file:
    readme = readme_file.read()

setup(
    name='WUDESIM_Py',
    version='0.2.3',
    description='A model for simulating water quality in the dead-end branches of drinking water distribution networks',
    author='Ahmed Abokifa',
    # Fixed: the address previously contained a stray leading space.
    author_email='abokifa@uic.edu',
    packages=find_packages(),  # same as name
    license="MIT license",
    long_description=readme,
    include_package_data=True,
    # Ship the bundled simulation DLLs alongside the package.
    data_files=[('WUDESIM_Py', ['WUDESIM_Py/epanet2.dll', 'WUDESIM_Py/WUDESIM_LIB.dll'])]
)
|
class NazwaKlasy:
    """Tutorial class demonstrating fields, methods, statics and a constructor."""
    # class-level member fields (shared defaults for all instances)
    a=0
    d="ala ma kota"
    # member methods
    def wypisz(self):
        print(self.a + self.b)
        # note the explicit argument that is an object of this class;
        # it also exists in C++ but there it is neither declared
        # explicitly nor does it have to be used explicitly
    # static method
    @staticmethod
    def info():
        print("INFO")
    # constructor (with one argument)
    def __init__(self, x = 1):
        print("konstruktor", self.a , self.d)
        # and another way to create
        # an instance member field
        self.b = 13 * x
# using the class
k = NazwaKlasy()
k.a = 67
k.wypisz()
# methods can also be called like this:
# (explicit use of the object as the first argument)
NazwaKlasy.wypisz(k)
# using static methods
NazwaKlasy.info()
print("k jest typu:", type(k))
print("natomiast k.a jest typu:", type(k.a))
# objects can be extended with new members and functions:
k.b = k.a + 10
print(k.b)
|
# -*- coding: utf-8 -*-
# loadlimit/cli.py
# Copyright (C) 2016 authors and contributors (see AUTHORS file)
#
# This module is released under the MIT License.
"""Define CLI"""
# ============================================================================
# Imports
# ============================================================================
# Stdlib imports
from argparse import ArgumentParser
import asyncio
from collections import defaultdict
from contextlib import contextmanager, ExitStack
from datetime import datetime
from functools import partial
from importlib import import_module
from itertools import count
from logging import FileHandler, Formatter
import os
from os.path import abspath, isdir, join as pathjoin
from pathlib import Path
import sys
from tempfile import TemporaryDirectory
import time
# Third-party imports
from pandas import Timedelta
from pytz import timezone
from sqlalchemy import create_engine
from tqdm import tqdm
# Import uvloop if non-win32 platform
if sys.platform != 'win32':
import uvloop
# Local imports
from . import channel
from . import stat
from .core import BaseLoop, Client
from .importhook import TaskImporter
from .result import (SQLTimeSeries, SQLTotal, SQLTotalError, SQLTotalFailure,
TimeSeries, Total, TotalError, TotalFailure)
from .stat import (flushtosql, flushtosql_shutdown, measure, Period,
SendTimeData)
from .util import ageniter, Event, EventType, LogLevel, Namespace, TZ_UTC
# ============================================================================
# Globals
# ============================================================================
PROGNAME = 'loadlimit'
# ============================================================================
# Helpers
# ============================================================================
def commalist(commastr):
    """Transforms a comma-delimited string into a list of strings"""
    if not commastr:
        return []
    # Empty segments (from consecutive commas) are dropped.
    return [piece for piece in commastr.split(',') if piece]
class LoadLimitFormatter(Formatter):
    """Formatter whose timestamps are timezone-aware datetimes.

    Using datetime.fromtimestamp (instead of time.localtime) means
    strftime supports sub-second directives such as %f.
    """
    converter = partial(datetime.fromtimestamp, tz=TZ_UTC)

    def formatTime(self, record, datefmt=None):
        """
        Return the creation time of the specified LogRecord as formatted text.

        If *datefmt* is given, it is passed to datetime.strftime();
        otherwise the formatter's default time format plus milliseconds
        is used. The 'converter' attribute controls how the record's
        epoch timestamp is turned into a datetime.
        """
        # Convert the epoch timestamp exactly once; the original
        # recomputed `ct` redundantly when datefmt was supplied.
        ct = self.converter(record.created)
        if datefmt:
            s = ct.strftime(datefmt)
        else:
            t = ct.strftime(self.default_time_format)
            s = self.default_msec_format % (t, record.msecs)
        return s
class Printer:
    """Helper class for printing to stdout"""

    def __init__(self, printfunc=None):
        self._printfunc = None
        # Route through the property setter so the callable check runs.
        self.printfunc = print if printfunc is None else printfunc

    def __call__(self, *value, sep=' ', end='\n', file=sys.stdout,
                 flush=False, startnewline=False):
        """Print values"""
        if flush:
            file.flush()
        if startnewline:
            # Emit a leading newline (flushing first) before the payload.
            self.__call__('\n', flush=True)
        func = self._printfunc
        if func is print:
            func(*value, sep=sep, end=end, file=file)
        else:
            # Non-print callables receive a single pre-joined string.
            func('{}{}'.format(sep.join(str(v) for v in value), end))

    @property
    def printfunc(self):
        """Return current printfunc"""
        return self._printfunc

    @printfunc.setter
    def printfunc(self, func):
        """Set new printfunc"""
        if not callable(func):
            raise TypeError(
                'printfunc expected callable, got value of type {}'.format(
                    type(func).__name__))
        self._printfunc = func
cleanup = channel.DataChannel(name='cleanup')
# ============================================================================
# tqdm integration
# ============================================================================
async def update_tqdm(config, state, name):
    """Update tqdm display

    Ticks the progress bar registered under *name* once per second until
    ``state.reschedule`` is set False.
    """
    counter = count()
    cur = next(counter)  # prime the counter; the loop starts counting at 1
    while True:
        cur = next(counter)
        await asyncio.sleep(1)
        pbar = state.progressbar[name]
        # Never tick past a known total (total may be None for open-ended bars).
        if pbar.total is None or cur < pbar.total:
            pbar.update(1)
            state.tqdm_progress[name] = cur
        if state.reschedule is False:
            return
async def stop_tqdm(exitcode, *, manager=None, state=None, name=None):
    """Stop tqdm updating

    Shutdown callback: fast-forwards the named bar to its total so a
    finished run never displays partial progress.
    """
    progress = state.tqdm_progress[name]
    pbar = state.progressbar[name]
    if pbar.total is not None and progress < pbar.total:
        pbar.update(pbar.total - progress)
        state.tqdm_progress[name] = pbar.total
class TQDMCleanup:
    """Update cleanup progress bar"""

    def __init__(self, config, state):
        # `config` is accepted for interface symmetry but not stored.
        self._prev = None  # queue size observed on the previous call
        self._state = state

    async def __call__(self, qsize):
        state = self._state
        pbarkey = 'cleanup'
        pbar = state.progressbar.get(pbarkey, None)
        if pbar is None:
            return
        prev = self._prev
        if prev is None:
            # First observation fixes the bar's total; no progress yet.
            self._prev = qsize
            pbar.total = qsize
            return
        # Progress is how much the queue shrank since the last call.
        update = prev - qsize
        self._prev = qsize
        pbar.update(update)
        state.tqdm_progress[pbarkey] += update
@contextmanager
def tqdm_context(config, state, *, name=None, sched=False, **kwargs):
    """Setup tqdm

    Context manager yielding a progress bar (or None when progress bars
    are disabled in config). If *name* is given the bar is registered in
    ``state.progressbar`` and stdout printing is redirected through
    ``tqdm.write`` so prints don't corrupt the bar. With ``sched=True``
    a once-per-second updater coroutine is scheduled as well.
    """
    # Do nothing when progress bars are disabled
    if not config['loadlimit']['show-progressbar']:
        yield
        return
    # Setup tqdm
    with tqdm(**kwargs) as pbar:
        oldprinter = state.write.printfunc
        if name is not None:
            state.progressbar[name] = pbar
            state.tqdm_progress[name] = 0
            state.write.printfunc = tqdm.write
            if sched:
                asyncio.ensure_future(update_tqdm(config, state, name))
                channel.shutdown(partial(stop_tqdm, state=state, name=name))
        try:
            yield pbar
        finally:
            # Restore printing and deregister the bar
            if name is not None:
                state.write.printfunc = oldprinter
                state.progressbar[name] = None
class TQDMClient(Client):
    """tqdm-aware client

    Mirrors Client behaviour but ticks the 'iteration', 'init' and
    'shutdown' progress bars (when registered in state.progressbar).
    """
    __slots__ = ()

    async def __call__(self, state, *, clientid=None):
        if clientid is None:
            clientid = self.id
        pbarkey = 'iteration'
        pbar = state.progressbar.get(pbarkey, None)
        if pbar is None:
            # No bar registered: fall back to the plain Client behaviour.
            await super().__call__(state, clientid=clientid)
            return
        ensure_future = asyncio.ensure_future
        while True:
            # Run all task coroutines concurrently, then tick the bar once.
            t = [ensure_future(corofunc(state, clientid=clientid))
                 for corofunc in self._corofunc]
            await asyncio.gather(*t)
            pbar.update(1)
            state.tqdm_progress[pbarkey] += 1
            if not self.option.reschedule:
                return

    async def init(self, config, state):
        """Initialize the client"""
        pbarkey = 'init'
        await super().init(config, state)
        pbar = state.progressbar.get(pbarkey, None)
        if pbar is not None:
            pbar.update(1)
            state.tqdm_progress[pbarkey] += 1

    async def shutdown(self, config, state):
        """Shutdown the client"""
        pbarkey = 'shutdown'
        await super().shutdown(config, state)
        pbar = state.progressbar.get(pbarkey, None)
        if pbar is not None:
            pbar.update(1)
            state.tqdm_progress[pbarkey] += 1
# ============================================================================
# MainLoop
# ============================================================================
class MainLoop(BaseLoop):
    """Integrates with Client

    Extends BaseLoop with client spawning, rate-limited initialization,
    staggered scheduling, and timed loop termination.
    """

    def __init__(self, *args, clientcls=None, **kwargs):
        super().__init__(*args, **kwargs)
        self._clients = frozenset()
        # Client implementation to spawn; defaults to the plain Client.
        self._clientcls = Client if clientcls is None else clientcls

    def initloghandlers(self, formatter):
        """Setup log handlers"""
        ret = super().initloghandlers(formatter)
        options = self._logoptions
        logfile = options.get('logfile', None)
        if logfile:
            # When a logfile is configured, it REPLACES the handlers set up
            # by the superclass (ret is overwritten, not extended).
            fh = FileHandler(logfile)
            fh.setLevel(LogLevel.DEBUG.value)
            fh.setFormatter(formatter)
            ret = [fh]
        return ret

    def initlogformatter(self):
        """Setup log formatter"""
        options = self._logoptions
        formatter = super().initlogformatter()
        # Render log timestamps in the configured timezone.
        formatter.converter = partial(datetime.fromtimestamp, tz=options['tz'])
        return formatter

    async def initclients(self, config, state, clients, loop):
        """Initialize clients according to the given rate

        Initializes at most `initrate` clients per second; rate 0 means
        all clients at once. Emits init_start/init_end events.
        """
        if not clients:
            return
        clients = set(clients)
        rate = config['loadlimit']['initrate']
        ensure_future = asyncio.ensure_future
        numclients = len(clients)
        if rate == 0:
            rate = numclients
        tasks = []
        addtask = tasks.append
        state.event.append(Event(EventType.init_start))
        while True:
            if numclients <= rate:
                # Remaining clients fit in one batch: schedule and stop.
                async for c in ageniter(clients):
                    addtask(ensure_future(c.init(config, state), loop=loop))
                break
            # Schedule one rate-sized batch, then wait a second.
            async for i in ageniter(range(rate)):
                c = clients.pop()
                addtask(ensure_future(c.init(config, state), loop=loop))
            numclients = len(clients)
            await asyncio.sleep(1)
        await asyncio.gather(*tasks, loop=loop)
        state.event.append(Event(EventType.init_end))

    async def schedclients(self, config, state, clients, loop):
        """Schedule clients according to schedsize and sched_delay

        Starts `schedsize` clients at a time with `sched_delay` between
        batches; size 0 means all at once (and implies no delay). Emits
        warmup_start/warmup_end events.
        """
        if not clients:
            return
        size = config['loadlimit']['schedsize']
        delay = config['loadlimit']['sched_delay'].total_seconds()
        ensure_future = asyncio.ensure_future
        sleep = asyncio.sleep
        clients = set(clients)
        numclients = len(clients)
        numsched = 0
        if size == 0:
            size = numclients
        if size == numclients:
            delay = 0
        state.event.append(Event(EventType.warmup_start))
        while numsched < numclients:
            # Bail out early if the run has been told to stop.
            if not state.reschedule:
                break
            for i in range(size):
                c = clients.pop()
                ensure_future(c(state), loop=loop)
            numsched = numsched + size
            await sleep(delay)
        state.event.append(Event(EventType.warmup_end))

    def init(self, config, state):
        """Initialize clients"""
        countstore = state.countstore
        clients = frozenset(self.spawn_clients(config))
        self._clients = clients
        loop = self.loop
        ensure_future = asyncio.ensure_future
        # Init clients (blocks until all clients finished initializing)
        loop.run_until_complete(self.initclients(config, state, clients, loop))
        # Clear countstore so warmup stats don't pollute the measurement
        countstore.reset()
        # Schedule loop end
        ensure_future(self.endloop(config, state), loop=loop)
        # Schedule clients on the loop
        ensure_future(self.schedclients(config, state, clients, loop),
                      loop=loop)

    def shutdown(self, config, state):
        """Shutdown clients"""
        loop = self.loop
        # Tell clients to shutdown
        ensure_future = asyncio.ensure_future
        t = [ensure_future(c.shutdown(config, state), loop=loop)
             for c in self._clients]
        f = asyncio.gather(*t, loop=loop)
        loop.run_until_complete(f)

    def spawn_clients(self, config):
        """Spawns clients according the given config"""
        # Tasks are registered on loadlimit.task by the task importer.
        taskmod = import_module('loadlimit.task')
        tasklist = taskmod.__tasks__
        numclients = config['loadlimit']['numusers']
        clientcls = self._clientcls
        tasks = [clientcls(tasklist, reschedule=True)
                 for _ in range(numclients)]
        return tasks

    async def endloop(self, config, state):
        """coro func that ends the loop after a given duration"""
        duration = config['loadlimit']['duration']
        await asyncio.sleep(duration.total_seconds())
        # Stop rescheduling clients
        state.reschedule = False
        async for client in ageniter(self.clients):
            client.option.reschedule = False
        # Send shutdown command
        await channel.shutdown.send(0)

    @property
    def clients(self):
        """Return spawned clients"""
        return self._clients
# ============================================================================
#
# ============================================================================
class ProcessOptions:
    """Process cli options

    Each method validates one parsed argparse option and stores the
    normalized value into the loadlimit config section. __call__ runs
    them in dependency order.
    """

    def __call__(self, config, args):
        llconfig = config['loadlimit']
        # Order matters: e.g. schedsize validates against numusers, and
        # verbose updates the section created by logging.
        order = ['timezone', 'numusers', 'duration', 'taskimporter', 'tqdm',
                 'cache', 'export', 'periods', 'logging', 'verbose',
                 'qmaxsize', 'flushwait', 'initrate', 'schedsize',
                 'sched_delay']
        for name in order:
            getattr(self, name)(llconfig, args)

    def timezone(self, config, args):
        """Setup timezone config"""
        config['timezone'] = timezone(args.timezone)

    def numusers(self, config, args):
        """Setup number of users config"""
        numusers = args.numusers
        if numusers == 0:
            raise ValueError('users option expected value > 0, got {}'.
                             format(numusers))
        config['numusers'] = numusers

    def duration(self, config, args):
        """Setup duration config"""
        delta = Timedelta(args.duration)
        if not isinstance(delta, Timedelta):
            raise ValueError('duration option got invalid value {!r}'.
                             format(args.duration))
        config['duration'] = delta

    def taskimporter(self, config, args):
        """Setup task importer config"""
        config['importer'] = TaskImporter(*args.taskfile)

    def tqdm(self, config, args):
        """Setup tqdm config"""
        config['show-progressbar'] = args.progressbar

    def cache(self, config, args):
        """Setup cache config"""
        cache_type = args.cache
        config['cache'] = dict(type=cache_type)

    def export(self, config, args):
        """Setup export config"""
        config['export'] = exportsection = {}
        exportsection['type'] = export = args.export
        if export is not None:
            # Default to the current directory; it must already exist.
            exportdir = args.exportdir
            if exportdir is None:
                exportdir = os.getcwd()
            if not isdir(exportdir):
                raise FileNotFoundError(exportdir)
            exportsection['targetdir'] = exportdir

    def periods(self, config, args):
        """Setup period config"""
        if args.periods <= 1:
            raise ValueError('periods option must be > 1')
        config['periods'] = args.periods

    def logging(self, config, args):
        """Setup logging config"""
        if args.uselogfile:
            logfile = args.logfile
            path = (Path.cwd() / '{}.log'.format(PROGNAME)
                    if logfile is None else Path(logfile))
            if not path.parent.is_dir():
                raise FileNotFoundError(str(path.parent))
            elif path.is_dir():
                raise IsADirectoryError(str(path))
            config['logging'] = {'logfile': str(path)}

    def verbose(self, config, args):
        """Setup verbosity config"""
        # 0 -> WARNING(30), 1 -> INFO(20), 2+ -> DEBUG(10)
        verbosity = 10 if args.verbosity >= 3 else (3 - args.verbosity) * 10
        logsection = config.setdefault('logging', {})
        loglevels = {l.value: l for l in LogLevel}
        logsection['loglevel'] = loglevels[verbosity]

    def qmaxsize(self, config, args):
        """Setup qmaxsize config"""
        # (docstring fixed: was a copy-paste of the verbose docstring)
        config['qmaxsize'] = args.qmaxsize

    def flushwait(self, config, args):
        """Setup flushwait config"""
        try:
            delta = Timedelta(args.flushwait)
        except Exception:
            delta = None
        if not isinstance(delta, Timedelta):
            # Fixed: the error previously blamed the 'duration' option.
            raise ValueError('flush-wait option got invalid value: {}'.
                             format(args.flushwait))
        config['flushwait'] = delta

    def initrate(self, config, args):
        """Setup initrate config"""
        config['initrate'] = args.initrate

    def schedsize(self, config, args):
        """Setup schedsize config"""
        size = args.schedsize
        numusers = config['numusers']
        if size > numusers:
            msg = ('sched-size option expected maximum value of {}, '
                   'got value {}')
            raise ValueError(msg.format(numusers, size))
        config['schedsize'] = size

    def sched_delay(self, config, args):
        """Setup sched_delay config"""
        try:
            delta = Timedelta(args.sched_delay)
        except Exception:
            delta = None
        if not isinstance(delta, Timedelta):
            raise ValueError('sched-delay option got invalid value: {}'.
                             format(args.sched_delay))
        config['sched_delay'] = delta
process_options = ProcessOptions()
def defaultoptions(parser):
    """cli arguments

    Registers every loadlimit command-line option on *parser* and wires
    the default entry point (RunLoop) via set_defaults.
    """
    parser.add_argument(
        '-u', '--users', dest='numusers', default=1, type=int,
        help='Number of users/clients to simulate'
    )
    parser.add_argument(
        '-d', '--duration', dest='duration', default=None,
        help='The length of time the load test will run for'
    )
    parser.add_argument(
        '-t', '--task', dest='taskname', metavar='TASKNAME',
        default=None, nargs='+',
        help=('Task names to schedule')
    )
    parser.add_argument(
        '--timezone', dest='timezone', default='UTC',
        help='Timezone to display dates in (default: UTC)'
    )
    parser.add_argument(
        '--no-progressbar', dest='progressbar', action='store_false',
        # Fixed: help text was a copy-paste of the --timezone help.
        help='Disable the progress bar'
    )
    # cache arguments
    parser.add_argument(
        '-C', '--cache', dest='cache', choices=['memory', 'sqlite'],
        default='memory',
        help='What type of storage to use as the cache. Default: memory'
    )
    # export arguments
    parser.add_argument(
        '-E', '--export', dest='export', choices=['csv', 'sqlite'],
        default=None,
        help='What type of file to export results to.'
    )
    parser.add_argument(
        '-e', '--export-dir', dest='exportdir', default=None,
        help='The directory to export results to.'
    )
    parser.add_argument(
        '-p', '--periods', dest='periods', type=int, default=8,
        help='The number of time periods to show in the results. Default: 8'
    )
    # taskfiles
    parser.add_argument(
        'taskfile', metavar='FILE', nargs='+',
        help='Python module file to import as a task file'
    )
    # logging
    parser.add_argument(
        '-L', '--enable-logfile', dest='uselogfile', action='store_true',
        help='Enable logging to a logfile'
    )
    parser.add_argument(
        '-l', '--logfile', metavar='FILE', dest='logfile', default=None,
        help=('If logging to a file is enabled, log to FILE. Default: {}.log'.
              format(PROGNAME))
    )
    # Set loglevel
    parser.add_argument(
        '-v', '--verbose', dest='verbosity', action='count', default=0,
        help='Increase verbosity'
    )
    # Set maximum number of pending data
    parser.add_argument(
        '--pending-size', dest='qmaxsize', default=1000, type=int,
        help='Number of datapoints waiting to be worked on. Default: 1000'
    )
    # Set time to wait between flushes
    parser.add_argument(
        '--flush-wait', dest='flushwait', default='2s',
        help=('The amount of time to wait before flushing data to disk. '
              'Default: 2 seconds')
    )
    # Client init rate
    parser.add_argument(
        '--user-init-rate', dest='initrate', default=0, type=int,
        help=('The number of users to spawn every second. '
              'Default: 0 (ie spawn all users at once)')
    )
    # Client schedule rate
    parser.add_argument(
        '--sched-size', dest='schedsize', default=0, type=int,
        help=('The number of users to schedule at once. '
              'Default: 0 (ie schedule all users)')
    )
    parser.add_argument(
        '--sched-delay', dest='sched_delay', default='0s',
        help=('The amount of time to wait before scheduling the number of '
              'users defined by --sched-size. Default: 0 (ie schedule all '
              'users)')
    )
    parser.set_defaults(_main=RunLoop())
def create():
    """Construct basic cli interface"""
    # Single-command interface: all loadlimit options live on the root parser.
    cli = ArgumentParser(prog=PROGNAME)
    defaultoptions(cli)
    return cli
class StatSetup:
    """Context setting up time recording and storage

    On __enter__ chooses in-memory or sqlite-backed calculation objects;
    on __exit__ runs the calculations and (optionally) exports results.
    """

    def __init__(self, config, state):
        self._config = config
        self._state = state
        self._calcobj = (None, None)  # replaced by a 4-tuple in __enter__
        self._results = None
        self._statsdict = None
        self._countstore = state.countstore
        # Setup event list
        state.event = []

    def __enter__(self):
        config = self._config
        state = self._state
        llconfig = config['loadlimit']
        self._statsdict = statsdict = Period()
        countstore = self._countstore
        if llconfig['cache']['type'] == 'memory':
            # Pure in-memory calculation objects; no SQL engine needed.
            self._calcobj = tuple(c(statsdict=statsdict, countstore=countstore)
                                  for c in [Total, TimeSeries, TotalError,
                                            TotalFailure])
            state.sqlengine = None
        else:
            # sqlite cache lives in the run's temporary directory.
            cachefile = pathjoin(llconfig['tempdir'], 'cache.db')
            connstr = 'sqlite:///{}'.format(cachefile)
            state.sqlengine = engine = create_engine(connstr)
            self._calcobj = tuple(
                c(statsdict=statsdict, sqlengine=engine, countstore=countstore)
                for c in [SQLTotal, SQLTimeSeries, SQLTotalError,
                          SQLTotalFailure])
            # Subscribe to shutdown command
            channel.shutdown(partial(flushtosql_shutdown, statsdict=statsdict,
                                     sqlengine=engine))
            # Add flushtosql to timedata event
            stat.timedata(flushtosql)
        return self

    def __exit__(self, errtype, err, errtb):
        calcobj = self._calcobj
        total, timeseries, error, failure = calcobj
        countstore = self._countstore
        with ExitStack() as stack:
            # Set timeseries periods
            timeseries.vals.periods = self._config['loadlimit']['periods']
            # Nothing was measured: skip calculations entirely.
            if countstore.start_date is None:
                return
            # Add start and end events to the event timeline
            event = self._state.event
            event.insert(0, Event(EventType.start, countstore.start_date))
            event.append(Event(EventType.end, countstore.end_date))
            # Enter results contexts
            for r in calcobj:
                stack.enter_context(r)
            # Run calculations
            for name, data, err, fail in total:
                for r in calcobj:
                    r.calculate(name, data, err, fail)
            # Don't export
            exportconfig = self._config['loadlimit']['export']
            export_type = exportconfig['type']
            if export_type is None:
                results = (
                    (total.vals.results, ) + timeseries.vals.results +
                    (error.vals.results, failure.vals.results)
                )
                self._results = results
                return
            exportdir = exportconfig['targetdir']
            # Export values
            for r in calcobj:
                r.export(export_type, exportdir)
            # Capture any changes
            results = (
                (total.vals.results, ) + timeseries.vals.results +
                (error.vals.results, failure.vals.results)
            )
            self._results = results

    def startevent(self):
        """Start events"""
        countstore = self._countstore
        llconfig = self._config['loadlimit']
        qmaxsize = llconfig['qmaxsize']
        engine = self._state.sqlengine
        # Schedule SendTimeData
        flushwait = llconfig['flushwait']
        send = SendTimeData(countstore, flushwait=flushwait,
                            channel=stat.timedata)
        asyncio.ensure_future(send())
        # Assign shutdown tasks (send must stop first)
        channel.shutdown(send.shutdown, anchortype=channel.AnchorType.first)
        channel.shutdown(stat.timedata.shutdown)
        channel.shutdown(cleanup.shutdown)
        # Start cleanup channel
        cleanup.add(TQDMCleanup(self._config, self._state))
        cleanup.open()
        cleanup.start()
        # Start timedata channel
        stat.timedata.open(maxsize=qmaxsize, cleanup=cleanup)
        stat.timedata.start(statsdict=self._statsdict, sqlengine=engine)

    @property
    def results(self):
        """Return stored results"""
        return self._results
class RunLoop:
    """Setup, run, and teardown loop

    Callable entry point stored on args._main; runs the named setup
    stages in order inside an ExitStack so teardown happens in reverse.
    """

    def __init__(self):
        self._main = None
        self._statsetup = None

    def __call__(self, config, args, state):
        """Process cli options and start the loop"""
        self.init(config, args, state)
        # Stage methods executed in order; each receives the ExitStack.
        stackorder = ['tempdir', 'statsetup', 'mainloop', 'mainloop_logging',
                      'initclients', 'setuptqdm', 'startmain',
                      'shutdown_clients']
        with ExitStack() as stack:
            pbar = stack.enter_context(
                tqdm_context(config, state, name=PROGNAME, desc='Progress',
                             total=len(stackorder)))
            for name in stackorder:
                func = getattr(self, name)
                if pbar is None:
                    # Progress bar disabled: print stage names instead.
                    state.write('{}: '.format(func.__doc__), end='')
                    state.write('', end='', flush=True)
                    time.sleep(0.1)
                func(stack, config, state)
                if pbar is None:
                    state.write('OK')
                else:
                    time.sleep(0.1)
                    pbar.update(1)
        ret = self._main.exitcode
        self._main = None
        self._statsetup = None
        state.write('\n\n', startnewline=True)
        return ret

    def init(self, config, args, state):
        """Initial setup"""
        llconfig = config['loadlimit']
        # Setup win32 event loop (uvloop is unavailable on Windows)
        if sys.platform == 'win32':
            loop = asyncio.ProactorEventLoop()
            asyncio.set_event_loop(loop)
        else:
            asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
        # Process cli options
        process_options(config, args)
        # Set up importhook so task files can be imported
        sys.meta_path.append(llconfig['importer'])
        # Create state namespace
        state.reschedule = True
        state.progressbar = {}
        state.tqdm_progress = {}
        state.write = Printer()
        state.countstore = measure
        self._statsetup = StatSetup(config, state)

    def tempdir(self, stack, config, state):
        """Setup temporary directory"""
        config['loadlimit']['tempdir'] = abspath(
            stack.enter_context(TemporaryDirectory()))

    def statsetup(self, stack, config, state):
        """Setup stats recording"""
        stack.enter_context(self._statsetup)

    def mainloop(self, stack, config, state):
        """Create main loop"""
        loglevel = config['loadlimit']['logging']['loglevel']
        self._main = stack.enter_context(MainLoop(loglevel=loglevel,
                                                  clientcls=TQDMClient))

    def mainloop_logging(self, stack, config, state):
        """Setup main loop logging"""
        llconfig = config['loadlimit']
        logfile = llconfig.get('logging', {}).get('logfile', None)
        main = self._main
        main.initlogging(datefmt='%Y-%m-%d %H:%M:%S.%f',
                         style='{',
                         format='{asctime} {levelname} {name}: {message}',
                         fmtcls=LoadLimitFormatter, tz=llconfig['timezone'],
                         logfile=logfile)

    def initclients(self, stack, config, state):
        """Create and initialize clients"""
        numusers = config['loadlimit']['numusers']
        with tqdm_context(config, state, name='init', desc='Ramp-up',
                          total=numusers):
            self._main.init(config, state)

    def setuptqdm(self, stack, config, state):
        """Setup tqdm progress bars"""
        duration = int(config['loadlimit']['duration'].total_seconds())
        stack.enter_context(tqdm_context(config, state, name='runtime',
                                         total=duration, desc='Run time',
                                         sched=True))
        stack.enter_context(tqdm_context(config, state, name='iteration',
                                         desc='Iterations'))
        stack.enter_context(tqdm_context(config, state, name='cleanup',
                                         desc='Cleanup'))

    def startmain(self, stack, config, state):
        """Start the main loop"""
        # Start events
        self._statsetup.startevent()
        # Start the loop (blocks until shutdown)
        self._main.start()

    def shutdown_clients(self, stack, config, state):
        """Tell clients to shutdown"""
        numusers = config['loadlimit']['numusers']
        with tqdm_context(config, state, name='shutdown',
                          desc='Stopping Clients', total=numusers):
            self._main.shutdown(config, state)
# ============================================================================
# Main
# ============================================================================
def main(arglist=None, config=None, state=None):
    """Main cli interface

    :param arglist: argv-style list of options; defaults to sys.argv[1:]
    :param config: pre-seeded config mapping (a fresh defaultdict if None)
    :param state: shared Namespace passed through the run (fresh if None)
    """
    if not arglist:
        arglist = sys.argv[1:]
        # No arguments at all: show the help text instead of running.
        if not arglist:
            arglist.append('--help')
    if config is None:
        config = defaultdict(dict)
    # Ensure loadlimit config section exists
    if 'loadlimit' not in config:
        config['loadlimit'] = {}
    if state is None:
        state = Namespace()
    parser = create()
    args = parser.parse_args(arglist)
    # args._main is the RunLoop callable installed by defaultoptions().
    exitcode = args._main(config, args, state)
    sys.exit(exitcode)


if __name__ == '__main__':
    main()
# ============================================================================
#
# ============================================================================
|
# python3
# Fetch a site's favicon and print its mmh3 hash of the base64-encoded
# body (the format used by Shodan's http.favicon.hash filter).
import mmh3
import requests
import codecs

response = requests.get('https://yoursite path.com/favicon.ico')
favicon = codecs.encode(response.content, "base64")
# Renamed from `hash` to avoid shadowing the builtin.
favicon_hash = mmh3.hash(favicon)
print(favicon_hash)
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from random import randint
import webapp2
class MainPage(webapp2.RequestHandler):
    """Hello-world handler mounted at '/'."""

    def get(self):
        # Plain text response so the browser doesn't try to render HTML.
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.write('Hello, World!')
class LottoPage(webapp2.RequestHandler):
    """Handler returning six distinct lotto numbers (1-49), sorted."""

    def get(self):
        # random.sample draws six unique values in one call, replacing
        # the original manual rejection-sampling loop.
        from random import sample
        lucky_combination = sorted(sample(range(1, 50), 6))
        logging.info(lucky_combination)
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.write(lucky_combination)
# Route table: '/' -> hello world, '/lotto' -> lottery numbers.
# debug=True surfaces stack traces in responses; disable in production.
app = webapp2.WSGIApplication([
    ('/', MainPage),
    ('/lotto', LottoPage),
], debug=True)
|
import unittest.mock
import pytest
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.source.sql.oracle import OracleConfig, OracleSource
def test_oracle_config():
    """Check OracleConfig URL generation, option exclusivity, and source creation."""
    base_config = {
        "username": "user",
        "password": "password",
        "host_port": "host:1521",
    }
    # service_name should be rendered as a query parameter on the URL.
    config = OracleConfig.parse_obj(
        {
            **base_config,
            "service_name": "svc01",
        }
    )
    assert (
        config.get_sql_alchemy_url()
        == "oracle+cx_oracle://user:password@host:1521/?service_name=svc01"
    )
    # database and service_name are mutually exclusive.
    with pytest.raises(ValueError):
        config = OracleConfig.parse_obj(
            {
                **base_config,
                "database": "db",
                "service_name": "svc01",
            }
        )
    # Smoke-test source creation with the heavy get_workunits mocked out.
    with unittest.mock.patch(
        "datahub.ingestion.source.sql.sql_common.SQLAlchemySource.get_workunits"
    ):
        OracleSource.create(
            {
                **base_config,
                "service_name": "svc01",
            },
            PipelineContext("test-oracle-config"),
        ).get_workunits()
|
import os
import pprint
from argparse import ArgumentParser
from subprocess import check_call
from collections import OrderedDict as ODict
from margrie_libs.utils.folder_operations import folders_starting_with
from calcium_recordings.correct_folders import getDepthDirs, setRecNbs
from calcium_recordings.recording import Recording
def getParentFolder(folder):
    """Return the parent directory of *folder*, ignoring trailing separators."""
    normalised = os.path.normpath(folder)
    return os.path.dirname(normalised)
def getFolderBaseName(folder):
    """Return the last path component of *folder*, stripped of whitespace."""
    return os.path.basename(os.path.normpath(folder)).strip()
def getDepth(folder):
    """Parse the imaging depth from a folder named like '150um'."""
    # Strip the 'um' unit suffix and convert the remainder to int.
    return int(getFolderBaseName(folder).replace('um', ''))
def getDataFolders(srcDir):
    """Collect recording folders grouped by imaging depth.

    Returns (depths, dataFoldersPaths) as parallel lists with one entry
    per 'maxAngle*' folder, so a depth repeats once per angle under it.
    """
    dataFoldersPaths = []
    depths = []
    for d in getDepthDirs(srcDir):
        depth = getDepth(d)
        angleFolders = folders_starting_with(d, 'maxAngle')
        print("Number of angles at {}um: {}".format(depth, len(angleFolders)))
        for angleFolder in angleFolders:
            recFolders = folders_starting_with(angleFolder, 'rec')
            depths.append(depth)  # So that it matches in items with dataFoldersPaths
            dataFoldersPaths.append(recFolders)
    return depths, dataFoldersPaths
def getRecsTree(srcDir):
    """Build an ordered tree {depth: {angle: numbered recordings}} from the
    recording folders found under *srcDir*.

    Folders for which a Recording cannot be created are skipped with a warning.
    """
    depths, dataFolders = getDataFolders(srcDir)
    recsTree = ODict()
    for depth, dataFoldersAtDepth in zip(depths, dataFolders):
        recs = []
        if depth not in recsTree:  # idiom fix: was `not depth in recsTree.keys()`
            recsTree[depth] = {}
        for folder in dataFoldersAtDepth:
            try:
                recs.append(Recording(folder))
            except RuntimeError as err:  # Could not create instance
                print("Warning: could not create recording for folder {}, skipping; {}".format(folder, err))
        numberedRecs = setRecNbs(recs)
        if numberedRecs:  # Skip if nothing of type 'rec'
            try:
                # setRecNbs returns a single-entry mapping {angle: recs_dict}.
                angle = list(numberedRecs.keys())[0]
                numberedRecsDict = list(numberedRecs.values())[0]
            except IndexError:
                print("Depth: {}".format(depth))
                print("\tRecordings: {}".format(numberedRecs))
                raise
            recsTree[depth][angle] = numberedRecsDict
    return recsTree
def main(srcDir, channelsToProcess, refChannel, overwrite, gaussianKernelSize, baselineN):
    """
    Walk the recordings tree under *srcDir* and run the Fiji registration
    macro on every recording that still needs processing.

    :param srcDir: experiment source directory
    :param channelsToProcess: zero-based channel indices to register (1 or 2)
    :param refChannel: zero-based index of the reference channel
    :param overwrite: reprocess recordings that already have registered images
    :param gaussianKernelSize: gaussian blur kernel applied before registration
    :param baselineN: 'whole' or 'bsl' -- which frame count to use as reference
    """
    fijiPrefix = 'fiji --allow-multiple '
    macroPath = 'register.ijm'
    fijiCmd = fijiPrefix + ' -batch ' + macroPath
    srcDir = os.path.abspath(srcDir)
    recsTree = getRecsTree(srcDir)
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(recsTree)
    for depth, recsAtDepth in recsTree.items():
        print("Processing depth: {}".format(depth))
        if not recsAtDepth:
            continue
        for angle in recsAtDepth.keys():
            print("\tProcessing angle: {}".format(angle))  # FIX: message typo "Processingangle"
            for rec in recsAtDepth[angle]['recs']:  # Skip the highRes and stacks
                nChannels = len(rec.derotatedImgFilesPaths)
                if len(channelsToProcess) <= nChannels:
                    # Process when forced, or when no registered output exists yet.
                    process = overwrite or not rec.registeredImgFilesPaths
                    if process:
                        if baselineN == 'whole':
                            nRefImgs = rec.getNFrames()
                        elif baselineN == 'bsl':
                            nRefImgs = rec.getNBslFrames()
                        else:
                            raise ValueError("Expected one of ['whole', 'bsl'], got {}.".format(baselineN))
                        print("Registering images in {}".format(rec.dir))
                        imgPath1 = rec.derotatedImgFilesPaths[channelsToProcess[0]]
                        if len(channelsToProcess) > 1:
                            imgPath2 = rec.derotatedImgFilesPaths[channelsToProcess[1]]
                        else:
                            imgPath2 = imgPath1
                        wholeCmd = '{} {},{},{},{},{}'.format(fijiCmd,
                                                              rec.derotatedImgFilesPaths[refChannel],
                                                              imgPath1,
                                                              imgPath2,
                                                              nRefImgs,
                                                              gaussianKernelSize)
                        check_call(wholeCmd, shell=True)
                    else:
                        print("Info: Recording {} already processed, skipping as instructed".format(rec))
                else:
                    # FIX: message typo "ou of" -> "out of"
                    print("Warning: Skipping recording {}, channel missing, got {} out of {}".format(rec, nChannels, len(channelsToProcess)))
if __name__ == "__main__":
    # Command-line entry point: parse options and hand off to main().
    program_name = os.path.basename(__file__)
    parser = ArgumentParser(prog=program_name, description='Program to recursively register (Fiji turboreg) all recordings in an experiment')
    parser.add_argument("-b", "--baseline-type", dest="baselineN", type=str, choices=('whole', 'bsl'), default='whole', help="The type of baseline to use (one of %(choices)s). Default: %(default)s")
    parser.add_argument("-k", "--kernel-size", dest="gaussianKernelSize", type=float, default=1.2, help="The size of the gaussian kernel to use for filtering prior to registration. Default: %(default)s")
    parser.add_argument("-o", "--overwrite", action="store_true", help="Do we overwrite recordings that have already been processed")
    parser.add_argument("-r", "--reference-channel", dest="refChannel", type=int, choices=(1, 2), default=1, help="The reference channel. Default: %(default)s")
    parser.add_argument("-p", "--channels-to-process", dest="channelsToProcess", type=int, nargs='+', default=[1, 2], help="The list of channels to process. Default: %(default)s")
    parser.add_argument('source_directory', type=str, help='The source directory of the experiment')
    args = parser.parse_args()
    if len(args.channelsToProcess) > 2:
        raise ValueError("Channels to process has to be a list of 1 or 2 elements, got {}: {}".format(len(args.channelsToProcess), args.channelsToProcess))
    # Translate the 1-based CLI channel numbers to 0-based list indices.
    refChannel = args.refChannel -1
    channelsToProcess = [c-1 for c in args.channelsToProcess]
    main(args.source_directory, channelsToProcess, refChannel, args.overwrite, args.gaussianKernelSize, args.baselineN)
|
# Author: Jacob Hallberg
# Last Edited: 12/30/2017
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_HuffmanEncode(object):
    """Qt Designer-style UI class for the Huffman encoder window.

    Generated-looking boilerplate: builds a two-tab layout (Encode / Decode),
    each with a large browse button and a caption label; the Decode tab also
    has a read-only text browser. Kept byte-identical apart from documentation.
    """

    def setupUi(self, MainWindow):
        """Create and lay out all widgets on *MainWindow*."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(772, 832)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
        self.tabWidget.setGeometry(QtCore.QRect(0, 9, 771, 832))
        font = QtGui.QFont()
        font.setFamily("Garuda")
        font.setPointSize(14)
        self.tabWidget.setFont(font)
        self.tabWidget.setTabPosition(QtWidgets.QTabWidget.North)
        self.tabWidget.setTabShape(QtWidgets.QTabWidget.Rounded)
        self.tabWidget.setObjectName("tabWidget")
        # --- Encode tab: upload button + caption ---
        self.Encode = QtWidgets.QWidget()
        self.Encode.setObjectName("Encode")
        self.UploadFile = QtWidgets.QPushButton(self.Encode)
        self.UploadFile.setGeometry(QtCore.QRect(170, 580, 411, 141))
        self.UploadFile.setObjectName("Upload File")
        self.label_2 = QtWidgets.QLabel(self.Encode)
        self.label_2.setGeometry(QtCore.QRect(70, 535, 611, 35))
        self.label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_2.setObjectName("label_2")
        self.tabWidget.addTab(self.Encode, "")
        # --- Decode tab: decode button, caption and text browser ---
        self.Decode = QtWidgets.QWidget()
        self.Decode.setObjectName("Decode")
        self.DecodeFile = QtWidgets.QPushButton(self.Decode)
        self.DecodeFile.setGeometry(QtCore.QRect(170, 580, 411, 141))
        self.DecodeFile.setObjectName("Decode File")
        self.label = QtWidgets.QLabel(self.Decode)
        self.label.setGeometry(QtCore.QRect(70, 535, 611, 35))
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        self.textBrowser = QtWidgets.QTextBrowser(self.Decode)
        self.textBrowser.setGeometry(QtCore.QRect(0, 10, 780, 521))
        self.textBrowser.setObjectName("textBrowser")
        self.tabWidget.addTab(self.Decode, "")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        self.tabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (window title, button/label texts)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Huffamn Encoding - Jacob Hallberg"))
        self.UploadFile.setText(_translate("MainWindow", "Click to Browse Files"))
        self.label_2.setText(_translate("MainWindow", "Click below to get started"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.Encode), _translate("MainWindow", "Encode File"))
        self.DecodeFile.setText(_translate("MainWindow", "Click to Browse Files"))
        self.label.setText(_translate("MainWindow", "Upload Compressed File, Code Book"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.Decode), _translate("MainWindow", "Decode File"))
|
import json
import os
import time
from itertools import chain
from functools import reduce
import pathlib
import numpy as np
from matplotlib import pyplot
import matplotlib
from keras.callbacks import TensorBoard, ModelCheckpoint
from keras.utils import plot_model
from keras.models import load_model
from keras import Input, Model
from keras import backend as K
import tensorflow as tf
from IOMap import IOMap
from LayerProvider import LayerProvider
from OptimizerResolver import OptimizerResolver
from ActivationResolver import ActivationResolver
from Tee import Tee
from copy import deepcopy;
class Seq2Seq(object):
    """Keras encoder-decoder (sequence-to-sequence) model wrapper.

    Builds three models from a JSON-like ``model_def``: a combined training
    model (teacher forcing), and separate encoder/decoder models for
    step-by-step inference.  Also handles persistence (save/load), training
    with checkpoints, report generation and TensorFlow graph export.
    """

    def __init__(self, model_def=None, load=False, layer_provider=LayerProvider(),
                 optimizer_resolver=OptimizerResolver(), working_dir="./", activation_resolver=ActivationResolver()):
        """Create (or reload, if ``load`` is True) the training/inference models.

        NOTE(review): the provider/resolver defaults are instantiated once at
        class-definition time and shared across all instances -- confirm these
        objects are stateless.
        """
        self.layer_provider = layer_provider
        self.optimizer_resolver = optimizer_resolver
        self.activation_resolver = activation_resolver
        self.working_dir = working_dir
        self.training_result = None
        self.custom_objs = {}
        self.history = {"loss": [], "val_loss": []}
        self.last_train_data = {"train": {"in": [], "out": []}, "validate": {"in": [], "out": []}}
        # Metadata for supported recurrent layer types: the state tensors each
        # type exposes and (filled in below) how many there are.
        self.rnn_layers = {
            "LSTM": {
                "type": "LSTM",
                "states": [
                    "state_h",
                    "state_c"
                ],
                "state_count": 0
            },
            "GRU": {
                "type": "GRU",
                "states": [
                    "state"
                ],
                "state_count": 0
            }
        }
        for name, layer in self.rnn_layers.items():
            layer["state_count"] = len(layer["states"])
        if load:
            model_def = self._load_model_def()
        else:
            pathlib.Path("{}/checkpoints".format(working_dir)).mkdir(parents=True, exist_ok=True)
        self.start_token = "[S]"
        self.end_token = "[E]"
        # Keep the untouched definition for serialization; work on a deep copy.
        self.model_def_original = model_def
        self.model_def = deepcopy(model_def)
        self.preprocess_model_def()
        if "reverse_input" not in self.model_def:
            self.model_def["reverse_input"] = False
        if self.start_token not in self.model_def["out_tokens"]:
            self.model_def["out_tokens"].append(self.start_token)
        if self.end_token not in self.model_def["out_tokens"]:
            self.model_def["out_tokens"].append(self.end_token)
        self.in_map = IOMap(self.model_def["in_tokens"])
        self.out_map = IOMap(self.model_def["out_tokens"])
        # Explicit Input layers are created internally, so drop a leading one.
        if self.model_def["layers"][0]["type"] == "Input":
            self.model_def["layers"].pop(0)
        # Mark the last RNN layer: it terminates the encoder stack.
        last_rnn = None
        for i, layer in enumerate(self.model_def["layers"]):
            if "name" not in layer:
                layer["name"] = layer["type"]
            if layer["type"] in self.rnn_layers:
                if last_rnn is not None:
                    last_rnn["last_rnn"] = False
                layer["last_rnn"] = True
                last_rnn = layer
        if load:
            self.load_last_data()
            self._rebuild()
        else:
            self._build()

    def _load_model_def(self):
        """Read the persisted model definition from <working_dir>/model.json."""
        with open("{}/model.json".format(self.working_dir), "r") as file:
            return json.load(file)

    def preprocess_model_def(self):
        """Resolve optimizer and activation names in the definition into callables."""
        self.model_def["compile"]["optimizer"] = self.optimizer_resolver(self.model_def["compile"]["optimizer"], self)
        for layer in self.model_def["layers"]:
            if "activation" in layer["params"]:
                layer["params"]["activation"] = self.activation_resolver(layer["params"]["activation"], self)
                # Remember custom activations so load_model can deserialize them.
                self.custom_objs[layer["params"]["activation"].__name__] = layer["params"]["activation"]

    @staticmethod
    def _find_layer_by_name(model, name):
        """Return the layer of *model* with the given name, or None."""
        for layer in model.layers:
            if layer.name == name:
                return layer
        return None

    def _rebuild(self):
        """Reload the training model from disk and reconstruct the separate
        encoder/decoder inference models from its (already trained) layers."""
        print("Loading model...")
        self.training_model = load_model("{}/model.h5".format(self.working_dir), self.custom_objs)
        encoder_in = self._find_layer_by_name(self.training_model, "encoder_input").output
        encoder_out = []
        decoder_in = [self._find_layer_by_name(self.training_model, "decoder_input").output]
        decoder_out = []
        last_decoder_out = decoder_in[0]
        encoder_finished = False
        for i, layer_def in enumerate(self.model_def["layers"]):
            print("Rebuilding {}-{}...".format(layer_def['name'], i))
            if layer_def["type"] in self.rnn_layers:
                # One extra Input per RNN state so states can be fed at inference.
                for state in self.rnn_layers[layer_def["type"]]["states"]:
                    decoder_in.append(
                        Input(shape=(layer_def['params']['units'],), name="decoder_{}-{}_{}_input".format(layer_def['name'], i, state))
                    )
            if not encoder_finished and layer_def["type"] in self.rnn_layers:
                if layer_def["last_rnn"]:
                    encoder_finished = True
                elayer = self._find_layer_by_name(self.training_model, "encoder_{}-{}".format(layer_def['name'], i))
                # output[0] is the sequence; outputs[1:] are the state tensors.
                encoder_out += elayer.output[1:]
            dlayer = self._find_layer_by_name(self.training_model, "decoder_{}-{}".format(layer_def['name'], i))
            if layer_def["type"] in self.rnn_layers:
                dout = dlayer(last_decoder_out, initial_state=decoder_in[-self.rnn_layers[layer_def["type"]]["state_count"]:])
                last_decoder_out = dout[0]
                decoder_out += dout[1:]
            else:
                last_decoder_out = dlayer(last_decoder_out)
        self.encoder_model = Model(inputs=encoder_in, outputs=encoder_out)
        self.decoder_model = Model(inputs=decoder_in, outputs=[last_decoder_out] + decoder_out)
        print("Done.")

    def _build(self):
        """Construct the training, encoder and decoder models from scratch
        according to ``self.model_def`` and compile the training model."""
        print("Building models...")
        encoder_in = Input(shape=(None, self.in_map.length()), name="encoder_input")
        encoder_out = []
        decoder_in = [Input(shape=(None, self.out_map.length()), name="decoder_input")]
        decoder_out = []
        training_in = [encoder_in] + decoder_in
        last_encoder_out = encoder_in
        # Two decoder chains share weights: one for training (teacher forcing,
        # initial state from the encoder) and one for stepwise inference
        # (initial state from explicit Inputs).
        last_tdecoder_out = decoder_in[0]
        last_decoder_out = decoder_in[0]
        encoder_finished = False
        for i, layer_def in enumerate(self.model_def["layers"]):
            print("Adding {}-{}...".format(layer_def['name'], i))
            layer_ctor = self.layer_provider[layer_def["type"]](layer_def, i, self)
            if layer_def["type"] in self.rnn_layers:
                layer_def["params"]["return_state"] = True
                layer_def["params"]["return_sequences"] = True
                for state in self.rnn_layers[layer_def["type"]]["states"]:
                    decoder_in.append(
                        Input(shape=(layer_def['params']['units'],), name="decoder_{}-{}_{}_input".format(layer_def['name'], i, state))
                    )
            if not encoder_finished:
                eparam = deepcopy(layer_def["params"])
                if i == len(self.model_def["layers"]) - 1:
                    eparam["units"] = self.in_map.length()
                if layer_def["type"] in self.rnn_layers and layer_def["last_rnn"]:
                    # The last encoder RNN only exposes its final states.
                    eparam["return_sequences"] = False
                    encoder_finished = True
                elayer = layer_ctor(name="encoder_{}-{}".format(layer_def['name'], i), **eparam)
                eout = elayer(last_encoder_out)
                if layer_def["type"] in self.rnn_layers:
                    last_encoder_out = eout[0]
                    encoder_out += eout[1:]
                else:
                    last_encoder_out = eout
            dparam = deepcopy(layer_def["params"])
            if i == len(self.model_def["layers"]) - 1:
                dparam["units"] = self.out_map.length()
            dlayer = layer_ctor(name="decoder_{}-{}".format(layer_def['name'], i), **dparam)
            if layer_def["type"] in self.rnn_layers:
                tdout = dlayer(last_tdecoder_out, initial_state=encoder_out[-self.rnn_layers[layer_def["type"]]["state_count"]:])
                last_tdecoder_out = tdout[0]
                dout = dlayer(last_decoder_out, initial_state=decoder_in[-self.rnn_layers[layer_def["type"]]["state_count"]:])
                last_decoder_out = dout[0]
                decoder_out += dout[1:]
            else:
                last_tdecoder_out = dlayer(last_tdecoder_out)
                last_decoder_out = dlayer(last_decoder_out)
        self.training_model = Model(inputs=training_in, outputs=last_tdecoder_out)
        self.encoder_model = Model(inputs=encoder_in, outputs=encoder_out)
        self.decoder_model = Model(inputs=decoder_in, outputs=[last_decoder_out] + decoder_out)
        print("Compiling the training model...")
        self.training_model.compile(**self.model_def["compile"])
        print("Done.")

    def proccess_training_data(self, training_data, validation_data):
        """Wrap every target sequence with start/end tokens, vectorize both
        data sets and cache the encoded inputs in ``self.last_train_data``.

        NOTE: mutates the target lists of both dicts in place.
        """
        for record in chain(training_data.items(), validation_data.items()):
            record[1].insert(0, self.start_token)
            record[1].append(self.end_token)
        train_encoder_input, train_decoder_input, train_decoder_output = \
            self.vectorize_input(training_data, self.model_def["max_in_length"], self.model_def["max_out_length"])
        validate_encoder_input, validate_decoder_input, validate_decoder_output = \
            self.vectorize_input(validation_data, self.model_def["max_in_length"], self.model_def["max_out_length"])
        self.last_train_data = {
            "train": {
                "in": train_encoder_input,
                "out": train_decoder_input
            },
            "validate": {
                "in": validate_encoder_input,
                "out": validate_decoder_input
            }
        }
        return train_encoder_input, train_decoder_input, train_decoder_output,\
            validate_encoder_input, validate_decoder_input, validate_decoder_output

    def load_last_data(self):
        """Find the most recently saved training/validation JSON dumps in the
        working directory and re-process them into ``last_train_data``."""
        files = os.listdir(self.working_dir)
        training_data = None
        validation_data = None
        for file in files:
            if file.startswith("training_data_"):
                with open("{}/{}".format(self.working_dir, file)) as t:
                    training_data = json.load(t)
            elif file.startswith("validation_data_"):
                with open("{}/{}".format(self.working_dir, file)) as v:
                    validation_data = json.load(v)
            if training_data is not None and validation_data is not None:
                break
        self.proccess_training_data(training_data, validation_data)

    def train(self, data=None, training_data=None, validation_data=None, validation_split=0.3, **kwargs):
        """Fit the training model.

        Either pass ``data`` (randomly split by ``validation_split``) or
        explicit ``training_data``/``validation_data`` dicts.  Both sets are
        dumped to timestamped JSON files for reproducibility; TensorBoard and
        periodic checkpoint callbacks are always appended.
        """
        if data is not None:
            train_n = int(len(data) * (1. - validation_split))
            training_words = np.random.choice(list(data.keys()), train_n, False)
            training_data = {k: v for (k, v) in data.items() if k in training_words}
            validation_data = {k: v for (k, v) in data.items() if k not in training_words}
        train_date = time.strftime("%d_%m_%Y-%H%M%S")
        with open("{}/training_data_{}.json".format(self.working_dir, train_date), "w") as file:
            json.dump(training_data, file, indent=2)
        with open("{}/validation_data_{}.json".format(self.working_dir, train_date), "w") as file:
            json.dump(validation_data, file, indent=2)
        train_encoder_input, train_decoder_input, train_decoder_output, \
            validate_encoder_input, validate_decoder_input, validate_decoder_output = \
            self.proccess_training_data(training_data, validation_data)
        tensorboard_callback = TensorBoard(log_dir="{}/tensorboard".format(self.working_dir))
        model_checkpoint = ModelCheckpoint(
            filepath="{}/checkpoints/weights.{{epoch:03d}}-loss{{loss:.4f}}-val_loss{{val_loss:.4f}}.hdf5".format(self.working_dir),
            monitor="val_loss",
            verbose=1,
            save_weights_only=True,
            period=10
        )
        default_callbacks = [tensorboard_callback, model_checkpoint]
        if "callbacks" in kwargs:
            kwargs["callbacks"].extend(default_callbacks)
        else:
            kwargs["callbacks"] = default_callbacks
        kwargs["validation_data"] = ([validate_encoder_input, validate_decoder_input], validate_decoder_output)
        self.training_result = self.training_model.fit([train_encoder_input, train_decoder_input], train_decoder_output, **kwargs)
        self.history = self.training_result.history

    def vectorize_input(self, data, max_enc_length, max_dec_length):
        """One-hot encode a {input_seq: target_seq} dict into the three padded
        arrays the training model expects (encoder in, decoder in, decoder out).

        The decoder output is the target shifted by one token (teacher forcing).
        """
        encoder_input = np.zeros((len(data), max_enc_length, self.in_map.length()), dtype='float32')
        decoder_input = np.zeros((len(data), max_dec_length, self.out_map.length()), dtype='float32')
        decoder_output = np.zeros((len(data), max_dec_length, self.out_map.length()), dtype='float32')
        for i, (key, value) in enumerate(data.items()):
            e_input = key[::-1] if self.model_def["reverse_input"] else key
            encoder_input[i] = self.in_map.encode(e_input, max_enc_length)
            decoder_input[i] = self.out_map.encode(value, max_dec_length)
            decoder_output[i] = self.out_map.encode(value[1:], max_dec_length)
        return encoder_input, decoder_input, decoder_output

    def __infer(self, input, max_length=255):
        """Greedy decode: run the encoder once, then feed the decoder its own
        previous output (plus states) until the end token or ``max_length``."""
        states = self.encoder_model.predict(input)
        if len(self.encoder_model.outputs) == 1:
            states = [states]
        result = self.out_map.encode([self.start_token])
        end_frame = self.out_map.encode([self.end_token])[0]
        while np.argmax(result[-1]) != np.argmax(end_frame) and len(result) <= max_length:
            states.insert(0, result[-1].reshape((1, 1,) + result[-1].shape))
            output = self.decoder_model.predict(states)
            states = output[1:]
            result = np.append(result, output[0][0], axis=0)
        return self.out_map.decode(result)

    def _infer(self, input, max_length=255):
        """Decode a single already-vectorized input (adds the batch axis)."""
        return self.__infer(input.reshape((1,) + input.shape), max_length)

    def infer(self, input, max_length=255):
        """Decode a raw token sequence: encode it, then greedy-decode."""
        input = self.in_map.encode(input, self.model_def["max_in_length"])
        return self._infer(input, max_length)

    def infer_many(self, input_list, max_length=255):
        """Decode each raw input in *input_list*; returns a list of outputs."""
        result = []
        for input in input_list:
            result.append(self.infer(input, max_length))
        return result

    def save_history(self):
        """Plot train vs. validation loss and save it as history.pdf."""
        matplotlib.rcParams['figure.figsize'] = (18, 16)
        matplotlib.rcParams['figure.dpi'] = 180
        pyplot.figure()
        pyplot.plot(self.history['loss'])
        pyplot.plot(self.history['val_loss'])
        pyplot.title('model train vs validation loss')
        pyplot.ylabel('loss')
        pyplot.xlabel('epoch')
        pyplot.legend(['train', 'validation'], loc='upper right')
        pyplot.savefig("{}/history.pdf".format(self.working_dir))

    def save_summary(self, width=256):
        """Write the Keras model summary to summary.log (and stdout via Tee)."""
        with Tee("{}/summary.log".format(self.working_dir), 'w'):
            self.training_model.summary(width)

    @staticmethod
    def _acc_report(data, count):
        """Print a per-error-count breakdown of *count* test cases."""
        print("Out of {} test cases:".format(count))
        for err_n, err_count in sorted(data.items()):
            print("\tHad {} mistakes: {} ({:.2f}%)".format(err_n, err_count, err_count / count * 100))

    @staticmethod
    def _write_report_csv(records, file):
        """Write inference records as a semicolon-separated CSV report."""
        file.write("input;output;expected;errors;\n")
        for rec in records:
            file.write("{};".format(reduce(lambda a, b: "{}{}".format(a, b), rec['input'])) +
                       "{};".format(reduce(lambda a, b: "{}:{}".format(a, b), rec['output'])) +
                       "{};".format(reduce(lambda a, b: "{}:{}".format(a, b), rec['expected_output'])) +
                       "{};\n".format(rec['errors']))
        file.write('\n')

    def validate(self, inputs, real_outputs):
        """Infer every input and return {error_count: number_of_cases}.

        Errors are per-symbol mismatches plus the length difference.
        """
        pairs = zip(inputs, real_outputs)
        accuracy = {}
        for pair in pairs:
            errors = 0
            output = self.infer(pair[0])
            for symbols in zip(output, pair[1]):
                errors += symbols[0] != symbols[1]
            errors += abs(len(output) - len(pair[1]))
            accuracy[errors] = 1 if errors not in accuracy else accuracy[errors] + 1
        return accuracy

    def _generate_report(self, data_type=None):
        """Re-infer the cached train/validate sets and tally per-record errors.

        Returns (accuracy, report): accuracy maps error-count -> occurrences
        per data set ('train', 'validate', 'full'); report lists the records.
        """
        accuracy = {"train": {}, "validate": {}, "full": {}}
        report = {"train": [], "validate": [], "full": []}
        if data_type is None:
            data_type = self.last_train_data
        for type in data_type:
            for idx in range(0, len(self.last_train_data[type]["in"])):
                input = self.last_train_data[type]["in"][idx]
                true_output = self.out_map.decode(self.last_train_data[type]["out"][idx])
                output = self._infer(input)
                errors = 0
                for offset in range(min(len(output), len(true_output))):
                    errors += output[offset] != true_output[offset]
                errors += abs(len(output) - len(true_output))
                accuracy[type][errors] = 1 if errors not in accuracy[type] else accuracy[type][errors] + 1
                accuracy["full"][errors] = 1 if errors not in accuracy["full"] else accuracy["full"][errors] + 1
                record = {
                    "input": self.in_map.decode(input),
                    "output": output,
                    "expected_output": true_output,
                    "errors": errors
                }
                report[type].append(record)
                report["full"].append(record)
        return accuracy, report

    def save_accuracy(self, accuracy, report, name):
        """Print and log the accuracy breakdown; the log file name embeds the
        zero-error percentage for train/validate/full sets."""
        train = accuracy["train"][0] if 0 in accuracy["train"] else 0
        val = accuracy["validate"][0] if 0 in accuracy["validate"] else 0
        full = accuracy["full"][0] if 0 in accuracy["full"] else 0
        with Tee("{}/{}-{:.2f}-{:.2f}-{:.2f}.log".format(
                self.working_dir,
                name,
                train / (1 if len(report["train"]) == 0 else len(report["train"])) * 100,
                val / (1 if len(report["validate"]) == 0 else len(report["validate"])) * 100,
                full / (1 if len(report["full"]) == 0 else len(report["full"])) * 100
        ),
                'a'
        ):
            print('------------------Accuracy----------------------')
            print('-----------------Train set----------------------')
            self._acc_report(accuracy["train"], len(report["train"]))
            print('---------------Validation set-------------------')
            self._acc_report(accuracy["validate"], len(report["validate"]))
            print('------------------Full set----------------------')
            self._acc_report(accuracy["full"], len(report["full"]))
            print('------------------------------------------------')

    def save_report(self, report, name):
        """Write one CSV per data set ('train'/'validate'/'full')."""
        for rep in report:
            with open("{}/{}_{}.csv".format(self.working_dir, name, rep), "w") as file:
                self._write_report_csv(report[rep], file)

    def save_full_report(self, accuracy_name='accuracy', report_name='result'):
        """Generate and persist both the accuracy log and the CSV reports."""
        accuracy, report = self._generate_report()
        self.save_accuracy(accuracy, report, accuracy_name)
        self.save_report(report, report_name)

    def load_weights(self, path):
        """Load weights into the training model (shared with inference models)."""
        self.training_model.load_weights(path)

    def evaluate_checkpoints(self, progress=lambda p: None, data_type=None):
        """Score every saved checkpoint, then restore the final weights.

        ``progress`` is called with the number of checkpoints done so far.
        """
        if not os.path.isdir("{}/checkpoints".format(self.working_dir)):
            print("No checkpoint folder!")
            return
        checkpoints = [file for file in os.listdir("{}/checkpoints".format(self.working_dir))
                       if file.startswith('weights.') and file.endswith('.hdf5')]
        for idx, checkpoint in enumerate(sorted(checkpoints)):
            path = "{}/checkpoints/{}".format(self.working_dir, checkpoint)
            print("Evaluating {}...".format(path))
            self.load_weights(path)
            relative_path = "checkpoints/{}".format(checkpoint)
            accuracy, report = self._generate_report(data_type)
            self.save_accuracy(accuracy, report, relative_path)
            progress(idx+1)
        self.load_weights("{}/model-weights.h5".format(self.working_dir))

    def save_model(self):
        """Persist the full training model plus a weights-only copy."""
        self.training_model.save("{}/model.h5".format(self.working_dir))
        self.training_model.save_weights("{}/model-weights.h5".format(self.working_dir))

    def save_for_inference_tf(self):
        """Freeze the encoder and decoder graphs (variables -> constants) and
        export them as .pb/.pbtxt files with deterministic tensor names."""
        print('Saving tf models for inference...\n\tIdentifying encoder output names...')
        rnn_layers = list(
            map(
                lambda x: self.rnn_layers[x['type']],
                filter(
                    lambda x: x['type'] in self.rnn_layers,
                    self.model_def['layers']
                )
            )
        )
        tf.identity(self.encoder_model.inputs[0], 'encoder_input')
        encoder_output_names = []
        enc_idx = 0
        for layer_idx, layer in enumerate(rnn_layers):
            for state in layer['states']:
                encoder_output_names.append("encoder_{}-{}_{}_output".format(layer["type"], layer_idx, state))
                tf.identity(self.encoder_model.outputs[enc_idx], encoder_output_names[-1])
                enc_idx += 1
        print("\t{}".format(encoder_output_names))
        print("\tConverting encoder variables to constants...")
        session = K.get_session()
        encoder_model_const = tf.graph_util.convert_variables_to_constants(
            session,
            session.graph.as_graph_def(),
            encoder_output_names)
        print("\tSaving encoder inference model...")
        tf.io.write_graph(encoder_model_const, self.working_dir, "encoder_inference_model.pbtxt", as_text=True)
        tf.io.write_graph(encoder_model_const, self.working_dir, "encoder_inference_model.pb", as_text=False)
        print("\tIdentifying decoder output names...")
        tf.identity(self.decoder_model.inputs[0], 'decoder_input')
        decoder_output_names = ['decoder_output']
        tf.identity(self.decoder_model.outputs[0], decoder_output_names[0])
        dec_idx = 1
        for layer_idx, layer in enumerate(rnn_layers):
            for state in layer['states']:
                decoder_output_names.append("decoder_{}-{}_{}_output".format(layer["type"], layer_idx, state))
                tf.identity(self.decoder_model.outputs[dec_idx], decoder_output_names[-1])
                tf.identity(self.decoder_model.inputs[dec_idx], "decoder_{}-{}_{}_input".format(layer["type"], layer_idx, state))
                dec_idx += 1
        print("\t{}".format(decoder_output_names))
        print("\tConverting decoder variables to constants...")
        decoder_model_const = tf.graph_util.convert_variables_to_constants(
            session,
            session.graph.as_graph_def(),
            decoder_output_names
        )
        print("\tSaving decoder inference model...")
        tf.io.write_graph(decoder_model_const, self.working_dir, "decoder_inference_model.pbtxt", as_text=True)
        tf.io.write_graph(decoder_model_const, self.working_dir, "decoder_inference_model.pb", as_text=False)
        print("Saving tf models for inference - Done")

    def save_model_plots(self):
        """Render architecture diagrams (PNG) for all three models."""
        plot_model(self.training_model, to_file="{}/model.png".format(self.working_dir), show_layer_names=True, show_shapes=True)
        plot_model(self.encoder_model, to_file="{}/encoder_model.png".format(self.working_dir), show_layer_names=True, show_shapes=True)
        plot_model(self.decoder_model, to_file="{}/decoder_model.png".format(self.working_dir), show_layer_names=True, show_shapes=True)

    def save_no_train(self, width=256):
        """Save artifacts that do not require a completed training run."""
        #self.save_model_plots()
        self.save_summary(width=width)

    def save_model_def(self):
        """Serialize the original model definition; the start/end tokens are
        temporarily removed so the saved file matches the user-supplied form."""
        with open("{}/model.json".format(self.working_dir), "w") as file:
            self.model_def["out_tokens"].remove(self.start_token)
            self.model_def["out_tokens"].remove(self.end_token)
            json.dump(self.model_def_original, file, indent=2)
            self.model_def["out_tokens"].append(self.start_token)
            self.model_def["out_tokens"].append(self.end_token)

    def save(self, width=256):
        """Persist everything: model, definition, summary, history, reports
        and the frozen TensorFlow inference graphs."""
        self.save_model()
        self.save_model_def()
        self.save_no_train(width)
        self.save_history()
        self.save_full_report()
        self.save_for_inference_tf()
'''
TODO: add a builder class for the Seq2Seq model.
The builder should set all required fields so that
the resulting model is capable of both training and inference.
'''
'''
Training model:
in: encoder inputs (Input layer), decoder inputs (Input layer)
out: decoder outputs
Encoder model:
in: encoder inputs (Input layer)
out: all encoder LSTM layer states
Decoder model:
in: decoder inputs (Input layer), state inputs for all LSTM layers
out: decoder outputs, all LSTM layer states
'''
|
from .bit import *
from .inception import *
from .inception_resnet import *
from .mobilenet import *
from .resnet import * |
#!/usr/bin/env python
from setuptools import setup
from Cython.Build import cythonize
# Package metadata and build configuration for the pogle OpenGL engine.
setup(
    name='pogle',
    version='0.1',
    author='Clement Jacob',
    author_email='clems71@gmail.com',
    description='Python OpenGL Engine : a simplistic OpenGL engine for python',
    url='https://github.com/clems71/pogle',
    license='MIT',
    # Compile all Cython sources under pogle/ into extension modules.
    ext_modules=cythonize('pogle/*.pyx'),
    keywords='opengl',
    packages=['pogle', 'pyassimp'],
    package_dir={'pogle': 'pogle', 'pyassimp': 'pyassimp'},
    install_requires=['pyopengl', 'numpy', 'pillow', 'cyglfw3', 'cython', 'openexr'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Topic :: Utilities',
        'Programming Language :: Python :: 2.7',
        'License :: OSI Approved :: MIT License',
    ],
)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 24 12:42:27 2020
@author: vxr131730
"""
import pygame
import math
def draw_ellipse(A, B, width, color, line):
    """
    draws ellipse between two points
    A = start point (x,y)
    B = end point (x,y)
    width in pixel
    color (r,g,b)
    line thickness int, if line=0 fill ellipse

    NOTE(review): draws onto the module-level `screen` surface defined below
    this function; raises ZeroDivisionError if A == B (AB == 0) -- confirm
    callers never pass coincident points.
    """
    # point coordinates
    xA, yA = A[0], A[1]
    xB, yB = B[0], B[1]
    # calculate ellipse height, distance between A and B
    AB = math.sqrt((xB - xA)**2 + (yB - yA)**2)
    # difference between corner point coord and ellipse endpoint
    def sp(theta):
        return abs((width / 2 * math.sin(math.radians(theta))))
    def cp(theta):
        return abs((width / 2 * math.cos(math.radians(theta))))
    # Pick the rotation angle and the blit origin per quadrant of the A->B
    # direction (pygame's y axis points down, hence the yA/yB comparisons).
    if xB >= xA and yB < yA:
        # NE quadrant
        theta = math.degrees(math.asin((yA - yB) / AB))
        xP = int(xA - sp(theta))
        yP = int(yB - cp(theta))
    elif xB < xA and yB <= yA:
        # NW
        theta = math.degrees(math.asin((yB - yA) / AB))
        xP = int(xB - sp(theta))
        yP = int(yB - cp(theta))
    elif xB <= xA and yB > yA:
        # SW
        theta = math.degrees(math.asin((yB - yA) / AB))
        xP = int(xB - sp(theta))
        yP = int(yA - cp(theta))
    else:
        # SE
        theta = math.degrees(math.asin((yA - yB) / AB))
        xP = int(xA - sp(theta))
        yP = int(yA - cp(theta))
    # create surface for ellipse
    ellipse_surface = pygame.Surface((AB, width), pygame.SRCALPHA)
    # draw surface onto ellipse
    pygame.draw.ellipse(ellipse_surface, color, (0, 0, AB, width), line)
    # rotate ellipse
    ellipse = pygame.transform.rotate(ellipse_surface, theta)
    # blit ellipse onto screen
    screen.blit(ellipse, (xP, yP))
# Demo: draw three rotated ellipses until the window is closed.
screen = pygame.display.set_mode((1000, 1000))

running = True
while running:
    screen.fill((255, 250, 200))
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            # FIX: previously pygame.quit() was called here while the loop kept
            # running and drawing onto the dead display, which raises an error.
            running = False
    if not running:
        break
    draw_ellipse((500, 500), (420, 350), 100, (0, 255, 0), 5)
    draw_ellipse((400, 600), (700, 280), 80, (255, 0, 0), 5)
    draw_ellipse((260, 190), (670, 440), 50, (0, 0, 255), 5)
    pygame.display.update()
pygame.quit()
#!/usr/bin/env python Sample Test passing with nose and pytest
import unittest
import numpy as np
from gcode_gen import point
class TestPoint(unittest.TestCase):
    """Unit tests for point.Point construction, offsetting and diffing."""

    def _check_close(self, actual, expect):
        # Shared allclose assertion with the same diagnostic message format.
        self.assertTrue(np.allclose(actual, expect), 'actual: {}\nexpect:{}'.format(actual, expect))

    def test_2d(self):
        """A 2-D point gets z defaulted to 0."""
        self._check_close(point.Point(1, 2).arr, np.asarray((1, 2, 0)))

    def test_3d(self):
        """A 3-D point round-trips all three coordinates."""
        self._check_close(point.Point(1, 2, 3).arr, np.asarray((1, 2, 3)))

    def test_offset(self):
        """offset() adds per-axis deltas, positionally or by keyword."""
        origin = point.Point(1, 2, 3)
        self.assertIsInstance(origin, point.Point)
        self._check_close(origin.offset(5, 7, 9).arr, [6, 9, 12])
        #
        origin = point.Point(1, z=3)
        self.assertIsInstance(origin, point.Point)
        self._check_close(origin.offset(z=7, y=5).arr, [1, 5, 10])

    def test_point_changes(self):
        """changes() reports only the coordinates that differ."""
        cases = [
            (point.Point(1, 2), point.Point(1, 2), {}),
            (point.Point(1, 2), point.Point(1, 3), {'y': 3}),
            (point.Point(1, 2, 6), point.Point(1, 2), {'z': 0}),
            (point.Point(1, 2), point.Point(1, 2, 6), {'z': 6}),
            (point.Point(2, 2, 7), point.Point(2.1, 2.1), {'x': 2.1, 'y': 2.1, 'z': 0}),
        ]
        for before, after, expect in cases:
            self.assertEqual(point.changes(before, after), expect)
class TestPointList(unittest.TestCase):
    """Tests for point.PointList: construction, append/extend and indexing."""

    def _assert_allclose(self, actual, expect):
        # Numpy-aware equality check with a readable failure message,
        # factored out of every test method to remove repeated boilerplate.
        self.assertTrue(np.allclose(actual, expect),
                        'actual: {}\nexpect:{}'.format(actual, expect))

    def test_empty(self):
        pl = point.PointList()
        self.assertEqual(pl.shape, (0, 3))

    def test_PL_ZERO(self):
        pl = point.PL_ZERO
        self.assertEqual(pl.shape, (1, 3))

    def test_2d_single(self):
        pl = point.PointList()
        pl.append(point.Point(1, 2))
        self._assert_allclose(pl.arr, np.asarray(((1, 2, 0), )))
        self._assert_allclose(pl[0].arr, np.asarray((1, 2, 0)))
        self.assertEqual(pl.shape, (1, 3))
        self.assertEqual(pl[0].arr.shape, (3, ))
        self.assertEqual(len(pl), 1)

    def test_2d_multi(self):
        pl = point.PointList()
        pl.append(point.Point(1, 2))
        pl.append(point.Point(3, 4))
        self._assert_allclose(pl.arr, np.asarray(((1, 2, 0), (3, 4, 0))))
        self.assertEqual(pl.shape, (2, 3))
        self.assertEqual(len(pl), 2)
        #
        self._assert_allclose(pl[0].arr, np.asarray((1, 2, 0)))
        #
        self._assert_allclose(pl[1].arr, np.asarray((3, 4, 0)))

    def test_3d_single(self):
        pl = point.PointList()
        pl.append(point.Point(1, 2, 3))
        self._assert_allclose(pl.arr, np.asarray(((1, 2, 3), )))
        self.assertEqual(pl.shape, (1, 3))
        self.assertEqual(pl[0].arr.shape, (3, ))
        self.assertEqual(len(pl), 1)
        #
        self._assert_allclose(pl[0].arr, np.asarray((1, 2, 3)))
        # Negative indexing must behave like a regular sequence.
        self._assert_allclose(pl[-1].arr, np.asarray((1, 2, 3)))

    def test_3d_multi(self):
        pl = point.PointList()
        pl.append(point.Point(1, 2, 3))
        pl.append(point.Point(4, 5, 6))
        self._assert_allclose(pl.arr, np.asarray(((1, 2, 3), (4, 5, 6))))
        self.assertEqual(pl.shape, (2, 3))
        self.assertEqual(len(pl), 2)
        #
        self._assert_allclose(pl[0].arr, np.asarray((1, 2, 3)))
        #
        self._assert_allclose(pl[1].arr, np.asarray((4, 5, 6)))
        #
        self._assert_allclose(pl[-1].arr, np.asarray((4, 5, 6)))

    def test_2d_3d_mix(self):
        # 2-D points are zero-padded in z, so they coexist with 3-D points.
        pl = point.PointList()
        pl.append(point.Point(1, 2))
        pl.append(point.Point(4, 5, 6))
        self._assert_allclose(pl.arr, np.asarray(((1, 2, 0), (4, 5, 6))))
        self.assertEqual(pl.shape, (2, 3))
        self.assertEqual(len(pl), 2)

    def test_extend_basic(self):
        pl0 = point.PointList()
        pl1 = point.PointList()
        pl0.append(point.Point(1, 2))
        pl0.append(point.Point(4, 5, 6))
        pl1.append(point.Point(7, 8, 9))
        pl1.append(point.Point(10, 11))
        pl1.extend(pl0)
        self._assert_allclose(
            pl1.arr, np.asarray(((7, 8, 9), (10, 11, 0), (1, 2, 0), (4, 5, 6), )))
        self.assertEqual(pl1.shape, (4, 3))
        self.assertEqual(len(pl1), 4)

    def test_extend_empty_left(self):
        pl0 = point.PointList()
        pl1 = point.PointList()
        pl0.append(point.Point(1, 2))
        pl0.append(point.Point(4, 5, 6))
        pl1.extend(pl0)
        self._assert_allclose(pl1.arr, np.asarray(((1, 2, 0), (4, 5, 6))))

    def test_extend_empty_right(self):
        pl0 = point.PointList()
        pl1 = point.PointList()
        pl0.append(point.Point(1, 2))
        pl0.append(point.Point(4, 5, 6))
        pl0.extend(pl1)
        self._assert_allclose(pl0.arr, np.asarray(((1, 2, 0), (4, 5, 6))))

    # Slice insertion is not supported yet; kept verbatim for reference.
    # def test_slice_insert_PointList(self):
    #     pl0 = point.PointList()
    #     pl1 = point.PointList()
    #     pl0.append(point.Point(1, 2, 3))
    #     pl0.append(point.Point(4, 5, 6))
    #     pl1.append(point.Point(7, 8, 9))
    #     pl1.append(point.Point(10, 11, 12))
    #     pl0[:0] = pl1
    #     actual = pl0.arr
    #     expect = np.asarray(((7, 8, 9), (10, 11, 12), (1, 2, 3), (4, 5, 6), ))
    #     self.assertTrue(np.allclose(actual, expect), 'actual: {}\nexpect:{}'.format(actual, expect))
    # def test_slice_insert_nparr(self):
    #     pl0 = point.PointList()
    #     pl1 = point.PointList()
    #     pl0.append(point.Point(1, 2, 3))
    #     pl0.append(point.Point(4, 5, 6))
    #     pl0[:0] = np.asarray(((7, 8, 9), (10, 11, 12), ))
    #     actual = pl0.arr
    #     expect = np.asarray(((7, 8, 9), (10, 11, 12), (1, 2, 3), (4, 5, 6), ))
    #     self.assertTrue(np.allclose(actual, expect), 'actual: {}\nexpect:{}'.format(actual, expect))

    def test_PointList_from_list(self):
        coords = [[1, 1, 0], [3, 1, 0], [3, 3, 0], [1, 3, 0], ]
        pl = point.PointList(coords)
        self._assert_allclose(pl.arr, np.asarray(coords))

    def test_PointList_from_nparr(self):
        lst = [[1, 1, 0], [3, 1, 0], [3, 3, 0], [1, 3, 0], ]
        arr = np.asarray(lst, dtype=np.uint8)
        pl = point.PointList(arr)
        # Input dtype must be widened to float64 internally.
        self._assert_allclose(pl.arr, np.asarray(lst, dtype=np.float64))
        self.assertEqual(pl.arr.dtype, np.float64)

    def test_PointList_slice(self):
        pl_base = point.PointList([[1, 1, 0], [3, 1, 0], [3, 3, 0], [1, 3, 0], ])
        pl = pl_base[1:3]
        self._assert_allclose(pl.arr, np.asarray([[3, 1, 0], [3, 3, 0], ]))
|
from __future__ import unicode_literals, division, absolute_import
class TestRegexExtract(object):
config = """
tasks:
test_1:
mock:
- {title: 'The.Event.New.York'}
regex_extract:
prefix: event_
field: title
regex:
- The\.Event\.(?P<location>.*)
test_2:
mock:
- {title: 'TheShow.Detroit'}
regex_extract:
prefix: event_
field: title
regex:
- The\.Event\.(?P<location>.*)
test_3:
mock:
- {title: 'The.Event.New.York'}
regex_extract:
field: title
regex:
- The\.Event\.(?P<location>.*)
test_4:
mock:
- {title: 'The.Event.New.York.2015'}
regex_extract:
prefix: event_
field: title
regex:
- The\.Event\.(?P<location>[\w\.]*?)\.(?P<year>\d{4})
"""
def test_single_group(self, execute_task):
task = execute_task('test_1')
entry = task.find_entry('entries', title='The.Event.New.York')
assert entry is not None
assert 'event_location' in entry
assert entry['event_location'] == 'New.York'
def test_single_group_non_match(self, execute_task):
task = execute_task('test_2')
entry = task.find_entry('entries', title='TheShow.Detroit')
assert entry is not None
assert 'event_location' not in entry
def test_single_group_no_prefix(self, execute_task):
task = execute_task('test_3')
entry = task.find_entry('entries', title='The.Event.New.York')
assert entry is not None
assert 'location' in entry
assert entry['location'] == 'New.York'
def test_multi_group(self, execute_task):
task = execute_task('test_4')
entry = task.find_entry('entries', title='The.Event.New.York.2015')
assert entry is not None
assert 'event_location' in entry
assert 'event_year' in entry
assert entry['event_location'] == 'New.York'
assert entry['event_year'] == '2015'
|
# -*- coding: utf-8 -*-
def main():
    """Greedy bus scheduling (AtCoder-style).

    Reads n (passengers), c (bus capacity), k (max wait after the first
    boarding passenger), then n arrival times from stdin; prints the minimum
    number of buses needed.
    """
    n, c, k = map(int, input().split())
    arrivals = sorted(int(input()) for _ in range(n))
    buses = 1
    seats_left = c - 1
    deadline = arrivals[0] + k
    # See:
    # https://www.youtube.com/watch?v=cJ-WjtA34GQ
    for arrival in arrivals[1:]:
        if seats_left >= 1 and arrival <= deadline:
            # Fits on the current bus and within its departure window.
            seats_left -= 1
        else:
            # Start a new bus anchored at this passenger's arrival time.
            buses += 1
            seats_left = c - 1
            deadline = arrival + k
    print(buses)


if __name__ == '__main__':
    main()
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# This script reuses some code from https://github.com/nlpyang/BertSum
"""
Utility functions for downloading, extracting, and reading the
Swiss dataset at https://drive.switch.ch/index.php/s/YoyW9S8yml7wVhN.
"""
import nltk
# nltk.download("punkt")
from nltk import tokenize
from nltk.tokenize.treebank import TreebankWordDetokenizer
import os
import regex as re
from torchtext.utils import extract_archive
import pandas
from sklearn.model_selection import train_test_split
from utils_nlp.dataset.url_utils import (
maybe_download,
maybe_download_googledrive,
extract_zip,
)
from utils_nlp.models.transformers.datasets import (
SummarizationDataset,
IterableSummarizationDataset,
)
def _target_sentence_tokenization(line):
return line.split("<q>")
def join(sentences):
    """Concatenate an iterable of sentence strings with single spaces."""
    separator = " "
    return separator.join(sentences)
def SwissSummarizationDataset(top_n=-1, validation=False):
    """Load the Swiss summarization dataset hosted on drive.switch.ch.

    (Fixed: the original docstring wrongly described the CNN/Daily Mail
    dataset.)

    Args:
        top_n (int): keep only the first `top_n` rows; -1 keeps everything.
        validation (bool): when True, carve a validation split out of the
            training data and return three datasets instead of two.

    Returns:
        tuple of SummarizationDataset: (train, test) or
        (train, validation, test) when `validation` is True.
    """
    URLS = ["https://drive.switch.ch/index.php/s/YoyW9S8yml7wVhN/download?path=%2F&files=data_train.csv",
            "https://drive.switch.ch/index.php/s/YoyW9S8yml7wVhN/download?path=%2F&files=data_test.csv",]
    LOCAL_CACHE_PATH = '.data'
    FILE_NAME = "data_train.csv"
    maybe_download(URLS[0], FILE_NAME, LOCAL_CACHE_PATH)
    dataset_path = os.path.join(LOCAL_CACHE_PATH, FILE_NAME)

    train = pandas.read_csv(dataset_path).values.tolist()
    if top_n != -1:
        train = train[0:top_n]
    # Column 0 is the source document, column 1 the reference summary.
    source = [item[0] for item in train]
    summary = [item[1] for item in train]
    train_source, test_source, train_summary, test_summary = train_test_split(
        source, summary, train_size=0.95, test_size=0.05, random_state=123
    )

    def _make_dataset(src, tgt):
        # All splits share identical preprocessing; build them in one place
        # instead of repeating the constructor three times.
        return SummarizationDataset(
            source_file=None,
            source=src,
            target=tgt,
            source_preprocessing=[tokenize.sent_tokenize],
            target_preprocessing=[
                tokenize.sent_tokenize,
            ],
            top_n=top_n,
        )

    if validation:
        train_source, validation_source, train_summary, validation_summary = train_test_split(
            train_source, train_summary, train_size=0.9, test_size=0.1, random_state=123
        )
        return (
            _make_dataset(train_source, train_summary),
            _make_dataset(validation_source, validation_summary),
            _make_dataset(test_source, test_summary),
        )
    return (
        _make_dataset(train_source, train_summary),
        _make_dataset(test_source, test_summary),
    )
|
import logging
import yaml
# Root logging config: INFO level so sync progress is visible on the console.
logging.basicConfig(level=logging.INFO)
# Module-wide named logger for the grafana-ldap-sync-script.
logger = logging.getLogger("grafana-ldap-sync-script")
class config:
    """Settings container populated from a YAML configuration file.

    NOTE(review): the original class defined ``__init__`` twice; Python keeps
    only the last definition, so the zero-argument form was silently broken.
    A single initializer with a default path restores both call styles.
    """

    # Defaults; all of these are overwritten by load_config().
    GRAFANA_AUTH = ""
    GRAFANA_URL = ""

    LDAP_SERVER_URL = ""
    LDAP_PORT = ""
    LDAP_USER = ""
    LDAP_PASSWORD = ""
    LDAP_GROUP_SEARCH_BASE = ""
    LDAP_GROUP_DESCRIPTOR = ""
    LDAP_GROUP_SEARCH_FILTER = ""
    LDAP_MEMBER_ATTRIBUTE = ""
    LDAP_IS_NTLM = False
    LDAP_USE_SSL = False

    LDAP_USER_LOGIN_ATTRIBUTE = ""
    LDAP_USER_NAME_ATTRIBUTE = ""
    LDAP_USER_MAIL_ATTRIBUTE = ""
    LDAP_USER_SEARCH_BASE = ""
    LDAP_USER_SEARCH_FILTER = ""

    DRY_RUN = False

    def __init__(self, config_path=""):
        self.load_config(config_path)

    def load_config(self, config_path):
        """
        Loads the YAML file at config_path and fills all instance attributes
        with the values defined under its top-level "config" key.

        Raises FileNotFoundError if the file does not exist.
        """
        try:
            # `with` guarantees the handle is closed (the original leaked it).
            with open(config_path) as config_file:
                config = yaml.safe_load(config_file)["config"]
        except FileNotFoundError as e:
            logger.error("Config-file %s does not exist!", config_path)
            raise e

        self.GRAFANA_AUTH = (
            config["grafana"]["user"],
            config["grafana"]["password"]
        )
        self.GRAFANA_URL = config["grafana"]["url"]
        self.LDAP_SERVER_URL = config["ldap"]["url"]
        self.LDAP_PORT = config["ldap"]["port"]
        self.LDAP_USER = config["ldap"]["login"]
        self.LDAP_PASSWORD = config["ldap"]["password"]
        self.LDAP_GROUP_SEARCH_BASE = config["ldap"]["groupSearchBase"]
        self.LDAP_GROUP_SEARCH_FILTER = config["ldap"]["groupSearchFilter"]
        self.LDAP_MEMBER_ATTRIBUTE = config["ldap"]["memberAttributeName"]
        self.LDAP_IS_NTLM = config["ldap"]["useNTLM"]
        self.LDAP_USE_SSL = config["ldap"]["useSSL"]
        # (The original assigned userLoginAttribute twice; once is enough.)
        self.LDAP_USER_LOGIN_ATTRIBUTE = config["ldap"]["userLoginAttribute"]
        self.LDAP_USER_NAME_ATTRIBUTE = config["ldap"]["userNameAttribute"]
        self.LDAP_USER_MAIL_ATTRIBUTE = config["ldap"]["userMailAttribute"]
        self.LDAP_USER_SEARCH_BASE = config["ldap"]["userSearchBase"]
        self.LDAP_USER_SEARCH_FILTER = config["ldap"]["userSearchFilter"]
|
"""
ARCHES - a program developed to inventory and manage immovable cultural heritage.
Copyright (C) 2013 J. Paul Getty Trust and World Monuments Fund
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
import os
from tests import test_settings
from tests.base_test import ArchesTestCase
from django.urls import reverse
from django.test.client import RequestFactory, Client
from arches.app.views.api import APIBase
from arches.app.models.graph import Graph
from arches.app.models.resource import Resource
from arches.app.models.tile import Tile
from arches.app.utils.betterJSONSerializer import JSONSerializer, JSONDeserializer
# these tests can be run from the command line via
# python manage.py test tests/views/api_tests.py --pattern="*.py" --settings="tests.test_settings"
class APITests(ArchesTestCase):
    """Tests for the Arches API views (see module docstring for run command)."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    @classmethod
    def setUpClass(cls):
        """Load the ontology and the three fixture resource graphs."""
        cls.loadOntology()
        # Fixed: the "rU" (universal-newlines) open mode was removed in
        # Python 3.11 and raises ValueError; plain "r" has been equivalent
        # throughout Python 3.
        with open(os.path.join("tests/fixtures/resource_graphs/unique_graph_shape.json"), "r") as f:
            json = JSONDeserializer().deserialize(f)
        cls.unique_graph = Graph(json["graph"][0])
        cls.unique_graph.save()

        with open(os.path.join("tests/fixtures/resource_graphs/ambiguous_graph_shape.json"), "r") as f:
            json = JSONDeserializer().deserialize(f)
        cls.ambiguous_graph = Graph(json["graph"][0])
        cls.ambiguous_graph.save()

        with open(os.path.join("tests/fixtures/resource_graphs/phase_type_assignment.json"), "r") as f:
            json = JSONDeserializer().deserialize(f)
        cls.phase_type_assignment_graph = Graph(json["graph"][0])
        cls.phase_type_assignment_graph.save()

    def test_api_base_view(self):
        """
        Test that our custom header parameters get pushed on to the GET QueryDict
        """
        factory = RequestFactory(HTTP_X_ARCHES_VER="2.1")
        view = APIBase.as_view()

        # An explicit query parameter wins over the header value.
        request = factory.get(reverse("mobileprojects", kwargs={}), {"ver": "2.0"})
        request.user = None
        response = view(request)
        self.assertEqual(request.GET.get("ver"), "2.0")

        # Without a query parameter, the X-ARCHES-VER header is used.
        request = factory.get(reverse("mobileprojects"), kwargs={})
        request.user = None
        response = view(request)
        self.assertEqual(request.GET.get("ver"), "2.1")
|
import numpy as np
import cv2
from pylab import *
from sklearn.svm import SVC
from scipy.ndimage import zoom
from sklearn.externals import joblib
#loading a classifier model
# NOTE(review): `sklearn.externals.joblib` was removed from scikit-learn
# (0.23+); on modern installs this presumably needs `import joblib` — verify.
svc_1 = joblib.load('smile.joblib.pkl')
def detect_face(frame):
    """Run the Haar frontal-face cascade on a BGR frame.

    Returns the grayscale image and the detected face rectangles
    (x, y, w, h), skipping faces smaller than 100x100 pixels.
    """
    cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = cascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=6,
        minSize=(100, 100),
        flags=cv2.CASCADE_SCALE_IMAGE,
    )
    return gray, faces
def extract_face_features(gray, detected_face, offset_coefficients):
    """Crop a face region from a grayscale image and normalize it.

    Args:
        gray: 2-D grayscale image array.
        detected_face: (x, y, w, h) rectangle from the cascade detector.
        offset_coefficients: (horizontal, vertical) fractional margins
            trimmed from the crop.

    Returns:
        64x64 float32 array scaled to [0, 1].
    """
    (x, y, w, h) = detected_face
    # Fixed: slice indices must be integers — the original used raw float
    # offsets, which modern numpy rejects when indexing.
    horizontal_offset = int(offset_coefficients[0] * w)
    vertical_offset = int(offset_coefficients[1] * h)
    extracted_face = gray[y + vertical_offset:y + h,
                          x + horizontal_offset:x - horizontal_offset + w]
    # Resample the crop to a fixed 64x64 input for the classifier.
    new_extracted_face = zoom(extracted_face, (64. / extracted_face.shape[0],
                                               64. / extracted_face.shape[1]))
    # Explicit np.float32 instead of the bare `float32` pulled in by
    # `from pylab import *`.
    new_extracted_face = new_extracted_face.astype(np.float32)
    new_extracted_face /= float(new_extracted_face.max())
    return new_extracted_face
def predict_face_is_smiling(extracted_face):
    """Classify a normalized 64x64 face crop with the pre-trained SVC.

    Fixed: scikit-learn estimators require a 2-D (n_samples, n_features)
    array; the original passed a 1-D ravel(), which modern scikit-learn
    rejects. Returns the predict() result for a single sample.
    """
    return svc_1.predict(extracted_face.reshape(1, -1))
# Live smile-detection loop: read webcam frames, detect faces, classify each
# face crop with the pre-loaded SVC, and annotate the video window.
cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
#open-webcam
video_capture = cv2.VideoCapture(0)
while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    # detect faces
    gray, detected_faces = detect_face(frame)
    face_index = 0
    # predict output
    for face in detected_faces:
        (x, y, w, h) = face
        if w > 100:
            # draw rectangle around face
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            # extract features
            extracted_face = extract_face_features(gray, face, (0.03, 0.05)) #(0.075, 0.05)
            # predict smile
            prediction_result = predict_face_is_smiling(extracted_face)
            # draw extracted face in the top right corner
            # (each detected face gets its own 64-pixel-tall strip)
            frame[face_index * 64: (face_index + 1) * 64, -65:-1, :] = cv2.cvtColor(extracted_face * 255, cv2.COLOR_GRAY2RGB)
            # annotate main image with a label
            if prediction_result == 1:
                cv2.putText(frame, "SMILING",(x,y), cv2.FONT_HERSHEY_SIMPLEX, 2, 155, 6)
            else:
                cv2.putText(frame, "not smiling",(x,y), cv2.FONT_HERSHEY_SIMPLEX, 2, 155, 10)
            # increment counter
            face_index += 1
    # Display the resulting frame
    cv2.imshow('Video', frame)
    # Press 'q' to quit the loop.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
|
import math
from .json_map_data import polys, county_names, refs
from shapely.geometry import Point, Polygon
def get_circle_coord(theta, x_center, y_center, radius):
    """Return the integer-rounded [x, y] point at angle *theta* on a circle."""
    return [
        round(radius * math.cos(theta) + x_center),
        round(radius * math.sin(theta) + y_center),
    ]


# This function gets all the pairs of coordinates
def get_all_circle_coords(x_center, y_center, radius, n_points):
    """Sample n_points + 1 evenly spaced points around the circle.

    The first and last points coincide (theta = 0 and theta = tau), which
    closes the polygon.
    """
    return [
        get_circle_coord(step / n_points * math.tau, x_center, y_center, radius)
        for step in range(n_points + 1)
    ]
def find_intersecting_counties(lat, long, radius):
    """Function to return intersecting counties.

    Builds a circle of the given radius (miles) around (lat, long) in the
    map's pixel space, collects the county polygons it intersects, and also
    returns the circle converted back to geographic coordinates.
    """
    # Degrees-per-pixel ratios of the underlying county map raster
    # (hard-coded map extent divided by raster size).
    lat_ratio = (41.148339 - 36.981528) / 3344
    long_ratio = (-89.638487 + 91.511353) / 1061
    # Miles -> degrees (~69 miles per degree of latitude) -> pixels.
    radius = int((radius / 69) / lat_ratio)
    # Offset of the query point from the reference corner, in pixels.
    y_coord = int(abs(refs[0] - long) / long_ratio)
    x_coord = int(abs(refs[1] - lat) / lat_ratio)
    coords = get_all_circle_coords(y_coord, x_coord, radius, 60)
    circle = Polygon(coords)
    # Names of every county polygon the pixel-space circle touches.
    intersectors = {
        county_names[idx]
        for idx, shape in enumerate(polys)
        if shape.intersects(circle)
    }
    # Map the pixel-space circle back into geographic coordinates.
    coords_real = [
        [pos[0] * long_ratio + refs[1], pos[1] * lat_ratio + refs[0]]
        for pos in coords
    ]
    circle_geo = Polygon(coords_real)
    return intersectors, circle_geo
|
def skyhook_calculator(upper_vel,delta_vel):
Cmax = 4000
Cmin = 300
epsilon = 0.0001
alpha = 0.5
sat_limit = 800
if upper_vel * delta_vel >= 0:
C = (alpha * Cmax * upper_vel + (1 - alpha) * Cmax * upper_vel)/(delta_vel + epsilon)
C = min(C,Cmax)
u = C * delta_vel
else:
u = Cmin*delta_vel
if u >= 0:
if u > sat_limit:
u_ = sat_limit
else:
u_ = u
else:
if u < -sat_limit:
u_ = -sat_limit
else:
u_ = u
return u_
def skyhook(env):
    """Compute skyhook damper commands from env.state_SH.

    # env.state_SH = [fl2,tfl2,fr2,tfr2,rl2,trl2,rr2,trr2]
    Each corner contributes a (velocity, reference) pair. With only one pair
    (quarter-car model) a single command is returned; with four pairs a list
    [fl, fr, rl, rr] is returned.
    """
    state = env.state_SH
    # Front-left corner is always present.
    dz_fl = state[0]
    u_fl = skyhook_calculator(dz_fl, dz_fl - state[1])
    if len(state) <= 2:
        # Quarter-car model: a single actuator command.
        return u_fl
    # Full-car model: remaining corners are at offsets 2 (fr), 4 (rl), 6 (rr).
    commands = [u_fl]
    for idx in (2, 4, 6):
        dz = state[idx]
        commands.append(skyhook_calculator(dz, dz - state[idx + 1]))
    return commands
import pytest
from prefect.tasks.sql_server import (
SqlServerExecute,
SqlServerExecuteMany,
SqlServerFetch,
)
class TestSqlServerExecute:
    """Unit tests for the SqlServerExecute task."""

    def test_construction(self):
        # commit must default to False so nothing is persisted accidentally.
        built = SqlServerExecute(db_name="test", user="test", host="test")
        assert built.commit is False

    def test_query_string_must_be_provided(self):
        built = SqlServerExecute(db_name="test", user="test", host="test")
        with pytest.raises(ValueError, match="A query string must be provided"):
            built.run()
class TestSqlServerExecuteMany:
    """Unit tests for the SqlServerExecuteMany task."""

    def test_construction(self):
        # commit must default to False so nothing is persisted accidentally.
        built = SqlServerExecuteMany(db_name="test", user="test", host="test")
        assert built.commit is False

    def test_query_string_must_be_provided(self):
        built = SqlServerExecuteMany(db_name="test", user="test", host="test")
        with pytest.raises(ValueError, match="A query string must be provided"):
            built.run()

    def test_data_list_must_be_provided(self):
        # A query alone is not enough; executemany also needs its data rows.
        built = SqlServerExecuteMany(
            db_name="test", user="test", host="test", query="test"
        )
        with pytest.raises(ValueError, match="A data list must be provided"):
            built.run()
class TestSqlServerFetch:
    """Unit tests for the SqlServerFetch task."""

    def test_construction(self):
        # The default fetch mode is a single row.
        built = SqlServerFetch(db_name="test", user="test", host="test")
        assert built.fetch == "one"

    def test_query_string_must_be_provided(self):
        built = SqlServerFetch(db_name="test", user="test", host="test")
        with pytest.raises(ValueError, match="A query string must be provided"):
            built.run()

    def test_bad_fetch_param_raises(self):
        # Any fetch value outside ('one', 'many', 'all') must be rejected.
        built = SqlServerFetch(db_name="test", user="test", host="test")
        with pytest.raises(
            ValueError,
            match=r"The 'fetch' parameter must be one of the following - \('one', 'many', 'all'\)",
        ):
            built.run(query="SELECT * FROM some_table", fetch="not a valid parameter")
|
"""
| Copyright (C) 2014 Daniel Thiele
| TU Braunschweig, Germany
| All rights reserved.
| See LICENSE file for copyright and license details.
:Authors:
- Daniel Thiele (thiele@ida.ing.tu-bs.de)
Description
-----------
XLS parser example
"""
from pycpa import util
from pycpa import xls_parser
import sys
def xls_parser_test(filename):
    """Parse *filename* with XLS_parser and print a few sample lookups."""
    parser = xls_parser.XLS_parser(filename)
    parser.parse()
    print("Using file: %s" % filename)
    print("Sheets: %s" % parser.sheets.keys())
    print("All data: %s" % parser.sheets)
    print("Sheet \"foo\", line 1 (index starts at zero): %s" % parser.get_line_of_sheet('foo', 1))
    print("Sheet \"foo\", column \"A\", line 2 (index starts at zero): %s" % parser.get_line_entry_of_sheet('foo', 2, 'A'))
if __name__ == "__main__":
    # Expect exactly one CLI argument: the .xls file to parse.
    if len(sys.argv) != 2:
        print("Call with xls_parser_example.xls as single argument.")
    else:
        xls_parser_test(sys.argv[1])
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
# coding=utf-8
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
# Generated OTL enumeration ("keuzelijst"): brand names for components of an
# air-quality installation. Extend `options` rather than editing by hand.
class KlLuchtkwaliteitOpstellingMerk(KeuzelijstField):
    """The brand of a component of an air-quality installation."""
    naam = 'KlLuchtkwaliteitOpstellingMerk'
    label = 'Luchtkwaliteitsopstelling merk'
    objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#KlLuchtkwaliteitOpstellingMerk'
    definition = 'Het merk van een onderdeel uit een luchtkwaliteitsinstallatie.'
    codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlLuchtkwaliteitOpstellingMerk'
    # Allowed values of the code list, keyed by their fill-in value.
    options = {
        'sick': KeuzelijstWaarde(invulwaarde='sick',
                                 label='sick',
                                 objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlLuchtkwaliteitOpstellingMerk/sick')
    }
|
# if you want to train without using the disentanglement_lib infrastructure
# you can train using this file.
# This file's train method is very, very similar to
# jlonevae_lib/train/train_jlonevae_models.py's train method
# This is just an object-oriented version of that more script-based version
# this file's train method also has a "save model every" and expects the caller
# to do the parameter annealing already.
# If you're using disentanglement_lib you might as well use that file.
# If you're using a custom dataloader it probably makes sense to use this file.
import torch
import torch.utils.tensorboard
import os
import numpy as np
from jlonevae_lib.architecture.save_model import save_conv_vae
import jlonevae_lib.architecture.vae_jacobian as vj
from jlonevae_lib.train.loss_function import vae_loss_function
# based on reviewer feedback:
## gamma_type identifies whether we are regularizing the L1 norm of the jacobian (jlone)
## or whether we are regularizing the L2 norm of the jacobian (jltwo)
class JLOneVAETrainer(object):
    """Trains a VAE with a Jacobian regularization term.

    `regularization_type` selects the penalty: "jlone" (L1 norm of the
    decoder Jacobian) or "jltwo" (L2 norm). The caller handles parameter
    annealing scheduling via `annealingBatches`: beta and gamma ramp
    linearly from 0 over the first `annealingBatches` batches.
    """

    def __init__(self, model, data_loader, beta, gamma, device,
                 log_dir, lr, annealingBatches, record_loss_every=100, regularization_type="jlone"):
        self.model = model
        self.data_loader = data_loader
        self.beta = beta      # weight on the KL divergence term
        self.gamma = gamma    # weight on the Jacobian regularizer
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)
        self.device = device
        self.log_dir = log_dir
        self.writer = torch.utils.tensorboard.SummaryWriter(log_dir=self.log_dir)
        self.num_batches_seen = 0
        self.annealingBatches = annealingBatches
        self.record_loss_every = record_loss_every
        self.regularization_type = regularization_type

    def train(self):
        """Run one pass over data_loader, updating the model in place."""
        # set model to "training mode"
        self.model.train()
        # for each chunk of data in an epoch
        for data in self.data_loader:
            # Linearly anneal beta/gamma over the first annealingBatches.
            if self.num_batches_seen < self.annealingBatches:
                tmp_beta = self.beta * self.num_batches_seen / self.annealingBatches
                tmp_gamma = self.gamma * self.num_batches_seen / self.annealingBatches
            else:
                tmp_beta = self.beta
                tmp_gamma = self.gamma

            # move data to device, initialize optimizer, model data, compute loss,
            # and perform one optimizer step
            data = data.to(self.device)
            self.optimizer.zero_grad()
            recon_batch, mu, logvar, noisy_mu = self.model(data)
            loss, NegLogLikelihood, KLD, mu_error, logvar_error = vae_loss_function(recon_batch,
                data, mu, logvar, tmp_beta)
            # short-circuit calc if gamma is 0 (no JL1-VAE loss)
            if tmp_gamma == 0:
                ICA_loss = torch.tensor(0)
            elif self.regularization_type == "jlone":
                ICA_loss = vj.jacobian_loss_function(self.model, noisy_mu, logvar, self.device)
            elif self.regularization_type == "jltwo":
                ICA_loss = vj.jacobian_l2_loss_function(self.model, noisy_mu, logvar, self.device)
            else:
                # Fixed: the original raised the nonexistent `RuntimeException`
                # and referenced the nonexistent attribute `self.gamma_type`.
                raise RuntimeError(f"Unknown regularization type {self.regularization_type}. Valid options are 'jlone' and 'jltwo'")
            loss += tmp_gamma * ICA_loss
            loss.backward()
            self.optimizer.step()

            # log to tensorboard
            self.num_batches_seen += 1
            if self.num_batches_seen % self.record_loss_every == 0:
                self.writer.add_scalar("ICALoss/train", ICA_loss.item(), self.num_batches_seen)
                self.writer.add_scalar("ELBO/train", loss.item(), self.num_batches_seen)
                self.writer.add_scalar("KLD/train", KLD.item(), self.num_batches_seen)
                self.writer.add_scalar("MuDiv/train", mu_error.item(), self.num_batches_seen)
                self.writer.add_scalar("VarDiv/train", logvar_error.item(), self.num_batches_seen)
                self.writer.add_scalar("NLL/train", NegLogLikelihood.item(), self.num_batches_seen)
                self.writer.add_scalar("beta", tmp_beta, self.num_batches_seen)
                self.writer.add_scalar("gamma", tmp_gamma, self.num_batches_seen)
|
import urllib2
import urllib
import json
import pprint
import base64
import pprint
import redis
import os
#from confluent_kafka import Producer
#from hdfs import TokenClient
#from hdfs import InsecureClient
#client = TokenClient('http://node-1.testing:50070/', 'root', root='/user/root/data_trentino')
#client = InsecureClient('http://node-1.testing:50070/', 'root', root='/user/root/open_data')
#request = urllib2.Request("http://93.63.32.36/api/3/action/group_list")
#URL_DATI_TRENTINO = "http://dati.trentino.it"
#URL_DATI_GOV = "http://93.63.32.36"
#URL_DATI_GOV = "http://156.54.180.185"
URL_DATI_GOV = "http://192.168.0.33"
class HeadRequest(urllib2.Request):
    # urllib2.Request issues GET/POST by default; overriding get_method makes
    # this request use HTTP HEAD (headers only, no response body).
    def get_method(self):
        return "HEAD"
class MasterCrawler:
    # Crawls a CKAN open-data portal (Python 2 code): seeds dataset ids into a
    # Redis list, then drains the list, downloading each dataset's CSV/JSON
    # resources and appending a status record to new_dati_gov_status.json.
    def __init__(self, url_ckan, redis_ip, redis_port):
        self.ckan = url_ckan
        self.r = redis.StrictRedis(host=redis_ip, port=redis_port, db=0)
    def formatUrl(self, url):
        # Percent-encode only the last path segment of the resource URL.
        urlSplit = url.rsplit('/', 1)
        urlEnd = urllib.quote(urlSplit[1])
        urlStart = urlSplit[0]
        finalUrl = urlStart + "/" + urlEnd
        return finalUrl
    def initializeRedis(self):
        # Ensure the status file exists, then push every dataset id returned
        # by the CKAN package_list API onto the "dataset_id" Redis list.
        if not os.path.isfile("new_dati_gov_status.json"):
            with open('new_dati_gov_status.json', 'w') as writer:
                writer.write('')
        # NOTE(review): uses the module-level URL_DATI_GOV constant rather
        # than self.ckan passed to __init__ — presumably unintended; verify.
        request = urllib2.Request(URL_DATI_GOV + "/api/3/action/package_list")
        response = urllib2.urlopen(request)
        assert response.code == 200
        response_dict = json.loads(response.read())
        # Check the contents of the response.
        assert response_dict['success'] is True
        result = response_dict['result']
        test_res = result #[:2000]
        for res in test_res:
            print res
            self.r.rpush("dataset_id", res)
    def consumeData(self):
        # Drain the Redis queue: for each dataset id fetch its metadata,
        # download csv/json resources under ./open_data/<dataset_id>/ and
        # append one JSON status line per dataset; failed ids are pushed to
        # the "dataset_error" list.
        red = self.r
        while(red.llen("dataset_id") != 0):
            dataset_id = red.lpop("dataset_id")
            encRes = urllib.urlencode({"id" : unicode(dataset_id).encode('utf-8')})
            request_info = urllib2.Request(URL_DATI_GOV + "/api/3/action/package_show?" + encRes)
            #request_info.add_header("Authorization", "Basic %s" % base64string)
            try:
                response_info = urllib2.urlopen(request_info)
                info_dataset = json.loads(response_info.read())
                results = info_dataset['result']
                info = results
                #print json.dumps(info)
                if 'resources' in info:
                    #print info
                    info["m_status_resources"] = "ok"
                    resources = info['resources']
                    name = info['name']
                    idInfo = info['id']
                    for resource in resources:
                        rUrl = resource['url']
                        rFormat = resource['format']
                        rName = resource['name']
                        rId = resource['id']
                        finalUrl = self.formatUrl(rUrl)
                        print finalUrl
                        rInfo = urllib2.Request(finalUrl)
                        try:
                            rReq = urllib2.urlopen(rInfo)
                            if rReq.code == 200:
                                resource["m_status"] = "ok"
                                # Persist CSV payloads to disk.
                                if "csv" in rFormat.lower():
                                    print "qui passo"
                                    data = rReq.read()
                                    data_dir = "./open_data/" + dataset_id
                                    print data_dir
                                    if not os.path.exists(data_dir):
                                        os.makedirs(data_dir)
                                    file_path = data_dir + "/" + rId + "_" + rFormat + ".csv"
                                    with open(file_path, "wb") as code:
                                        code.write(data)
                                # Persist JSON payloads to disk.
                                # NOTE(review): a second rReq.read() here
                                # returns b'' if the csv branch already read
                                # the stream — verify formats are disjoint.
                                if "json" in rFormat.lower():
                                    data = rReq.read()
                                    data_dir = "./open_data/" + dataset_id
                                    if not os.path.exists(data_dir):
                                        os.makedirs(data_dir)
                                    file_path = data_dir + "/" + rId + "_" + rFormat + ".json"
                                    with open(file_path, "wb") as code:
                                        code.write(data)
                            else:
                                resource["m_status"] = "ko"
                        except Exception, e:
                            resource["m_status"] = "ko"
                            print str(e)
                else:
                    print info
                    info["m_status_resources"] = "ko"
                    print "NO RESOURCES"
                #rData = rReq.read()
                with open('new_dati_gov_status.json','a') as writer:
                    writer.write(json.dumps(info) + '\n')
            except Exception, e:
                print str(e)
                red.lpush("dataset_error", dataset_id)
#URL_DATI_TRENTINO = "http://dati.trentino.it"
#URL_DATI_GOV = "http://93.63.32.36"
URL_NEW_DATI_GOV = "http://156.54.180.185"
REDIS_IP = "localhost"
REDIS_PORT = 6379
# Script entry: seed the Redis queue with dataset ids, then download them.
# NOTE(review): the class methods read the module-level URL_DATI_GOV, so the
# URL_NEW_DATI_GOV passed here is effectively unused — verify intent.
crawler = MasterCrawler(URL_NEW_DATI_GOV,REDIS_IP,REDIS_PORT)
crawler.initializeRedis()
crawler.consumeData()
|
import pyautogui
# GUI automation macro: replays fixed screen-coordinate clicks in three
# stages, pausing for user confirmation between stages. FAILSAFE lets the
# user abort by slamming the mouse into a screen corner.
pyautogui.FAILSAFE = True
# Seconds pyautogui sleeps after every call, per stage.
time1 = 8
time2 = 8
time3 = 8
#go to top right of dock, cook food
pyautogui.PAUSE = time1
pyautogui.click(1828, 119)
pyautogui.click(1707, 614)
pyautogui.click(1643, 615)
pyautogui.click(1833, 933)
pyautogui.click(1050, 530)
pyautogui.press('space')
input("Ready to continue?")
#top right part of dock to general store
pyautogui.PAUSE = time2
pyautogui.click(1783, 264)
pyautogui.click(1720, 226)
pyautogui.click(421, 701)
input("Ready to continue?")
#from store between two posts to logs to dock
pyautogui.PAUSE = time3
pyautogui.click(1902, 149)
pyautogui.click(1890, 189)
pyautogui.click(1427, 275)
pyautogui.click(1692, 134)
pyautogui.click(1741, 73)
pyautogui.click(1851, 74)
print("done!")
|
#
# PySNMP MIB module Wellfleet-SPAN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Wellfleet-SPAN-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:41:40 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE: machine-generated module. Do not edit by hand; regenerate from the
# ASN.1 source instead. `mibBuilder` is not defined here — pysnmp executes
# this file inside a loader context that provides it.
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Gauge32, IpAddress, ObjectIdentity, Counter32, Integer32, NotificationType, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, ModuleIdentity, iso, MibIdentifier, TimeTicks, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "IpAddress", "ObjectIdentity", "Counter32", "Integer32", "NotificationType", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "ModuleIdentity", "iso", "MibIdentifier", "TimeTicks", "Unsigned32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
wfSpanningTree, = mibBuilder.importSymbols("Wellfleet-COMMON-MIB", "wfSpanningTree")
# --- Module root and per-bridge (base) spanning tree scalars -------------
wfBrStp = MibIdentifier((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 1))
wfBrStpBaseDelete = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("created", 1), ("deleted", 2))).clone('created')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfBrStpBaseDelete.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpBaseDelete.setDescription('Create/Delete parameter. Default is created. Users perform an SNMP SET operation on this object in order to create/delete the Spanning tree.')
wfBrStpBaseEnable = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfBrStpBaseEnable.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpBaseEnable.setDescription('Enable/Disable parameter. Default is enabled. Users perform an SNMP SET operation on this object in order to enable/disable the spanning tree.')
wfBrStpBaseState = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("init", 3), ("pres", 4))).clone('down')).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfBrStpBaseState.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpBaseState.setDescription('The current state of the spanning tree.')
wfBrStpProtocolSpecification = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unknown", 1), ("declb100", 2), ("ieee8021d", 3))).clone('ieee8021d')).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfBrStpProtocolSpecification.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpProtocolSpecification.setDescription('The version of the Spanning Tree protocol being run.')
wfBrStpBridgeID = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 1, 5), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfBrStpBridgeID.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpBridgeID.setDescription('The Spanning Tree Bridge ID assigned to this bridge. It is a 8-octet string. The first two octets make up the bridge priority, and the last six are the MAC address of this bridge, which is commonly the MAC address of the first port on the bridge.')
wfBrStpTimeSinceTopologyChange = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfBrStpTimeSinceTopologyChange.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpTimeSinceTopologyChange.setDescription('The time (in hundredths of a second) since the last topology change was detected by the bridge.')
wfBrStpTopChanges = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfBrStpTopChanges.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpTopChanges.setDescription('The total number of topology changes detected by this bridge since it was last reset or initialized')
wfBrStpDesignatedRoot = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 1, 8), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfBrStpDesignatedRoot.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpDesignatedRoot.setDescription('The bridge ID of the root of the spanning tree as determined by the Spanning Tree Protocol as executed by the bridge. This value is used as the Root Identifier parameter in all Configuration Bridge PDUs originated by this node. It is a 8-octet string. The first two octets make up the priority, and the last six are the MAC address of the designated root bridge.')
wfBrStpRootCost = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfBrStpRootCost.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpRootCost.setDescription('The cost of the path to the root as seen from this bridge.')
wfBrStpRootPort = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfBrStpRootPort.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpRootPort.setDescription('The port identifier of the port which offers the lowest cost path from this bridge to the root bridge.')
wfBrStpMaxAge = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfBrStpMaxAge.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpMaxAge.setDescription('The maximum age of the Spanning Tree Protocol information learned from the network on any port before it is discarded, in hundredths of a second. This represents the value actually in use by the bridge.')
wfBrStpHelloTime = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfBrStpHelloTime.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpHelloTime.setDescription('The amount of time between transmission of configuration BPDUs by this bridge on any port, when it is the root of the spanning tree or trying to become so, in hundreths of a second. This represents the value actually in use by the bridge.')
wfBrStpHoldTime = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(100))).clone(namedValues=NamedValues(("time", 100))).clone('time')).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfBrStpHoldTime.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpHoldTime.setDescription('The value that determines the interval length during which no more than two configuration BPDUs shall be transmitted by this bridge, in hundredths of a second.')
wfBrStpForwardDelay = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfBrStpForwardDelay.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpForwardDelay.setDescription('The time, in hundredths of a second, that a port on this bridge will stay in a transitional state (e.g. LISTENING) before moving to the next state (e.g. LEARNING). This value is also used to age all dynamic entries in the Forwarding Database when a topology changed has been detected and is underway. This represents the value actually in use by the bridge.')
wfBrStpBridgeMaxAge = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 1, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(600, 4000)).clone(2000)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfBrStpBridgeMaxAge.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpBridgeMaxAge.setDescription('The value that all bridges use for MaxAge when this bridge acting as the root.')
wfBrStpBridgeHelloTime = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(100, 1000)).clone(200)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfBrStpBridgeHelloTime.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpBridgeHelloTime.setDescription('The value that all bridges use for HelloTime when this bridge acting as the root.')
wfBrStpBridgeForwardDelay = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 1, 17), Integer32().subtype(subtypeSpec=ValueRangeConstraint(400, 3000)).clone(1500)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfBrStpBridgeForwardDelay.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpBridgeForwardDelay.setDescription('The value that all bridges use for ForwardDelay when this bridge acting as the root.')
wfBrStpBaseTrueConverge = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfBrStpBaseTrueConverge.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpBaseTrueConverge.setDescription('Enable/Disable parameter. Default is disabled. Allow spanning tree convergence times to become predictable and accurate to settings specified in user configuration.')
# --- Per-circuit spanning tree interface table ---------------------------
wfBrStpInterfaceTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 2), )
if mibBuilder.loadTexts: wfBrStpInterfaceTable.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpInterfaceTable.setDescription('inst_id[1] = wfBrStpInterfaceCircuit')
wfBrStpInterfaceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 2, 1), ).setIndexNames((0, "Wellfleet-SPAN-MIB", "wfBrStpInterfaceCircuit"))
if mibBuilder.loadTexts: wfBrStpInterfaceEntry.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpInterfaceEntry.setDescription('An entry in wfBrStpInterface.')
wfBrStpInterfaceDelete = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("created", 1), ("deleted", 2))).clone('created')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfBrStpInterfaceDelete.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpInterfaceDelete.setDescription('Create/Delete parameter. Default is created. Users perform an SNMP SET operation on this object in order to create/delete an interface for the Spanning Tree.')
wfBrStpInterfaceEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfBrStpInterfaceEnable.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpInterfaceEnable.setDescription('Enable/Disable parameter. Default is enabled. Users perform an SNMP SET operation on this object in order to enable/disable an interface for the Spanning Tree.')
wfBrStpInterfaceCircuit = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfBrStpInterfaceCircuit.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpInterfaceCircuit.setDescription('The circuit number on which this interface is defined.')
wfBrStpInterfacePriority = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255)).clone(128)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfBrStpInterfacePriority.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpInterfacePriority.setDescription('The value of the priority field which is contained in the first octet of the (2 octet long) port ID for this circuit. The other octet of the port ID is given by wfBrStpInterfaceCircuit.')
wfBrStpInterfaceState = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("disabled", 1), ("blocking", 2), ("listening", 3), ("learning", 4), ("forwarding", 5), ("broken", 6))).clone('disabled')).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfBrStpInterfaceState.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpInterfaceState.setDescription('The current state of the port as defined by the application of the Spanning Tree Protocol.')
wfBrStpInterfaceMultiCastAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 2, 1, 6), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfBrStpInterfaceMultiCastAddr.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpInterfaceMultiCastAddr.setDescription('The MAC address used as the destination for all BPDU packets generated out this port.')
wfBrStpInterfacePathCost = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 2, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfBrStpInterfacePathCost.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpInterfacePathCost.setDescription('The contribution of this port the the root path cost of paths toward the spanning tree root which include this port.')
wfBrStpInterfaceDesignatedRoot = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 2, 1, 8), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfBrStpInterfaceDesignatedRoot.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpInterfaceDesignatedRoot.setDescription('The unique Bridge Identifier of the Bridge recorded as the Root in the Configuration BPDUs transmitted by the Designated Bridge for the segment to which this port is attached. Bridge IDs are made up of a 2-byte priority field, and a 6-byte MAC address.')
wfBrStpInterfaceDesignatedCost = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 2, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfBrStpInterfaceDesignatedCost.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpInterfaceDesignatedCost.setDescription('The path cost of the Designated Port of the segment connected to this port. This value is compared to the Root Path Cost field in received bridge PDUs.')
wfBrStpInterfaceDesignatedBridge = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 2, 1, 10), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfBrStpInterfaceDesignatedBridge.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpInterfaceDesignatedBridge.setDescription("The Bridge Identifier of the bridge which this port considers to be the Designated Bridge for this port's segment.")
wfBrStpInterfaceDesignatedPort = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 2, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfBrStpInterfaceDesignatedPort.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpInterfaceDesignatedPort.setDescription("The Port Identifier of the port on the Designated Bridge for this port's segment.")
wfBrStpInterfaceForwardTransitions = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 2, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfBrStpInterfaceForwardTransitions.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpInterfaceForwardTransitions.setDescription('The number of times this port has transitioned from the Learning State to the Forwarding state.')
wfBrStpInterfacePktsXmitd = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 2, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfBrStpInterfacePktsXmitd.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpInterfacePktsXmitd.setDescription('The number of BPDU packets transmitted out this port')
wfBrStpInterfacePktsRcvd = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 2, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfBrStpInterfacePktsRcvd.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpInterfacePktsRcvd.setDescription('The number of BPDU packets received on this port')
wfBrStpInterfaceTranslationDisable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 1, 2, 2, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfBrStpInterfaceTranslationDisable.setStatus('mandatory')
if mibBuilder.loadTexts: wfBrStpInterfaceTranslationDisable.setDescription('Enable/Disable translation bridging parameter. Default is disabled.')
# Export every symbol so other MIB modules can import them by name.
mibBuilder.exportSymbols("Wellfleet-SPAN-MIB", wfBrStpTimeSinceTopologyChange=wfBrStpTimeSinceTopologyChange, wfBrStpForwardDelay=wfBrStpForwardDelay, wfBrStpInterfaceState=wfBrStpInterfaceState, wfBrStpInterfaceDesignatedCost=wfBrStpInterfaceDesignatedCost, wfBrStpMaxAge=wfBrStpMaxAge, wfBrStpBaseState=wfBrStpBaseState, wfBrStpInterfaceTable=wfBrStpInterfaceTable, wfBrStpBaseTrueConverge=wfBrStpBaseTrueConverge, wfBrStpInterfacePktsRcvd=wfBrStpInterfacePktsRcvd, wfBrStpBridgeForwardDelay=wfBrStpBridgeForwardDelay, wfBrStpInterfacePktsXmitd=wfBrStpInterfacePktsXmitd, wfBrStpTopChanges=wfBrStpTopChanges, wfBrStpInterfaceDesignatedPort=wfBrStpInterfaceDesignatedPort, wfBrStpBridgeID=wfBrStpBridgeID, wfBrStpHelloTime=wfBrStpHelloTime, wfBrStpInterfaceForwardTransitions=wfBrStpInterfaceForwardTransitions, wfBrStp=wfBrStp, wfBrStpHoldTime=wfBrStpHoldTime, wfBrStpInterfaceMultiCastAddr=wfBrStpInterfaceMultiCastAddr, wfBrStpBaseDelete=wfBrStpBaseDelete, wfBrStpBridgeMaxAge=wfBrStpBridgeMaxAge, wfBrStpInterfaceDelete=wfBrStpInterfaceDelete, wfBrStpRootCost=wfBrStpRootCost, wfBrStpInterfaceEnable=wfBrStpInterfaceEnable, wfBrStpInterfacePathCost=wfBrStpInterfacePathCost, wfBrStpInterfacePriority=wfBrStpInterfacePriority, wfBrStpBaseEnable=wfBrStpBaseEnable, wfBrStpInterfaceCircuit=wfBrStpInterfaceCircuit, wfBrStpProtocolSpecification=wfBrStpProtocolSpecification, wfBrStpInterfaceDesignatedRoot=wfBrStpInterfaceDesignatedRoot, wfBrStpInterfaceDesignatedBridge=wfBrStpInterfaceDesignatedBridge, wfBrStpInterfaceTranslationDisable=wfBrStpInterfaceTranslationDisable, wfBrStpRootPort=wfBrStpRootPort, wfBrStpInterfaceEntry=wfBrStpInterfaceEntry, wfBrStpBridgeHelloTime=wfBrStpBridgeHelloTime, wfBrStpDesignatedRoot=wfBrStpDesignatedRoot)
|
# -*- coding: utf-8 -*-
"""
Created on Apr 27, 2016
@author: Aaron Ponti
"""
class GlobalSettings(object):
    '''
    Store global settings to be used in the dropbox.
    '''

    # Image resolutions to be used to generate the images ("thumbnails") that
    # are displayed in the image viewer. Examples:
    #
    #     ["128x128"]
    #     ["128x128", "256x256"]
    #
    # Set to [] to disable generation of the thumbnails. If ImageResolutions is
    # set to [], the image viewer will generate the views on the fly at the
    # full resolution and at 1/4 of the full resolution (in X and Y). Please
    # notice that a minimum resolution of 128x128 is enforced.
    ImageResolutions = []
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TaskDependencies(Model):
    """
    Specifies any dependencies of a task. Any task that is explicitly
    specified or within a dependency range must complete before the dependent
    task will be scheduled.

    :param task_ids: The list of task ids that must complete before this task
     can be scheduled.
    :type task_ids: list of str
    :param task_id_ranges: The list of task ranges that must complete before
     this task can be scheduled.
    :type task_id_ranges: list of :class:`TaskIdRange
     <azure.batch.models.TaskIdRange>`
    """

    # msrest serialization map: Python attribute name -> REST wire key/type.
    # NOTE: this file is AutoRest-generated; manual edits are lost when the
    # code is regenerated.
    _attribute_map = {
        'task_ids': {'key': 'taskIds', 'type': '[str]'},
        'task_id_ranges': {'key': 'taskIdRanges', 'type': '[TaskIdRange]'},
    }

    def __init__(self, task_ids=None, task_id_ranges=None):
        """Store the optional dependency lists on the model instance."""
        self.task_ids = task_ids
        self.task_id_ranges = task_id_ranges
|
"""
Inspired by EnsembleStrategy from https://github.com/joaorafaelm/freqtrade-heroku/
Created by https://github.com/raph92/
"""
from __future__ import annotations
import concurrent
import logging
import sys
import time
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from pathlib import Path
from typing import Dict
import pandas as pd
import rapidjson
from freqtrade.enums import SellType
from freqtrade.persistence import Trade
from freqtrade.resolvers import StrategyResolver
from freqtrade.strategy import (
IStrategy,
DecimalParameter,
stoploss_from_open,
CategoricalParameter,
)
from freqtrade.strategy.interface import SellCheckTuple
# Make sibling strategy modules importable when freqtrade loads this file.
sys.path.append(str(Path(__file__).parent))
logger = logging.getLogger(__name__)

ensemble_path = Path("user_data/strategies/ensemble.json")
# Loads strategies from ensemble.json. Or you can add them manually.
STRATEGIES = []
if not STRATEGIES and ensemble_path.exists():
    STRATEGIES = rapidjson.loads(ensemble_path.resolve().read_text())
# raise an exception if no strategies are in the list
if not STRATEGIES:
    raise ValueError("No strategies added to strategy list")
# Config keys stripped before each sub-strategy is loaded — presumably so
# this conductor's values don't override the sub-strategy's own settings
# (see get_strategy) — confirm against StrategyResolver behavior.
keys_to_delete = [
    "minimal_roi",
    "stoploss",
    "ignore_roi_if_buy_signal",
]
class ConductorStrategy(IStrategy):
"""Inspired by EnsembleStrategy from https://github.com/joaorafaelm/freqtrade-heroku/"""
loaded_strategies = {}
stoploss = -0.31
minimal_roi = {"0": 0.1669, "19": 0.049, "61": 0.023, "152": 0}
# sell_profit_offset = (
# 0.001 # it doesn't meant anything, just to guarantee there is a minimal profit.
# )
use_sell_signal = True
ignore_roi_if_buy_signal = True
sell_profit_only = False
# Custom stoploss
use_custom_stoploss = True
# Run "populate_indicators()" only for new candle.
process_only_new_candles = True
# Number of candles the strategy requires before producing valid signals
startup_candle_count: int = 200
plot_config = {
"main_plot": {
"buy_sell": {
"sell_tag": {"color": "red"},
"buy_tag": {"color": "blue"},
},
}
}
use_custom_stoploss_opt = CategoricalParameter(
[True, False], default=False, space="buy"
)
# region trailing stoploss hyperopt parameters
# hard stoploss profit
pHSL = DecimalParameter(
-0.200,
-0.040,
default=-0.15,
decimals=3,
space="sell",
optimize=True,
load=True,
)
# profit threshold 1, trigger point, SL_1 is used
pPF_1 = DecimalParameter(
0.008, 0.020, default=0.016, decimals=3, space="sell", optimize=True, load=True
)
pSL_1 = DecimalParameter(
0.008, 0.020, default=0.014, decimals=3, space="sell", optimize=True, load=True
)
# profit threshold 2, SL_2 is used
pPF_2 = DecimalParameter(
0.040, 0.100, default=0.024, decimals=3, space="sell", optimize=True, load=True
)
pSL_2 = DecimalParameter(
0.020, 0.070, default=0.022, decimals=3, space="sell", optimize=True, load=True
)
# endregion
slippage_protection = {"retries": 3, "max_slippage": -0.02}
def __init__(self, config: dict) -> None:
super().__init__(config)
# self.gui_thread = None
logger.info(f"Buy strategies: {STRATEGIES}")
if self.is_live_or_dry:
self.trailing_stop = True
self.use_custom_stoploss = False
else:
self.trailing_stop = False
self.use_custom_stoploss = True
@property
def is_live_or_dry(self):
return self.config["runmode"].value in ("live", "dry_run")
def custom_stoploss(
self,
pair: str,
trade: "Trade",
current_time: datetime,
current_rate: float,
current_profit: float,
**kwargs,
) -> float:
"""Custom Trailing Stoploss by Perkmeister"""
if not self.use_custom_stoploss_opt.value:
return self.stoploss
# hard stoploss profit
hsl = self.pHSL.value
pf_1 = self.pPF_1.value
sl_1 = self.pSL_1.value
pf_2 = self.pPF_2.value
sl_2 = self.pSL_2.value
# For profits between PF_1 and PF_2 the stoploss (sl_profit) used is linearly interpolated
# between the values of SL_1 and SL_2. For all profits above PL_2 the sl_profit value
# rises linearly with current profit, for profits below PF_1 the hard stoploss profit is used.
if current_profit > pf_2:
sl_profit = sl_2 + (current_profit - pf_2)
elif current_profit > pf_1:
sl_profit = sl_1 + ((current_profit - pf_1) * (sl_2 - sl_1) / (pf_2 - pf_1))
else:
sl_profit = hsl
return stoploss_from_open(sl_profit, current_profit) or self.stoploss
def informative_pairs(self):
inf_pairs = []
# get inf pairs for all strategies
for s in STRATEGIES:
strategy = self.get_strategy(s)
inf_pairs.extend(strategy.informative_pairs())
# remove duplicates
return list(set(inf_pairs))
def get_strategy(self, strategy_name):
"""
Get strategy from strategy name
:param strategy_name: strategy name
:return: strategy class
"""
strategy = self.loaded_strategies.get(strategy_name)
if not strategy:
config = self.config.copy()
config["strategy"] = strategy_name
for k in keys_to_delete:
try:
del config[k]
except KeyError:
pass
strategy = StrategyResolver.load_strategy(config)
self.startup_candle_count = max(
self.startup_candle_count, strategy.startup_candle_count
)
strategy.dp = self.dp
strategy.wallets = self.wallets
self.loaded_strategies[strategy_name] = strategy
return strategy
def analyze(self, pairs: list[str]) -> None:
"""used in live"""
t1 = time.time()
with ThreadPoolExecutor(max_workers=2) as executor:
futures = []
for pair in pairs:
futures.append(executor.submit(self.analyze_pair, pair))
for future in concurrent.futures.as_completed(futures):
future.result()
logger.info("Analyzed everything in %f seconds", time.time() - t1)
# super().analyze(pairs)
def advise_all_indicators(
self, data: Dict[str, pd.DataFrame]
) -> Dict[str, pd.DataFrame]:
"""only used in backtesting/hyperopt"""
for s in STRATEGIES:
self.get_strategy(s)
logger.info("Loaded all strategies")
t1 = time.time()
indicators = super().advise_all_indicators(data)
logger.info("Advise all elapsed: %s", time.time() - t1)
return indicators
def populate_indicators(
self, dataframe: pd.DataFrame, metadata: dict
) -> pd.DataFrame:
inf_frames: list[pd.DataFrame] = []
for strategy_name in STRATEGIES:
strategy = self.get_strategy(strategy_name)
dataframe = strategy.advise_indicators(dataframe, metadata)
# remove inf data from dataframe to avoid duplicates
# _x or _y gets added to the inf columns that already exist
inf_frames.append(dataframe.filter(regex=r"\w+_\d{1,2}[mhd]"))
dataframe = dataframe[
dataframe.columns.drop(
list(dataframe.filter(regex=r"\w+_\d{1,2}[mhd]"))
)
]
# add informative data back to dataframe
for frame in inf_frames:
for col, series in frame.iteritems():
if col in dataframe:
continue
dataframe[col] = series
return dataframe
    def populate_buy_trend(
        self, dataframe: pd.DataFrame, metadata: dict
    ) -> pd.DataFrame:
        """
        Populates the buy signal for all strategies. Every strategy that
        signals a buy on a candle is appended (comma separated) to that
        candle's `buy_strategies` column, which is then copied to `buy_tag`,
        and `buy` is set to 1 for any candle with a non-empty tag.
        Open to constructive criticism!
        """
        strategies = STRATEGIES.copy()
        dataframe['buy_tag'] = ''
        dataframe['buy_strategies'] = ''
        for strategy_name in strategies:
            # load instance of strategy_name
            strategy = self.get_strategy(strategy_name)
            # essentially call populate_buy_trend on strategy_name
            # I use copy() here to prevent duplicate columns from being populated
            strategy_dataframe = strategy.advise_buy(dataframe.copy(), metadata)
            # create column for `strategy`
            strategy_dataframe.loc[:, "buy_strategies"] = ""
            # On every candle that a buy signal is found, strategy_name
            # name will be added to its 'buy_strategies' column
            strategy_dataframe.loc[
                strategy_dataframe.buy == 1, "buy_strategies"
            ] = strategy_name
            # get the strategies that already exist for the row in the original dataframe
            strategy_dataframe.loc[:, "existing_strategies"] = dataframe[
                "buy_strategies"
            ]
            # join the strategies found in the original dataframe's row with the new strategy
            strategy_dataframe.loc[:, "buy_strategies"] = strategy_dataframe.apply(
                lambda x: ",".join(
                    (x["buy_strategies"], x["existing_strategies"])
                ).strip(","),
                axis=1,
            )
            # update the original dataframe with the new strategies buy signals
            dataframe.loc[:, "buy_strategies"] = strategy_dataframe["buy_strategies"]
            # carry over any columns the sub-strategy added that we don't have yet
            for k in strategy_dataframe:
                if k not in dataframe:
                    dataframe[k] = strategy_dataframe[k]
        # drop unnecessary columns (errors="ignore" tolerates a missing column)
        dataframe.drop(
            [
                'existing_strategies',
            ],
            axis=1,
            inplace=True,
            errors="ignore",
        )
        dataframe.loc[
            (dataframe.buy_strategies != ''), 'buy_tag'
        ] = dataframe.buy_strategies
        # set `buy` column of rows with a buy_tag to 1
        dataframe.loc[dataframe.buy_tag != "", "buy"] = 1
        return dataframe
    def populate_sell_trend(
        self, dataframe: pd.DataFrame, metadata: dict
    ) -> pd.DataFrame:
        """
        Populates the sell signal for all strategies. Every strategy that
        signals a sell on a candle is appended (comma separated) to that
        candle's `sell_strategies` column; `sell` is then set to 1 and
        `exit_tag` filled with "<strategies>-ss" for those candles.

        NOTE(review): the original description said this "will not set the
        sell signal" and mentioned an `ensemble_sells` column / custom_sell,
        but the code below does set `sell`/`exit_tag` directly — confirm
        which behavior is current.
        """
        dataframe['sell_tag'] = ''
        dataframe['sell_strategies'] = ''
        dataframe['exit_tag'] = None
        strategies = STRATEGIES.copy()
        # only populate strategies with open trades
        if self.is_live_or_dry:
            strategies_in_trades = set()
            trades: list[Trade] = Trade.get_open_trades()
            for t in trades:
                strategies_in_trades.update(t.buy_tag.split(","))
            strategies = strategies_in_trades
        for strategy_name in strategies:
            # If you know a better way of doing this, feel free to criticize this and let me know!
            # load instance of strategy_name
            strategy = self.get_strategy(strategy_name)
            # essentially call populate_sell_trend on strategy_name
            # I use copy() here to prevent duplicate columns from being populated
            dataframe_copy = strategy.advise_sell(dataframe.copy(), metadata)
            # create column for `strategy`
            dataframe_copy.loc[:, "sell_strategies"] = ""
            # On every candle that a sell signal is found, strategy_name
            # name will be added to its 'sell_strategies' column
            dataframe_copy.loc[
                dataframe_copy.sell == 1, "sell_strategies"
            ] = strategy_name
            # get the strategies that already exist for the row in the original dataframe
            dataframe_copy.loc[:, "existing_strategies"] = dataframe["sell_strategies"]
            # join the strategies found in the original dataframe's row with the new strategy
            dataframe_copy.loc[:, "sell_strategies"] = dataframe_copy.apply(
                lambda x: ",".join(
                    (x["sell_strategies"], x["existing_strategies"])
                ).strip(","),
                axis=1,
            )
            # update the original dataframe with the new strategies sell signals
            dataframe.loc[:, "sell_strategies"] = dataframe_copy["sell_strategies"]
            # carry over any columns the sub-strategy added that we don't have yet
            for k in dataframe_copy:
                if k not in dataframe:
                    dataframe[k] = dataframe_copy[k]
        # drop helper columns (errors="ignore" tolerates never-created ones)
        dataframe.drop(
            [
                'new_sell_tag',
                'existing_strategies',
            ],
            axis=1,
            inplace=True,
            errors="ignore",
        )
        dataframe.loc[dataframe.sell_strategies != '', 'sell'] = 1
        dataframe.loc[
            (dataframe.sell_strategies != '') & dataframe.exit_tag.isna(), 'exit_tag'
        ] = (dataframe.sell_strategies + f'-ss')
        return dataframe
def should_sell(
    self,
    trade: Trade,
    rate: float,
    date: datetime,
    buy: bool,
    sell: bool,
    low: float = None,
    high: float = None,
    force_stoploss: float = 0,
) -> SellCheckTuple:
    """Decide whether the open ``trade`` should be exited.

    Only strategies recorded in the trade's ``buy_tag`` may trigger an
    exit: a candle-level sell signal is honored only when at least one of
    the strategies that signalled the sell also participated in the buy,
    and per-strategy ``should_sell`` checks are consulted only for the
    buying strategies. Falls back to the parent implementation when no
    sub-strategy requests an exit.

    :param trade: the open trade under consideration
    :param rate: current rate for the pair
    :param date: current candle date
    :param buy: whether a buy signal is active on the last candle
    :param sell: whether a sell signal is active on the last candle
    :return: a SellCheckTuple describing the exit decision
    """
    # load the valid strategies for the pair
    strategies = STRATEGIES.copy()
    dataframe, _ = self.dp.get_analyzed_dataframe(trade.pair, self.timeframe)
    last_candle = dataframe.iloc[-1].squeeze()
    # do not honor the sell signal of a strategy that is not in the buy tag
    if sell:
        buy_strategies = set(trade.buy_tag.split(','))
        sell_strategies = set(last_candle['sell_strategies'].split(','))
        # make sure at least 1 sell strategy is in the buy strategies
        if not sell_strategies.intersection(buy_strategies):
            sell = False
        else:
            # FIX: removed a stray '(' that produced an unbalanced sell
            # reason like '(StratA-ss'; the reason now matches the
            # '<strategies>-ss' format used for exit_tag elsewhere.
            return SellCheckTuple(
                SellType.SELL_SIGNAL,
                f'{last_candle["sell_strategies"]}-ss',
            )
    # go through each buying strategy and ask if it should sell
    for strategy_name in strategies:
        strategy = self.get_strategy(strategy_name)
        if strategy_name not in trade.buy_tag:
            # do not honor the should_sell of a strategy that is not in the buy tag
            continue
        sell_check = strategy.should_sell(
            trade, rate, date, buy, sell, low, high, force_stoploss
        )
        if sell_check is not None:
            # prefix the reason with the sub-strategy that produced it
            sell_check.sell_reason = (
                f'{strategy.get_strategy_name()}-{sell_check.sell_reason}'
            )
            return sell_check
    return super().should_sell(
        trade, rate, date, buy, sell, low, high, force_stoploss
    )
def confirm_trade_exit(
    self,
    pair: str,
    trade: Trade,
    order_type: str,
    amount: float,
    rate: float,
    time_in_force: str,
    sell_reason: str,
    current_time: datetime,
    **kwargs,
) -> bool:
    """Ask every strategy that bought into this trade to confirm the exit.

    A single veto (a sub-strategy returning a falsy value) blocks the exit.
    Exceptions raised by a sub-strategy are logged and treated as
    non-blocking. Afterwards, a slippage guard (borrowed from
    NotAnotherSMAOffsetStrategy) retries the exit a limited number of times
    when the current rate deviates too far from the last close.

    :return: True when the exit may proceed, False to retry later
    """
    for name in trade.buy_tag.split(","):
        sub_strategy = self.get_strategy(name)
        try:
            confirmed = sub_strategy.confirm_trade_exit(
                pair,
                trade,
                order_type,
                amount,
                rate,
                time_in_force,
                sell_reason,
                current_time=current_time,
            )
        except Exception as e:
            # a misbehaving sub-strategy must not block the whole exit path
            logger.exception(
                "Exception from %s in confirm_trade_exit", name, exc_info=e
            )
            continue
        if not confirmed:
            return False

    # slippage protection from NotAnotherSMAOffsetStrategy
    state = self.slippage_protection.setdefault("__pair_retries", {})

    dataframe, _ = self.dp.get_analyzed_dataframe(pair, self.timeframe)
    candle = dataframe.iloc[-1].squeeze()

    slippage = rate / candle["close"] - 1
    if slippage < self.slippage_protection["max_slippage"]:
        retries_so_far = state.get(pair, 0)
        if retries_so_far < self.slippage_protection["retries"]:
            # defer the exit and remember how often we already retried
            state[pair] = retries_so_far + 1
            return False

    state[pair] = 0
    return True
|
"""Structures that transport call parameters and input data.
Requests are objects created from incoming calls, thus they shall deal with
things like incorrect values, missing parameters, wrong formats, etc. and
transport data from outside the application into the interactors layer.
"""
from typing import Dict, Optional, Union
class InvalidRequest:
    """Collects request validation errors and errors from inner layers.

    Instances are falsy, so callers can simply branch on the returned
    request object to detect failure.
    """

    def __init__(self):
        # each error is a dict with 'parameter' and 'message' keys
        self.errors = []

    def add_error(self, parameter: str, message: str):
        """Record a validation error for *parameter*."""
        self.errors.append({'parameter': parameter, 'message': message})

    def has_errors(self) -> bool:
        """Return True when at least one error has been recorded."""
        return bool(self.errors)

    def __bool__(self):
        # an invalid request is always falsy (mirrors ValidRequest.__bool__)
        return False
class ValidRequest:
    """Contains interactor call parameters and input data.

    Base class for request objects; instances are always truthy so callers
    can branch on the object returned by ``from_dict``.
    """
    @classmethod
    def from_dict(cls, adict: Dict) -> Union['ValidRequest', InvalidRequest]:
        """Build a validated request from *adict*; subclasses must implement this."""
        raise NotImplementedError
    def __bool__(self):
        # a valid request is always truthy (mirrors InvalidRequest.__bool__)
        return True
class OrganizationStatsRequest(ValidRequest):
    """RepositoriesStats interactor request."""

    def __init__(self, organization_name: str):
        self.organization_name = organization_name

    @classmethod
    def from_dict(cls, adict):
        """Validate *adict* and build the request, or return the errors."""
        invalid = InvalidRequest()
        if 'organization_name' not in adict:
            invalid.add_error('organization_name', 'Is required')
        elif not isinstance(adict['organization_name'], str):
            invalid.add_error('organization_name', 'Is not string')

        if invalid.has_errors():
            return invalid
        return OrganizationStatsRequest(organization_name=adict['organization_name'])
class ChubbiestRepositoriesRequest(ValidRequest):
    """ChubbiestRepositories interactor request."""

    def __init__(self, limit: Optional[int] = None):
        # fall back to 10 entries when no limit is given
        self.limit = limit or 10

    @classmethod
    def from_dict(cls, adict):
        """Validate *adict* and build the request, or return the errors.

        FIXES: no longer mutates the caller's dict (the old code wrote the
        converted value back into ``adict``); also catches TypeError so
        non-numeric values such as None or a list are reported as
        validation errors instead of crashing.
        """
        invalid_request = InvalidRequest()
        try:
            limit = int(adict.get('limit', 10))
        except (TypeError, ValueError):
            invalid_request.add_error('limit', 'Is not integer')
        else:
            if not (1 <= limit <= 100):
                invalid_request.add_error('limit', 'Must be between 1 and 100, both included')
        if invalid_request.has_errors():
            return invalid_request
        return ChubbiestRepositoriesRequest(limit=limit)
|
"""
Django settings for belajar_online project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from environs import Env
env = Env()
env.read_env()
ALLOWED_HOSTS = env.list('ALLOWED_HOSTS')
DEBUG = env.bool('DEBUG')
DEV_MODE = env.bool('DEV_MODE')
DB_NAME = env('DB_NAME')
DB_USER = env('DB_USER')
DB_PASSWORD = env('DB_PASSWORD')
DB_HOST = env('DB_HOST')
DB_PORT = env('DB_PORT')
HIDE_NAV = [
'main:index',
'account:login',
'account:register'
]
HIDE_FOOTER = [
'account:login',
'account:register'
]
HIDE_SCROLLBAR = [
'main:index',
'account:login',
'account:register'
]
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
LOGIN_URL = 'account:login'
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'zd!#x!&h+z8%y%h%)crs9mq@xzd1-*%8v5c15fxmk(x+vm0unp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = DEBUG
ALLOWED_HOSTS = ALLOWED_HOSTS
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'account',
'dashboard',
'user_profile',
'learning',
'main',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'belajar_online.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'belajar_online.utils.context_processors'
],
},
},
]
WSGI_APPLICATION = 'belajar_online.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': DB_NAME,
'USER': DB_USER,
'PASSWORD': DB_PASSWORD,
'HOST': DB_HOST,
'PORT': DB_PORT,
'OPTIONS': {
'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
},
},
'chat': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3')
}
}
AUTH_USER_MODEL = 'account.User'
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Jakarta'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Messaging
# https://docs.djangoproject.com/en/3.0/ref/contrib/messages/
MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'belajar_online/static'),
os.path.join(BASE_DIR, 'learning/static'),
os.path.join(BASE_DIR, 'dashboard/static'),
]
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Media files (user-uploaded files)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# ETC
NUMBER_GROUPING = 3
USE_THOUSAND_SEPARATOR = True
THOUSAND_SEPARATOR = '.'
DECIMAL_SEPARATOR = ','
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-@VERSIONEER-VERSION@ (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
    git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
    git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Attributes are assigned dynamically by get_config() below (VCS, style,
    tag_prefix, parentdir_prefix, versionfile_source, verbose).
    """
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these strings are filled in when 'setup.py versioneer' creates
    # _version.py
    # NOTE: this whole file is itself a template expanded with printf-style
    # substitution, which is why the placeholder values below look the way
    # they do.
    cfg = VersioneerConfig()
    cfg.VCS = "git"
    cfg.style = "%(STYLE)s"
    cfg.tag_prefix = "%(TAG_PREFIX)s"
    cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
    cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
    cfg.verbose = False
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    NOTE(review): presumably raised by the version-discovery helpers so the
    caller can fall through to the next strategy -- the callers are outside
    this chunk, so confirm against the full file.
    """
LONG_VERSION_PY = {}  # registry placeholder used by the versioneer machinery
HANDLERS = {}  # registry: HANDLERS[vcs][method] -> handler, see register_vcs_handler
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
|
import operator
import unittest
import os
from securitytxt.securitytxt import SecurityTXT
class TestFromFile(unittest.TestCase):
    """Golden-file tests for SecurityTXT parsing.

    Each sub-directory of ``files/`` is one test case holding an ``in.txt``
    (raw security.txt content) and an ``out.txt`` (expected string dump of
    the parsed object's attributes).
    """
    def test_from_file(self):
        """Parse every in.txt fixture and compare against its out.txt."""
        files_dir = f"{os.path.dirname(os.path.realpath(__file__))}/files/"
        # next(os.walk(...))[1] yields only the immediate sub-directories
        folders = next(os.walk(files_dir))[1]
        for folder in folders:
            with self.subTest(msg=f"Checking test case {folder}"):
                # NOTE(review): 'dir' shadows the builtin of the same name
                dir = f"{files_dir}/{folder}"
                result = self.get_result(dir)
                expected_result = self.get_expected_result(dir)
                self.assertEqual(result, expected_result)
    def get_result(self, dir: str) -> str:
        """Parse <dir>/in.txt and return the object's attributes as a sorted-dict string."""
        with open(f"{dir}/in.txt", 'r') as in_file:
            securitytxt = SecurityTXT.from_text(in_file.read())
            # sort attributes by name so the string form is deterministic
            result = {k: v for k, v in sorted(securitytxt.__dict__.items(), key=operator.itemgetter(0))}
            return str(result)
    def get_expected_result(self, dir: str) -> str:
        """Return the expected output stored in <dir>/out.txt."""
        with open(f"{dir}/out.txt", 'r') as out_file:
            return out_file.read()
|
from tethys_sdk.base import TethysAppBase, url_map_maker
from tethys_sdk.app_settings import CustomSetting, PersistentStoreDatabaseSetting
class HydroshareTimeseriesManager(TethysAppBase):
    """
    Tethys app class for HydroShare Time Series Manager.
    """

    name = 'HydroShare Time Series Manager'
    index = 'hydroshare_timeseries_manager:home'
    icon = 'hydroshare_timeseries_manager/images/cuahsi_logo.png'
    package = 'hydroshare_timeseries_manager'
    root_url = 'hydroshare-timeseries-manager'
    color = '#004d99'
    description = 'The HydroShare Time Series Manager app helps you import time series data into HydroShare from WaterOneFlow services.'
    tags = 'Time Series, Referenced Time Series, HydroShare, WaterOneFlow, HIS'
    enable_feedback = False
    feedback_emails = []

    def url_maps(self):
        """
        Map the app's URLs (home page plus AJAX endpoints) to controllers.

        FIX: removed a stray triple-quoted string of dead, commented-out
        UrlMap entries that sat between the tuple and the return statement;
        it was evaluated as a no-op expression on every call.
        """
        UrlMap = url_map_maker(self.root_url)

        url_maps = (
            UrlMap(
                name='home',
                url='hydroshare-timeseries-manager',
                controller='hydroshare_timeseries_manager.controllers.home'
            ),
            UrlMap(
                name='ajax_update_table',
                url='hydroshare-timeseries-manager/ajax/update-table',
                controller='hydroshare_timeseries_manager.ajax_controllers.update_table'
            ),
            UrlMap(
                name='ajax_update_selections',
                url='hydroshare-timeseries-manager/ajax/update-selections',
                controller='hydroshare_timeseries_manager.ajax_controllers.update_selections'
            ),
            UrlMap(
                name='ajax_remove_timeseries',
                url='hydroshare-timeseries-manager/ajax/remove-timeseries',
                controller='hydroshare_timeseries_manager.ajax_controllers.remove_timeseries'
            ),
            UrlMap(
                name='ajax_add_session_data',
                url='hydroshare-timeseries-manager/ajax/add-session-data',
                controller='hydroshare_timeseries_manager.ajax_controllers.add_session_data'
            ),
            UrlMap(
                name='ajax_prepare_session_data',
                url='hydroshare-timeseries-manager/ajax/prepare-session-data',
                controller='hydroshare_timeseries_manager.ajax_controllers.prepare_session_data'
            ),
            UrlMap(
                name='ajax_update_resource_metadata',
                url='hydroshare-timeseries-manager/ajax/update-resource-metadata',
                controller='hydroshare_timeseries_manager.ajax_controllers.update_resource_metadata'
            ),
            UrlMap(
                name='ajax_create_resource',
                url='hydroshare-timeseries-manager/ajax/create-resource',
                controller='hydroshare_timeseries_manager.ajax_controllers.create_resource'
            ),
        )

        return url_maps

    def custom_settings(self):
        """
        Declare the app's custom settings: the HydroShare and HydroServer
        endpoint URLs, both required.
        """
        custom_settings = (
            CustomSetting(
                name='hydroshare_url',
                type=CustomSetting.TYPE_STRING,
                description='HydroShare URL',
                required=True
            ),
            CustomSetting(
                name='hydroserver_url',
                type=CustomSetting.TYPE_STRING,
                description='HydroServer URL',
                required=True
            ),
        )
        return custom_settings

    def persistent_store_settings(self):
        """
        Declare the app's persistent store database and its initializer.
        """
        ps_settings = (
            PersistentStoreDatabaseSetting(
                name='hydroshare_timeseries_manager',
                description='HydroShare Time Series Manager Database',
                initializer='hydroshare_timeseries_manager.model.init_hydroshare_timeseries_manager_db',
                required=True
            ),
        )
        return ps_settings
|
from django.core.mail import EmailMessage, SMTPConnection
from django.http import HttpResponse
from django.shortcuts import render_to_response
def no_template_view(request):
    # FIX(docstring): the old docstring claimed a rendered template is
    # returned; the view returns a plain HttpResponse with no template.
    "A simple view that returns a plain HttpResponse without using a template"
    return HttpResponse("No template used")
|
"""
The Cibin package.
"""
__version__ = "0.0.1"
from .cibin import *
|
import torch.nn as nn
from torchvision.models import resnet34, resnet18, resnet50 , resnet101, resnet152
from base import BaseModel
class ResNet152(BaseModel):
    """ResNet-152 backbone whose final FC layer is replaced with a fresh
    ``num_classes``-way classifier head."""

    def __init__(self, num_classes=196, use_pretrained=True):
        """
        :param num_classes: number of output categories for the new head
        :param use_pretrained: load pretrained backbone weights
        """
        # FIX: was super(BaseModel, self).__init__(), which starts the MRO
        # search *after* BaseModel and therefore skips BaseModel's own
        # initialization; super() runs the full chain correctly.
        super().__init__()
        self.model = resnet152(pretrained=use_pretrained)
        # replace last layer with total cars classes
        n_inputs = self.model.fc.in_features
        classifier = nn.Sequential(nn.Linear(n_inputs, num_classes))
        self.model.fc = classifier

    def forward(self, x):
        """Forward *x* through the wrapped ResNet."""
        return self.model(x)
class ResNet101(BaseModel):
    """ResNet-101 backbone whose final FC layer is replaced with a fresh
    ``num_classes``-way classifier head."""

    def __init__(self, num_classes=196, use_pretrained=True):
        """
        :param num_classes: number of output categories for the new head
        :param use_pretrained: load pretrained backbone weights
        """
        # FIX: was super(BaseModel, self).__init__(), which starts the MRO
        # search *after* BaseModel and therefore skips BaseModel's own
        # initialization; super() runs the full chain correctly.
        super().__init__()
        self.model = resnet101(pretrained=use_pretrained)
        # replace last layer with total cars classes
        n_inputs = self.model.fc.in_features
        classifier = nn.Sequential(nn.Linear(n_inputs, num_classes))
        self.model.fc = classifier

    def forward(self, x):
        """Forward *x* through the wrapped ResNet."""
        return self.model(x)
class ResNet50(BaseModel):
    """ResNet-50 backbone whose final FC layer is replaced with a fresh
    ``num_classes``-way classifier head."""

    def __init__(self, num_classes=196, use_pretrained=True):
        """
        :param num_classes: number of output categories for the new head
        :param use_pretrained: load pretrained backbone weights
        """
        # FIX: was super(BaseModel, self).__init__(), which starts the MRO
        # search *after* BaseModel and therefore skips BaseModel's own
        # initialization; super() runs the full chain correctly.
        super().__init__()
        self.model = resnet50(pretrained=use_pretrained)
        # replace last layer with total cars classes
        n_inputs = self.model.fc.in_features
        classifier = nn.Sequential(nn.Linear(n_inputs, num_classes))
        self.model.fc = classifier

    def forward(self, x):
        """Forward *x* through the wrapped ResNet."""
        return self.model(x)
class ResNet34(BaseModel):
    """ResNet-34 backbone whose final FC layer is replaced with a fresh
    ``num_classes``-way classifier head."""

    def __init__(self, num_classes=196, use_pretrained=True):
        """
        :param num_classes: number of output categories for the new head
        :param use_pretrained: load pretrained backbone weights
        """
        # FIX: was super(BaseModel, self).__init__(), which starts the MRO
        # search *after* BaseModel and therefore skips BaseModel's own
        # initialization; super() runs the full chain correctly.
        super().__init__()
        self.model = resnet34(pretrained=use_pretrained)
        # replace last layer with total cars classes
        n_inputs = self.model.fc.in_features
        classifier = nn.Sequential(nn.Linear(n_inputs, num_classes))
        self.model.fc = classifier

    def forward(self, x):
        """Forward *x* through the wrapped ResNet."""
        return self.model(x)
class ResNet18(BaseModel):
    """ResNet-18 backbone whose final FC layer is replaced with a fresh
    ``num_classes``-way classifier head."""

    def __init__(self, num_classes=196, use_pretrained=True):
        """
        :param num_classes: number of output categories for the new head
        :param use_pretrained: load pretrained backbone weights
        """
        # FIX: was super(BaseModel, self).__init__(), which starts the MRO
        # search *after* BaseModel and therefore skips BaseModel's own
        # initialization; super() runs the full chain correctly.
        super().__init__()
        self.model = resnet18(pretrained=use_pretrained)
        # replace last layer with total cars classes
        n_inputs = self.model.fc.in_features
        classifier = nn.Sequential(nn.Linear(n_inputs, num_classes))
        self.model.fc = classifier

    def forward(self, x):
        """Forward *x* through the wrapped ResNet."""
        return self.model(x)
|
from zeroconf import ServiceBrowser, ServiceListener, Zeroconf
import socket
"""
Class for mDNS. Will search for services on local network and once found each service
"""
class mDNS():
"""
Searches for service by type and name on local network, within default 3 second timeout
returns ipv4 address string, and int port
"""
@staticmethod
def serviceStatus(service, timeout = 3):
if service != None and len(service) == 2: # checks that type and name exist
zeroconf = Zeroconf()
info = zeroconf.get_service_info(service[0], service[1]) # inputs service type, and name
if info: # found
return ( (socket.inet_ntoa(info.addresses[0])) , info.port) # converts address from 32-bit binary to IPV4 string
else: # not found
return None, None
zeroconf.close() |
import argparse
import datetime
import os
import time
import paddle
import paddle.distributed as dist
import json
from pathlib import Path
from engine import train_one_epoch, evaluate
import utils
from dataset import CycleMLPdataset, build_transfrom
from losses import DistillationLoss, SoftTargetCrossEntropy, LabelSmoothingCrossEntropy
from data import Mixup
from create import create_model, create_optimizer_scheduler
def get_args_parser():
    """Build the argument parser for CycleMLP training and evaluation.

    Groups: model, optimizer, LR schedule, augmentation (AutoAugment,
    random-erase, mixup/cutmix), distillation, finetuning, dataset paths,
    and runtime flags (distributed, AMP).

    :return: an ``argparse.ArgumentParser`` with ``add_help=False`` so it
             can be used as a parent parser.
    """
    parser = argparse.ArgumentParser('CycleMLP training and evaluation script', add_help=False)
    parser.add_argument('--batch-size', default=64, type=int)
    parser.add_argument('--epochs', default=300, type=int)

    # Model parameters
    parser.add_argument('--model', default='CycleMLP_B1', type=str, metavar='MODEL',
                        help='Name of model to train')
    parser.add_argument('--input-size', default=224, type=int, help='images input size')
    parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
                        help='Dropout rate (default: 0.)')
    parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
                        help='Drop path rate (default: 0.1)')
    parser.add_argument('--num-classes', type=int, default=1000,
                        help='number of categories')
    parser.add_argument('--model-pretrained', type=str, default='',
                        help='local model parameter path')

    # Optimizer parameters
    parser.add_argument('--opt', default='AdamW', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "AdamW"')
    parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
                        help='Optimizer Epsilon (default: 1e-8)')
    parser.add_argument('--opt-beta1', default=None, type=float, nargs='+', metavar='BETA1',
                        help='Optimizer Beta1 (default: None, use opt default)')
    # FIX: help text said "Beta1" for the beta2 option
    parser.add_argument('--opt-beta2', default=None, type=float, nargs='+', metavar='BETA2',
                        help='Optimizer Beta2 (default: None, use opt default)')
    parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
                        help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--weight-decay', type=float, default=0.05,
                        help='weight decay (default: 0.05)')

    # Learning rate schedule parameters
    parser.add_argument('--sched', default='CosineAnnealingDecay', type=str, metavar='SCHEDULER',
                        help='LR scheduler (default: "CosineAnnealingDecay"')
    parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
                        help='learning rate (default: 5e-4)')
    parser.add_argument('--t-max', default=300, type=int,
                        help='the upper limit for training is half the cosine decay period, the default equal epochs')
    parser.add_argument('--eta-min', default=0, type=float,
                        help='the minimum value of the learning rate is ηmin in the formula, the default value is 0')
    parser.add_argument('--last-epoch', default=-1, type=int,
                        help='the epoch of the previous round is set to the epoch of the previous round when training is restarted.\
                        the default value is -1, indicating the initial learning rate ')

    # Augmentation parameters
    parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
                        help='Color jitter factor (default: 0.4)')
    # FIX: the help string contained a broken string-concatenation artifact
    # ('" + \ "') and a stray trailing-comma tuple after the call.
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original". (default: rand-m9-mstd0.5-inc1)')
    parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
    parser.add_argument('--train-interpolation', type=str, default='bicubic',
                        help='Training interpolation (random, bilinear, bicubic default: "bicubic")')

    # * Random Erase params
    parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
                        help='Random erase prob (default: 0.25)')

    # * Mixup params
    parser.add_argument('--mixup', type=float, default=0.8,
                        help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
    parser.add_argument('--cutmix', type=float, default=1.0,
                        help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
    parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
                        help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
    parser.add_argument('--mixup-prob', type=float, default=1.0,
                        help='Probability of performing mixup or cutmix when either/both is enabled')
    parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
                        help='Probability of switching to cutmix when both mixup and cutmix enabled')
    parser.add_argument('--mixup-mode', type=str, default='batch',
                        help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')

    # Distillation parameters
    parser.add_argument('--teacher-model', default='RegNetX_4GF', type=str, metavar='MODEL',
                        help='Name of teacher model to train (default: "RegNetX_4GF"')
    parser.add_argument('--teacher-pretrained', default=None, type=str,
                        help='teacher model parameters must be downloaded locally')
    parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help="")
    parser.add_argument('--distillation-alpha', default=0.5, type=float, help="")
    parser.add_argument('--distillation-tau', default=1.0, type=float, help="")

    # * Finetuning params
    parser.add_argument('--finetune', default='', help='finetune from checkpoint')

    # Dataset parameters
    parser.add_argument('--train-data-dir', default='./', type=str, help='image folder path')
    parser.add_argument('--train-txt-path', default='./train.txt', type=str,
                        help='image file name and label information file')
    parser.add_argument('--train-data-mode', default='train', type=str,
                        help="one of ['train', 'val', 'test'], the TXT file whether contains labels")
    parser.add_argument('--val-data-dir', default='./', type=str, help='image folder path')
    parser.add_argument('--val-txt-path', default='./val.txt', type=str,
                        help='image file name and label information file')
    parser.add_argument('--val-data-mode', default='val', type=str,
                        help="one of ['train', 'val', 'test'], the TXT file whether contains labels")
    parser.add_argument('--num_workers', default=0, type=int)
    parser.add_argument('--output_dir', default='./output',
                        help='path where to save, empty for no saving')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')

    # distributed training
    parser.add_argument('--is_distributed', default=False, type=bool,
                        help='whether to enable single-machine multi-card training')

    # custom parameters
    parser.add_argument('--is_amp', default=False, type=bool,
                        help='whether to enable automatic mixing precision training')
    parser.add_argument('--init_loss_scaling', default=1024, type=float,
                        help='initial Loss Scaling factor. The default value is 1024')
    return parser
def main(args):
    """Run the full CycleMLP training / evaluation loop.

    Builds data loaders, the model (plus optional distillation teacher),
    optimizer, scheduler and loss, then trains for ``args.epochs`` epochs,
    saving a checkpoint and evaluating after every epoch.

    :param args: parsed namespace from :func:`get_args_parser`
    """
    print(args)

    if args.distillation_type != 'none' and args.finetune:
        raise NotImplementedError("Finetuning with distillation not yet supported")

    # Build the training / validation datasets and loaders.
    train_transform = build_transfrom(is_train=True, args=args)
    train_dataset = CycleMLPdataset(args.train_data_dir, args.train_txt_path, mode=args.train_data_mode, transform=train_transform)
    data_loader_train = paddle.io.DataLoader(
        dataset=train_dataset,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        drop_last=True,
    )
    val_transform = build_transfrom(is_train=False, args=args)
    val_dataset = CycleMLPdataset(args.val_data_dir, args.val_txt_path, mode=args.val_data_mode, transform=val_transform)
    data_loader_val = paddle.io.DataLoader(
        dataset=val_dataset,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        drop_last=False
    )

    # Mixup / CutMix augmentation (also folds in label smoothing).
    mixup_fn = None
    mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
    if mixup_active:
        mixup_fn = Mixup(
            mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
            prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
            label_smoothing=args.smoothing, num_classes=args.num_classes)

    print(f"Creating model: {args.model}")
    model = create_model(
        args.model,
        pretrained=args.model_pretrained,
        is_teacher=False,
        num_classes=args.num_classes,
        drop_rate=args.drop,
        drop_path_rate=args.drop_path)

    # Configure the distillation teacher, if requested.
    teacher_model = None
    if args.distillation_type != 'none':
        print(f"Creating teacher model: {args.teacher_model}")
        teacher_model = create_model(
            args.teacher_model,
            pretrained=False,
            is_teacher=True,
            class_num=args.num_classes
        )
        # FIX: args.teacher_pretrained defaults to None and
        # os.path.exists(None) raises TypeError, so check for a value first.
        if args.teacher_pretrained and os.path.exists(args.teacher_pretrained):
            teacher_model.set_state_dict(paddle.load(args.teacher_pretrained))
        teacher_model.eval()

    get_world_size = 1
    # Distributed training (single machine, multiple cards).
    if args.is_distributed:
        dist.init_parallel_env()
        model = paddle.DataParallel(model)
        # FIX: the teacher was wrapped unconditionally, which crashed with
        # DataParallel(None) whenever distillation is disabled.
        if teacher_model is not None:
            teacher_model = paddle.DataParallel(teacher_model)
        get_world_size = dist.get_world_size()

    # Finetune from a local checkpoint.
    if args.finetune:
        # FIX: the existence check was inverted -- it printed the "download
        # it" message when the checkpoint WAS present and tried to load a
        # missing file otherwise.
        if not os.path.exists(args.finetune):
            print('You must download the finetune model and place it locally.')
        else:
            checkpoint = paddle.load(args.finetune)
            checkpoint_model = checkpoint
            state_dict = model.state_dict()
            # drop classifier weights whose shape no longer matches
            for k in ['head.weight', 'head.bias', 'head_dist.weight', 'head_dist.bias']:
                if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
                    print(f"Removing key {k} from pretrained checkpoint")
                    del checkpoint_model[k]
            # interpolate position embedding
            pos_embed_checkpoint = checkpoint_model['pos_embed']
            embedding_size = pos_embed_checkpoint.shape[-1]
            num_patches = model.patch_embed.num_patches
            num_extra_tokens = model.pos_embed.shape[-2] - num_patches
            # height (== width) for the checkpoint position embedding
            orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
            # height (== width) for the new position embedding
            new_size = int(num_patches ** 0.5)
            # class_token and dist_token are kept unchanged
            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
            # only the position tokens are interpolated
            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
            pos_tokens = pos_tokens.reshape((-1, orig_size, orig_size, embedding_size)).transpose((0, 3, 1, 2))
            pos_tokens = paddle.nn.functional.interpolate(
                pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
            pos_tokens = pos_tokens.transpose((0, 2, 3, 1)).flatten(1, 2)
            new_pos_embed = paddle.concat((extra_tokens, pos_tokens), axis=1)
            checkpoint_model['pos_embed'] = new_pos_embed
            model.set_state_dict(checkpoint_model)

    # Optimizer / scheduler: scale the LR linearly with the effective global
    # batch size (reference batch size 512).
    linear_scaled_lr = args.lr * args.batch_size * get_world_size / 512.0
    args.lr = linear_scaled_lr
    optimizer, scheduler = create_optimizer_scheduler(args, model)

    # setup automatic mixed-precision (AMP) loss scaling and op casting
    loss_scaler = None
    if args.is_amp:
        loss_scaler = paddle.amp.GradScaler(init_loss_scaling=args.init_loss_scaling)

    n_parameters = sum(p.numel() for p in model.parameters() if not p.stop_gradient).numpy()[0]
    print('number of params:', n_parameters)
    print('=' * 30)

    if args.mixup > 0.:
        # smoothing is handled with mixup label transform
        criterion = SoftTargetCrossEntropy()
    elif args.smoothing:
        criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
    else:
        criterion = paddle.nn.CrossEntropyLoss()

    # wrap the criterion in our custom DistillationLoss, which
    # just dispatches to the original criterion if args.distillation_type is 'none'
    criterion = DistillationLoss(
        criterion, teacher_model, args.distillation_type, args.distillation_alpha, args.distillation_tau
    )

    # Training loop.
    print(f"Start training for {args.epochs} epochs")
    start_time = time.time()
    max_accuracy = 0.0
    log_path = args.output_dir + "/train_log.txt"
    for epoch in range(args.start_epoch, args.epochs):
        train_stats = train_one_epoch(
            model, criterion, data_loader_train,
            optimizer, epoch, log_path, scheduler,
            loss_scaler, mixup_fn, args.is_distributed)
        # save a checkpoint every epoch
        if args.output_dir:
            utils.save_on_master({
                'pdparams': model.state_dict(),
                'pdopt': optimizer.state_dict(),
                'pdsched': scheduler.state_dict(),
                'pdepoch': epoch,
                'pdscaler': loss_scaler.state_dict() if loss_scaler is not None else None,
                'pdargs': args,
            }, args.output_dir + f'/checkpoint_{epoch}')
        # evaluate on the validation set
        test_stats = evaluate(data_loader_val, model, log_path)
        print(f"Accuracy of the network on the {len(val_dataset)} test images: {test_stats['acc1']:.1f}%")
        max_accuracy = max(max_accuracy, test_stats["acc1"])
        print(f'Max accuracy: {max_accuracy:.2f}%')
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     **{f'test_{k}': v for k, v in test_stats.items()},
                     'epoch': epoch,
                     'n_parameters': str(n_parameters)}
        # FIX: removed a leftover debug loop that printed
        # type(log_stats[key]) for every entry on every epoch.
        if args.output_dir and utils.is_main_process():
            with open(log_path, 'a') as f:
                f.write(json.dumps(log_stats) + "\n")

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
    # Re-parse with get_args_parser() as a parent so '-h' works here.
    parser = argparse.ArgumentParser('CycleMLP training and evaluation script', parents=[get_args_parser()])
    args = parser.parse_args()
    # Ensure the output directory exists before any checkpoint/log writes.
    if args.output_dir:
        Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    main(args)
from django.conf import settings

# Register this package's middleware by appending it to the host project's
# middleware chain at import time.
# NOTE(review): importing this module more than once would append the entry
# twice -- confirm callers import it only once.
settings.MIDDLEWARE += (
    'djangosessionnotifier.middleware.NotifierMiddleware',
)
|
from PIL import ImageGrab
import win32gui
import sys
import cv2 as cv
import numpy as np
def list_window_names():
    """Return a list of ``(hwnd, window_title)`` for all top-level windows."""
    win_list = []

    def enum_cb(hwnd, results):
        # EnumWindows requires the callback to accept the lParam argument,
        # but results are collected through the closure instead.
        win_list.append((hwnd, win32gui.GetWindowText(hwnd)))

    # FIX: a throwaway 'top_list' was passed as lParam and never used;
    # None is the idiomatic value when the extra argument is unneeded.
    win32gui.EnumWindows(enum_cb, None)
    return win_list
class WindowCapture:
    """Capture screenshots of a top-level window located by title substring."""

    def __init__(self, window_name):
        """Find the first window whose title contains *window_name*, bring it
        to the foreground and remember its bounding box.

        Exits the process when no matching window is found.
        """
        win_list = list_window_names()
        window = [(hwnd, title) for hwnd, title in win_list if window_name in title]
        # grab hwnd for first window with match
        try:
            window = window[0]
        except IndexError:
            sys.exit("No window Found, did you open the program?")
        # set window
        self.hwnd = window[0]
        win32gui.SetForegroundWindow(self.hwnd)
        # FIX: the rect was copied into an identical tuple afterwards
        # (a no-op); GetWindowRect already returns the 4-tuple we need.
        self.bbox = win32gui.GetWindowRect(self.hwnd)

    def get_screenshot(self):
        """
        Gets screenshot from window
        :return: the window contents as a BGR numpy array
        """
        # FIX: the grab result was passed through np.array twice; one
        # conversion from PIL image to ndarray is enough.
        screenshot = np.array(ImageGrab.grab(self.bbox))
        # the grab is RGB; convert to BGR for OpenCV
        screenshot = cv.cvtColor(screenshot, cv.COLOR_RGB2BGR)
        return screenshot
|
#!/usr/bin/env python3
"""
A simple script that fixes up post-pnr verilog and SDF files from VPR:
- Removes incorrect constants "1'b0" connected to unconnected cell port,
- Disconnects all unconnected outputs from the "DummyOut" net,
- appends a correct prefix for each occurrence of a binary string in round
brackets. For example "(010101)" is converted into "(6'b010101)".
When the option "--split-ports" is given the script also breaks all references
to wide cell ports into references to individual pins.
One shortcoming of the script is that it may treat a decimal value of 10, 100
etc. as binary. Fortunately decimal literals haven't been observed to appear
in Verilog files written by VPR.
"""
import argparse
import os
import re
# =============================================================================
def split_verilog_ports(code):
    """
    Splits assignments of individual nets to wide cell ports into assignments
    of those nets to 1-bit wide cell ports. Effectively splits cell ports as
    well.
    """

    def _expand(match):
        port = match.group("port")
        # Flatten the concatenation body and split it into individual nets.
        nets = match.group("conn").strip().replace("\n", "").split(",")
        nets = [net.strip() for net in nets]
        width = len(nets)
        # MSB first: net i maps to bit (width - 1 - i) of the port.
        pins = [
            ".\\{}[{}] ({} )".format(port, width - 1 - idx, net)
            for idx, net in enumerate(nets)
        ]
        return ", ".join(pins)

    return re.sub(
        r"\.(?P<port>\S+)\s*\(\s*{(?P<conn>[^}]+)}\s*\)",
        _expand,
        code,
        flags=re.DOTALL
    )
def split_sdf_ports(code):
    """
    Escapes square brackets in port names given in delay path specifications
    which results of indexed multi-bit ports being represented as individual
    single-bit ones.
    """

    def _escape(match):
        # Escape every bracket in the matched delay-path header.
        return match.group(0).replace("[", "\\[").replace("]", "\\]")

    pattern = (
        r"\((?P<keyword>SETUP|HOLD|IOPATH)\s+"
        r"(?P<port1>(\([^\)]*\))|\S+)\s+(?P<port2>(\([^\)]*\))|\S+)"
    )
    return re.sub(pattern, _escape, code)
# =============================================================================
def merge_verilog_ports(code):
    """Convert escaped single-bit ports back into multi-bit ports.

    Inverse of split_verilog_ports(): rewrites the module IO definition so
    that consecutive ``\\name[i]`` single-bit ports become one
    ``[max:min] name`` declaration, then rewrites every other escaped
    ``\\name[i]`` reference into an indexed ``name[i]`` reference.
    """
    # Shared between the nested substitution callbacks: maps a port base
    # name to its direction and the bit indices seen so far.
    to_merge = {}

    def module_def_sub_func(match):
        """
        Replaces single bit ports with multi-bit ports in module IO definition
        """

        def replace_ports(match):
            """
            Creates single multi-bit port definition
            """
            base = match.group("base")
            port_def = "\n    {} [{}:{}] {},".format(
                to_merge[base]["direction"], to_merge[base]["max"],
                to_merge[base]["min"], base
            )
            return port_def

        module_def_s = match.group(0)
        # Find all single bit ports that can be converted to multi bit ones
        matches = re.finditer(
            r"\s*(?P<direction>(input|output|inout))\s+"
            r"(?P<port>\\(?P<base>\S+)\[(?P<index>[0-9]+)\])", module_def_s
        )
        # Gather data about ports
        for match in matches:
            if (match is not None):
                base = match.group("base")
                index = match.group("index")
                direction = match.group("direction")
                if (base in to_merge.keys()):
                    # All bits of one port must share a direction.
                    assert direction == to_merge[base][
                        "direction"
                    ], "Port direction inconsistency for port {}".format(base)
                    to_merge[base]["ids"].append(int(index))
                    if (int(index) < to_merge[base]["min"]):
                        to_merge[base]["min"] = int(index)
                    if (int(index) > to_merge[base]["max"]):
                        to_merge[base]["max"] = int(index)
                else:
                    to_merge[base] = {
                        "direction": direction,
                        "ids": [int(index)],
                        "min": int(index),
                        "max": int(index)
                    }
        # Check index consistency: indices must form a contiguous range
        # [min, max] with no gaps.
        for base, specs in to_merge.items():
            specs["ids"].sort()
            assert list(
                range(specs["min"], specs["max"] + 1)
            ) == specs["ids"], "Port indexes inconsistency for port {}".format(
                base
            )
        # Replace zero-indexed ports with multi-bit ports
        module_def_s = re.sub(
            r"\s*(?P<direction>(input|output|inout))\s+"
            r"(?P<port>\\(?P<base>\S+)\[(?P<index>0)\]\s*,?)", replace_ports,
            module_def_s
        )
        # remove non-zero-indexed ports
        module_def_s = re.sub(
            r"\s*(?P<direction>(input|output|inout))\s+"
            r"(?P<port>\\(?P<base>\S+)\[(?P<index>[1-9]+[0-9]*)\]\s*,?)", "",
            module_def_s
        )
        # Ensure that there is no colon at the last line of the module IO definition
        module_def_s = re.sub(r",\s*\)\s*;", ");", module_def_s)
        return module_def_s

    def port_usage_sub_func(match):
        """
        Rewrites one escaped single-bit reference as an indexed reference.
        """
        base = match.group("base")
        index = match.group("index")
        # Preserve whatever whitespace character followed the reference.
        trailing_ws = match.group(0)[-1]
        port_usage = "{}[{}]{}".format(base, index, trailing_ws)
        return port_usage

    # Find module IO definition and substitute it
    code = re.sub(
        r"\s*module\s+\S+\s*\([\s\S]*?\);",
        module_def_sub_func,
        code,
        flags=re.DOTALL
    )
    # Find all other occurrences of escaped identifiers for single bit ports
    # and substitute them with indexed multi bit ports
    code = re.sub(
        r"\\(?P<base>\S+)\[(?P<index>[0-9]+)\]\s",
        port_usage_sub_func,
        code,
        flags=re.DOTALL
    )
    return code
# =============================================================================
def main():
    """CLI entry point: fix up a post-pnr Verilog netlist and/or SDF file.

    See the module docstring for the list of transformations applied.
    """
    # Parse arguments
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        "--vlog-in", type=str, default=None, help="Input Verilog file"
    )
    parser.add_argument(
        "--vlog-out", type=str, default=None, help="Output Verilog file"
    )
    parser.add_argument(
        "--sdf-in", type=str, default=None, help="Input SDF file"
    )
    parser.add_argument(
        "--sdf-out", type=str, default=None, help="Output SDF file"
    )
    parser.add_argument(
        "--split-ports", action="store_true", help="Split multi-bit ports"
    )
    args = parser.parse_args()
    # Check args
    if not args.vlog_in and not args.sdf_in:
        print("Please provide at least one of --vlog-in, --sdf-in")
        exit(1)
    # Process Verilog netlist
    if args.vlog_in:
        # Read the input verilog file
        with open(args.vlog_in, "r") as fp:
            code = fp.read()
        # Remove connection to 1'b0 from all ports. Do this before fixing up
        # binary string prefixes so binary parameters won't be affected
        code = re.sub(
            r"\.(?P<port>\w+)\s*\(\s*1'b0\s*\)", r".\g<port>(1'bZ)", code
        )
        # Remove connections to the "DummyOut" net
        code = re.sub(
            r"\.(?P<port>\w+)\s*\(\s*DummyOut\s*\)", r".\g<port>()", code
        )
        # Fixup multi-bit port connections: apply the same 1'b0 -> 1'bZ /
        # DummyOut removal inside concatenations ({...}) as above.
        def sub_func_1(match):
            port = match.group("port")
            conn = match.group("conn")
            conn = re.sub(r"1'b0", "1'bZ", conn)
            conn = re.sub(r"DummyOut", "", conn)
            return ".{}({{{}}})".format(port, conn)
        code = re.sub(
            r"\.(?P<port>\w+)\s*\(\s*{(?P<conn>[^}]*)}\s*\)", sub_func_1, code
        )
        # Prepend binary literal prefixes, e.g. "(0101)" -> "(4'b0101)".
        # NOTE: may also treat decimal 10, 100, ... as binary (see module
        # docstring).
        def sub_func(match):
            assert match is not None
            value = match.group("val")
            # Collapse separators "_"
            value = value.replace("_", "")
            # Add prefix, format the final string
            lnt = len(value)
            return "({}'b{})".format(lnt, value)
        code = re.sub(r"\(\s*(?P<val>[01_]+)\s*\)", sub_func, code)
        # Write the output verilog file
        fname = args.vlog_out
        if not fname:
            # Default output name: <input>.fixed<ext>
            root, ext = os.path.splitext(args.vlog_in)
            fname = "{}.fixed{}".format(root, ext)
        # Split ports
        if args.split_ports:
            code = split_verilog_ports(code)
        with open(fname, "w") as fp:
            fp.write(code)
        # Make sure ports are not split: also emit a merged-port variant
        # alongside, named <output>.no_split<ext>.
        code = merge_verilog_ports(code)
        root, ext = os.path.splitext(fname)
        fname = "{}.no_split{}".format(root, ext)
        with open(fname, "w") as fp:
            fp.write(code)
    # Process SDf file
    if args.sdf_in:
        # Read the input SDF file
        with open(args.sdf_in, "r") as fp:
            code = fp.read()
        # Split ports
        if args.split_ports:
            code = split_sdf_ports(code)
        # Write the output SDF file
        fname = args.sdf_out
        if not fname:
            root, ext = os.path.splitext(args.sdf_in)
            fname = "{}.fixed{}".format(root, ext)
        with open(fname, "w") as fp:
            fp.write(code)
# =============================================================================
# Run the fixup when invoked as a script.
if __name__ == "__main__":
    main()
|
import os.path, gzip
from whoosh import analysis, fields
from whoosh.support.bench import Bench, Spec
class VulgarTongue(Spec):
    """Benchmark spec for a gzipped latin-1 dictionary corpus.

    Entries start with an alphabetic headword line terminated at the first
    '.'; continuation lines begin with a non-alphabetic character.
    """
    name = "dictionary"
    filename = "dcvgr10.txt.gz"
    headline_field = "head"

    def documents(self):
        """Yield {'head', 'body'} dicts, one per dictionary entry."""
        path = os.path.join(self.options.dir, self.filename)
        f = gzip.GzipFile(path)
        head = body = None
        for line in f:
            line = line.decode("latin1")
            if line[0].isalpha():
                # A new headword begins; emit the previous entry first.
                if head:
                    yield {"head": head, "body": head + body}
                # NOTE(review): raises ValueError if a headword line lacks
                # a '.'; assumes the corpus is well-formed.
                head, body = line.split(".", 1)
            else:
                # Continuation line of the current entry.
                # NOTE(review): would fail (None + str) if the file started
                # with a non-alphabetic line.
                body += line
        # Emit the final entry.
        if head:
            yield {"head": head, "body": head + body}

    def whoosh_schema(self):
        """Whoosh schema: stored headword ID plus stemmed body text."""
        ana = analysis.StemmingAnalyzer()
        #ana = analysis.StandardAnalyzer()
        schema = fields.Schema(head=fields.ID(stored=True),
                               body=fields.TEXT(analyzer=ana, stored=True))
        return schema

    def zcatalog_setup(self, cat):
        """Create the equivalent ZCatalog indexes."""
        from zcatalog import indexes  #@UnresolvedImport
        cat["head"] = indexes.FieldIndex(field_name="head")
        cat["body"] = indexes.TextIndex(field_name="body")
# Run the benchmark when executed directly.
if __name__ == "__main__":
    Bench().run(VulgarTongue)
|
# encoding: latin2
"""global inequality change test
"""
__author__ = "Juan C. Duque, Alejandro Betancourt"
__credits__ = "Copyright (c) 2009-11 Juan C. Duque"
__license__ = "New BSD License"
__version__ = "1.0.0"
__maintainer__ = "RiSE Group"
__email__ = "contacto@rise-group.org"
__all__ = ['inequalityDynamic']
from theilIndex import theil
import numpy
import itertools
def interregionalInequalityTestOneVariable(Y, area2region, permutations=9999):
    """Pseudo p-values for the between-region Theil component per period.

    For every column (period) of Y, compares the observed between-region
    inequality (tb) against `permutations` random reassignments of the
    areas' values, counting permutations with tb at least as large.

    :param Y: dict mapping area id -> list of values, one per period.
        NOTE(review): the period count is taken from Y[0], so a key 0 is
        assumed to exist -- confirm against callers.
    :param area2region: mapping of areas to regions (as used by theil()).
    :param permutations: number of random permutations (default 9999).
    :returns: list of pseudo p-values, one per period.
    """
    def getVar(Y, possition):
        # Extract a single period as {area: [value]}.
        result = {}
        for k in Y:
            result[k] = [Y[k][possition]]
        return result

    def shuffleMap(Y):
        # Randomly reassign the observed value lists to areas.
        # list() materializes the view: Python 3 dict views cannot be
        # shuffled in place by numpy.random.shuffle.
        values = list(Y.values())
        numpy.random.shuffle(values)
        return dict(zip(list(Y.keys()), values))

    results = []
    for nv1 in range(len(Y[0])):
        var = getVar(Y, nv1)
        t1, tb1, tw1 = theil(var, area2region)
        # Start at 1: the observed value counts as one permutation.
        numerator = 1
        for _ in range(permutations):
            var = shuffleMap(var)
            t2, tb2, tw2 = theil(var, area2region)
            if tb1 <= tb2:
                numerator += 1
        results.append(numerator / float(permutations + 1))
    return results
def interregionalInequalityTest(Y, fieldNames, area2regions, clusteringNames, outFile, permutations=9999):
    """Interregional inequality tests over time (p-values)
    This function examines whether the differences across a set of clustering
    solutions are significant. For more information on this function see
    [Rey_Sastre2010] (this function recreates Table 5 in that paper).
    Layer.inequality('interregionalInequalityTest', vars, area2regions, outFile=, <permutations>)
    :keyword vars: List with variables to be analyzed; e.g: ['Y1978', 'Y1979', 'Y1980', 'Y1981']
    :type vars: list
    :keyword area2regions: variables in Layer containing regionalization schemes e.g.: ["arisel1", "arisel2", "arisel3", "BELS"]
    :type area2regions: list
    :keyword outFile: Name for the output file; e.g.: "regionsDifferenceTest.csv"
    :type fileName: string
    :keyword permutations: Number of random spatial permutations. Default value permutations = 9999.
    :type permutations: integer
    :rtype: None
    :return: None
    **Example 1** ::
    import clusterpy
    china = clusterpy.importArcData("clusterpy/data_examples/china")
    china.inequality('interregionalInequalityTest',['Y1978', 'Y1979', 'Y1980', 'Y1981'], ['BELS','T78-98','T78-85'], "interregional_inequality_test.csv")
    """
    print "Creating interregional Inequality Test [Rey_Sastre2010 - Table 5]"
    fout = open(outFile,"w")
    # CSV header: one column per analyzed variable.
    line = "," + ",".join(fieldNames) + "\n"
    fout.write(line)
    # One output row per clustering scheme.
    for ni, i in enumerate(area2regions[0]):
        # NOTE(review): 'x' iterates over the *elements* of area2regions and
        # is then used to index area2regions -- this only works if those
        # elements are valid keys/indices; verify against callers.
        area2region = [area2regions[x][ni] for x in area2regions]
        results = interregionalInequalityTestOneVariable(Y, area2region, permutations=permutations)
        results = [str(x) for x in results]
        line = clusteringNames[ni] + "," + ",".join(results) + "\n"
        fout.write(line)
    fout.close()
    print "interregional Inequality Test created!"
    return None
|
def solveQuestion(stepSize, insertCount):
    """Simulate the 'spinlock' circular buffer (AoC 2017 day 17, part 1).

    Starting from [0], repeatedly step stepSize positions around the
    circular buffer and insert the next value (1..insertCount) just after
    the current position.

    :param stepSize: number of positions to step before each insertion
    :param insertCount: how many values to insert
    :return: the value immediately after the last inserted value
    """
    currentPos = 0
    buffer = [0]
    for value in range(1, insertCount + 1):
        # Step around the ring, then insert just after the landing spot.
        currentPos = (currentPos + stepSize) % len(buffer) + 1
        buffer.insert(currentPos, value)
    # Wrap around explicitly: the original indexed buffer[currentPos + 1]
    # directly, which raises IndexError whenever the last value lands at
    # the end of the buffer.
    return buffer[(currentPos + 1) % len(buffer)]
# Puzzle input: step size 345 with 2017 insertions.
print(solveQuestion(345, 2017))
|
# coding:utf-8
import MySQLdb
class MysqlHelper:
    """Thin MySQLdb wrapper for one-shot CRUD queries (Python 2 syntax).

    NOTE(review): every method closes ``self.conn`` after its first use, so
    an instance supports exactly one statement; later calls fail on the
    closed connection, and the ``rollback()`` in the error path runs after
    ``close()`` and will itself raise.  The mutable defaults (params=[])
    are shared across calls.
    """
    def __init__(self,host='localhost',port=3306,db='meizi',user='root',passwd='123456',charset='utf8'):
        # Connect eagerly with the given credentials.
        self.conn = MySQLdb.connect(host=host, port=port, db=db, user=user, passwd=passwd, charset=charset)

    def insert(self,sql,params):
        # Create: delegate to the shared execute/commit helper.
        return self.__cud(sql,params)

    def update(self,sql,params):
        # Update: delegate to the shared execute/commit helper.
        return self.__cud(sql,params)

    def delete(self,sql,params):
        # Delete: delegate to the shared execute/commit helper.
        return self.__cud(sql,params)

    def __cud(self,sql,params=[]):
        # Execute a data-modifying statement and commit; returns the
        # affected row count, or None on error.
        try:
            cs1 = self.conn.cursor()
            rows = cs1.execute(sql, params)
            self.conn.commit()
            cs1.close()
            self.conn.close()
            return rows
        except Exception, e:
            print e
            # NOTE(review): rollback on an already-closed connection raises.
            self.conn.rollback()

    def fetchone(self, sql, params=[]):
        # Run a query and return the first result row (or None).
        try:
            cs1 = self.conn.cursor()
            cs1.execute(sql, params)
            row = cs1.fetchone()
            cs1.close()
            self.conn.close()
            return row
        except Exception, e:
            print e

    def fetchall(self, sql, params=[]):
        # Run a query and return all result rows.
        try:
            cs1 = self.conn.cursor()
            cs1.execute(sql, params)
            rows = cs1.fetchall()
            cs1.close()
            self.conn.close()
            return rows
        except Exception, e:
            print e
|
# Train TRPO (TensorFlow 1 implementation) on MuJoCo Ant-v2.
from spinup import trpo_tf1 as trpo
import tensorflow as tf
import gym

# Environment factory: spinup constructs envs lazily from this callable.
env_fn = lambda : gym.make('Ant-v2')
# Two hidden layers of 64 ReLU units for the actor-critic networks.
ac_kwargs = dict(hidden_sizes=[64,64], activation=tf.nn.relu)
# Output directory for progress/checkpoints plus the experiment name.
logger_kwargs = dict(output_dir='../data/trpo/bant_4000_750', exp_name='ant_trpo')
# 4000 environment steps per epoch for 750 epochs (3M steps total).
trpo(env_fn=env_fn, ac_kwargs=ac_kwargs, steps_per_epoch=4000, epochs=750, logger_kwargs=logger_kwargs)
import torch
from torch import nn
from ...config import prepare_config
from collections import OrderedDict
from .resnet_fpn_extractor import ResnetFPNExtractor
from .predictor import BaselinePredictor
from .postprocessor import LocMaxNMSPostprocessor
# Registries mapping config "type" strings to component classes.
EXTRACTORS = {
    "resnet_fpn": ResnetFPNExtractor,
}
PREDICTORS = {
    "baseline": BaselinePredictor,
}
POSTPROCESSORS = {
    "loc_max_nms": LocMaxNMSPostprocessor,
}
class Detector(nn.Module):
    """Detector wrapper module.
    Can be constructed from config file via :meth:`get_default_config`.
    Call :classmeth:get_descriptions or :classmeth:get_descriptions_string
    for available components descriptions.
    :meth:`forward` sequentially applies extractor and predictor
    :meth:`predict` sequentially applies extractor, predictor and postprocessor
    If constructed from config:
    Config:
        extractor:
            type: type of extractor. Default: resnet_fpn.
            config: config of extractor.
        predictor:
            type: type of predictor. Default: baseline.
            config: config of predictor.
        postprocessor:
            type: type of postprocessor. Default: loc_max_nms.
            config: config of postprocessor.
    The number if input channels of predictor is determined dynamically ("in_channels" config is set).
    This number is also the number of channels in the embedding tensor, and is saved
    in :attr:`embedding_channels`.
    """

    @staticmethod
    def get_default_config():
        # Default component selection; per-component configs are left empty
        # and filled in by each component's own defaults.
        return OrderedDict([
            ("extractor", {
                "type": "resnet_fpn",
                "config": {},
            }),
            ("predictor", {
                "type": "baseline",
                "config": {},
            }),
            ("postprocessor", {
                "type": "loc_max_nms",
                "config": {},
            }),
        ])

    def __init__(self, config=None):
        super().__init__()
        config = prepare_config(self, config)
        self.config = config
        self.extractor = EXTRACTORS[config["extractor"]["type"]](config["extractor"]["config"])
        # Probe the extractor with a dummy 129x129 RGB input to discover the
        # embedding channel count; eval() + no_grad avoid batch-norm updates
        # and gradient tracking during the probe.
        self.extractor.eval()
        with torch.no_grad():
            embedding_channels = self.extractor(torch.rand(1,3,129,129))["embedding_t"].shape[1]
        self.embedding_channels = embedding_channels
        # Wire the discovered channel count into the predictor's config.
        config["predictor"]["config"]["in_channels"] = embedding_channels
        self.extractor.train()
        self.predictor = PREDICTORS[config["predictor"]["type"]](config["predictor"]["config"])
        self.postprocessor = POSTPROCESSORS[config["postprocessor"]["type"]](config["postprocessor"]["config"])

    def forward(self, x):
        # Raw predictions: extractor features -> predictor outputs.
        x = self.extractor(x)
        x = self.predictor(x)
        return x

    def predict(self, x, **postprocessor_kwargs):
        # Full inference path: forward pass plus postprocessing.
        x = self.forward(x)
        x = self.postprocessor(x, **postprocessor_kwargs)
        return x

    @staticmethod
    def get_descriptions():
        # Collect each registered component's docstring, grouped by kind.
        desc = {}
        for category, collection\
                in zip(["extractors", "predictors", "postprocessors"], [EXTRACTORS, PREDICTORS, POSTPROCESSORS]):
            desc[category] = [{"name": name, "description": module.__doc__} for name, module in collection.items()]
        return desc

    @classmethod
    def get_descriptions_string(cls):
        # Human-readable, tab-indented rendering of get_descriptions().
        desc = cls.get_descriptions()
        desc_str = ""
        for category, collection in desc.items():
            desc_str += f"{category}:\n"
            for item in collection:
                name = item["name"]
                description = item["description"]
                desc_str += f"\t{name}:\n"
                description = str(description)
                for line in description.split("\n"):
                    desc_str += f"\t\t{line}\n"
        return desc_str
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 1 08:32:09 2020
@author: ayjab
"""
import torch
import time
import numpy as np
from lib import my_models,fast_wrappers
# Build the wrapped Atari environment (Deepmind-style preprocessing,
# 4-frame stack, unclipped rewards) and load the trained agent on CPU.
env_name = 'PongNoFrameskip-v4'
env = fast_wrappers.make_atari(env_name,skip_noop=True, skip_maxskip=True)
env = fast_wrappers.wrap_deepmind(env, pytorch_img=True, frame_stack=True, frame_stack_count=4, clip_rewards=False)
net = my_models.DuelNoisyDQN(env.observation_space.shape, env.action_space.n)
net.load_state_dict(torch.load('pong_1_4_.dat', map_location='cpu'))
@torch.no_grad()
def play(env, net=None):
    """Play one rendered episode and print the total reward.

    :param env: Gym environment to play in.
    :param net: optional Q-network; greedy argmax actions if given,
        otherwise random actions are sampled.
    """
    state = np.array(env.reset())
    rewards = 0.0
    while True:
        env.render()
        # Slow rendering down to roughly watchable speed.
        time.sleep(0.02)
        if net is not None:
            # Greedy action from the Q-network (batch of one state).
            stateV = torch.FloatTensor([state])
            action = net(stateV).argmax(dim=-1).item()
        else:
            action = env.action_space.sample()
        next_state,reward,done,_= env.step(action)
        rewards += reward
        if done:
            # Episode over: report the total score.
            print(rewards)
            break
        state = np.array(next_state)
    # Keep the final frame visible briefly before closing the window.
    time.sleep(0.5)
    env.close()
# Watch the trained agent play one episode.
if __name__=='__main__':
    play(env, net)
#!/usr/bin/python
# everscan/everscan.py
from modules.scanning import ScanningManager
from modules.evernote import EvernoteManager
from modules.interface import InterfaceManager
from modules.imaging import ImagingManager
class EverscanMaster:
    """
    Everscan master class.
    Facilitates communication between child objects.
    """
    def __init__(self):
        # Initialize child manager objects.  Each manager receives a
        # reference to this master so they can reach one another through it.
        self.m_scanning = ScanningManager(self)
        self.m_evernote = EvernoteManager(self)
        self.m_interface = InterfaceManager(self)
        self.m_imaging = ImagingManager(self)
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
def display(d):
    """Print a 'display' command: opcode 01 plus 8 copies of data byte d,
    repeated for all 4 rows, prefixed with '#0'."""
    row = '01' + ('%02x' % d) * 8
    print('#0' + row * 4)
def brightness(b):
    """Print a 'brightness' command: opcode 02 plus 8 copies of level byte
    b, repeated for all 4 rows, prefixed with '#0'."""
    row = '02' + ('%02x' % b) * 8
    print('#0' + row * 4)
# 2-D framebuffer: 8 rows x 24 columns of segment-pattern bytes.
panel = [[0 for x in range(24)] for y in range(8)]
# Convert the 8x24 logical panel into the per-armor layout of the
# 7SegArmor hardware (6 armor boards, each 4 rows x 8 digits).
def to_real_panel(panel):
    """
    <armor0>         <armor1>         <armor2>
    panel[0][0:8]    panel[0][8:16]   panel[0][16:24]
    panel[1][0:8]    panel[1][8:16]   panel[1][16:24]
    panel[2][0:8]    panel[2][8:16]   panel[2][16:24]
    panel[3][0:8]    panel[3][8:16]   panel[3][16:24]
    <armor3>         <armor4>         <armor5>
    panel[4][0:8]    panel[4][8:16]   panel[4][16:24]
    panel[5][0:8]    panel[5][8:16]   panel[5][16:24]
    panel[6][0:8]    panel[6][8:16]   panel[6][16:24]
    panel[7][0:8]    panel[7][8:16]   panel[7][16:24]

    Armors are numbered row-major over a 2x3 grid of boards; the 54-line
    hand-written table this replaces encoded exactly this arithmetic.
    """
    real_panel = []
    for block_row in range(2):          # top / bottom row of armor boards
        for block_col in range(3):      # left / middle / right board
            armor = [
                panel[block_row * 4 + y][block_col * 8:(block_col + 1) * 8]
                for y in range(4)
            ]
            real_panel.append(armor)
    return real_panel
# Convert the per-armor layout into the raw command byte stream.
def to_armor_command(real_panel, finger_cmd):
    """Flatten real_panel into command bytes: each of the 6 armors' 4 rows
    is emitted as finger_cmd followed by its 8 digit bytes (9 bytes/row)."""
    command = []
    for armor in range(6):
        for y in range(4):
            command.append(finger_cmd)
            command.extend(real_panel[armor][y][x] for x in range(8))
    return command
# Convert a logical panel straight into the command byte stream.
def panel_to_command(panel, finger_cmd):
    """Compose to_real_panel() and to_armor_command()."""
    return to_armor_command(to_real_panel(panel), finger_cmd)
import spidev
import time
# LATCH pin selection: GPIO26 (physical pin 37) drives the display latch.
import RPi.GPIO as GPIO
# Address pins by BCM function name (e.g. GPIO26) instead of board number.
GPIO.setmode(GPIO.BCM)
# Configure the latch pin as an output.
GPIO.setup(26, GPIO.OUT)
def reverse_bit_order(x):
    """Return byte x with its 8 bits mirrored (bit 7 <-> bit 0, etc.)."""
    mirrored = 0
    for bit in range(8):
        if x & (1 << bit):
            mirrored |= 1 << (7 - bit)
    return mirrored
spi = spidev.SpiDev()
spi.open(0, 0)
spi.mode = 0
spi.max_speed_hz = 1000000 # 1MHz

start_time = time.time()

# Lower the brightness.
# NOTE(review): this name shadows the brightness() helper defined above.
#brightness = 255//8
brightness = 255//3
# Power-on test frame: every segment lit at that brightness.
panel = [[brightness for x in range(24)] for y in range(8)]
xfer_data = panel_to_command(panel, 0x02)
# NOTE(review): map() returns a list on Python 2 only; the slicing below
# would need list(map(...)) on Python 3.
xfer_data = map(reverse_bit_order, xfer_data)
# Send the frame one armor board (36 bytes) at a time.
for i in range(6):
    spi.writebytes(xfer_data[i*36:(i+1)*36])
    time.sleep(0.001)
time.sleep(0.006)
# Update the display (latch pulse: low -> high -> low).
GPIO.output(26, GPIO.HIGH)
GPIO.output(26, GPIO.LOW)
print('Brightness = %d' % brightness)
# 7-segment patterns for digits 0-9, one bit per segment.
# NOTE(review): exact bit-to-segment mapping assumed from hardware docs --
# confirm against the 7SegArmor board documentation.
num_to_pattern = [
    0xfc, # 0
    0x60, # 1
    0xda, # 2
    0xf2, # 3
    0x66, # 4
    0xb6, # 5
    0xbe, # 6
    0xe4, # 7
    0xfe, # 8
    0xf6, # 9
]
import csv
# Spectrum data: one CSV row per frame, one magnitude per display column.
#f = open('kemono_60fps_32768pt.csv', 'rb')
#f = open('kemono_original.csv', 'rb')
#f = open('kemono_4096pt_30fps.csv', 'rb')
#f = open('kemono_4096pt_60fps.csv', 'rb')
#f = open('mtank_4096pt_60fps.csv', 'rb')
#f = open('mtank_4096pt_30fps.csv', 'rb')
#f = open('mtank_4096pt_20fps.csv', 'rb')
#f = open('mtank_8192pt_20fps.csv', 'rb')
#f = open('mtank_8192pt_30fps.csv', 'rb')
#f = open('mtank_8192pt_60fps.csv', 'rb')
f = open('mtank_16384pt_60fps.csv', 'rb')
reader = csv.reader(f)
print('CSV: [OK]')
# Main render loop: one CSV row per displayed frame.
#
# This replaces a ~250-line if/elif chain (plus an equally long dead,
# commented-out variant) with the arithmetic it encoded: each column shows
# a bar that grows by one full row per 3 spectrum units, topped with a
# partial-brightness level (LEVEL1..LEVEL3) for the remainder.  It also
# fixes the Python 3 incompatibility of slicing a map() object, hoists the
# loop-invariant LEVEL* constants out of the frame loop, and corrects the
# spec==23 branch, which set row 1 to LEVEL3 instead of LEVEL4 -- an
# apparent copy-paste typo inconsistent with every neighboring branch.
# Assumes non-negative spectrum values (true for magnitude data).
count = 0

# '&0xFE' here would drop the decimal-point segment from every pattern.
# mask = 0xFE
mask = 0xFF
LEVEL0 = 0x00 & mask
LEVEL1 = 0x11 & mask
LEVEL2 = 0x2B & mask
LEVEL3 = 0xED & mask
LEVEL4 = 0x6D & mask
# Partial top-of-bar levels, indexed by (value - 1) % 3.
PARTIAL_LEVELS = (LEVEL1, LEVEL2, LEVEL3)

for row in reader:
    count += 1
    # Scale the raw magnitudes down to bar units (int() truncates).
    spec = [int(int(v) * 1.0 / 1.5) for v in row]
    panel = [[0 for x in range(24)] for y in range(8)]
    limit = 24
    for x in range(limit):
        # Values >= 24 all render as the tallest bar (row 0 at LEVEL3).
        v = min(spec[x], 24)
        if v <= 0:
            for y in range(8):
                panel[y][x] = LEVEL0
        else:
            full_rows = (v - 1) // 3                # rows lit at LEVEL4
            partial = PARTIAL_LEVELS[(v - 1) % 3]   # level of the top row
            for y in range(8):
                if y >= 8 - full_rows:
                    panel[y][x] = LEVEL4
                elif y == 7 - full_rows:
                    panel[y][x] = partial
                else:
                    panel[y][x] = LEVEL0
        # The bottom row always shows the value as a digit (capped at 9).
        bottom_num = min(spec[x] // 3, 9)
        panel[7][x] = num_to_pattern[bottom_num]
    # Frame counter in the top-right corner, 5 decimal digits.
    for digit in range(5):
        panel[0][23 - digit] = num_to_pattern[count // (10 ** digit) % 10]
    xfer_data = panel_to_command(panel, 0x01)
    # List comprehension (not bare map) so slicing works on Python 3 too.
    xfer_data = [reverse_bit_order(b) for b in xfer_data]
    # Send the frame one armor board (36 bytes) at a time.
    for i in range(6):
        spi.writebytes(xfer_data[i * 36:(i + 1) * 36])
        #time.sleep(0.001)
    #time.sleep(0.015)
    time.sleep(0.005)   # 60fps
    #time.sleep(0.025)  # 30fps
    #time.sleep(0.040)  # 20fps
    # Update the display (latch pulse: low -> high -> low).
    GPIO.output(26, GPIO.HIGH)
    GPIO.output(26, GPIO.LOW)
    #time.sleep(0.100)

elapsed_time = time.time() - start_time
print('%f [s]' % elapsed_time)
print('fps = %f' % (count / elapsed_time))
# 表示消去
panel = [[0 for x in range(24)] for y in range(8)]
xfer_data = panel_to_command(panel, 0x01)
xfer_data = map(reverse_bit_order, xfer_data)
for i in range(6):
spi.writebytes(xfer_data[i*36:(i+1)*36])
time.sleep(0.001)
time.sleep(0.010)
GPIO.output(26, GPIO.HIGH)
GPIO.output(26, GPIO.LOW)
GPIO.cleanup()
print('Done.')
|
import json
import re
from gendocs import DEPRECATED_INFO_FILE, DeprecatedInfo, INTEGRATION_DOCS_MATCH, findfiles, process_readme_doc, \
index_doc_infos, DocInfo, gen_html_doc, process_release_doc, process_extra_readme_doc, \
INTEGRATIONS_PREFIX, get_deprecated_data, insert_approved_tags_and_usecases, \
find_deprecated_integrations, get_blame_date, get_deprecated_display_dates, \
get_fromversion_data, add_deprected_integrations_info, merge_deprecated_info, get_extracted_deprecated_note
from mdx_utils import verify_mdx, fix_mdx, start_mdx_server, stop_mdx_server, verify_mdx_server, fix_relative_images, normalize_id
import os
import pytest
from datetime import datetime
import dateutil.relativedelta
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
SAMPLE_CONTENT = f'{BASE_DIR}/test_data/sample-content'
@pytest.fixture(scope='module')
def mdx_server():
    """Module-scoped fixture: start the MDX server once for all tests in this
    module, yield it to the tests, then shut it down at module teardown."""
    yield start_mdx_server()
    print('Cleaning up MDX server')
    stop_mdx_server()
def test_verify_mdx():
    """verify_mdx should reject a readme with an unclosed JSX tag."""
    # Bug fix: the failure assertion now lives in the `else` clause. In the
    # original it sat inside the `try`, so its own AssertionError was caught
    # by the except handler and re-checked against the JSX message, producing
    # a misleading failure instead of "should fail verify".
    try:
        verify_mdx(f'{BASE_DIR}/test_data/bad-mdx-readme.md')
    except Exception as ex:
        assert 'Expected corresponding JSX closing tag' in str(ex)
    else:
        assert False, 'should fail verify'
def test_verify_mdx_server(mdx_server):
    """verify_mdx_server accepts a good readme and rejects bad MDX."""
    with open(f'{BASE_DIR}/test_data/good-readme.md', mode='r', encoding='utf-8') as f:
        data = f.read()
    verify_mdx_server(data)
    # test bad readme
    with open(f'{BASE_DIR}/test_data/bad-mdx-readme.md', mode='r', encoding='utf-8') as f:
        data = f.read()
    # Bug fix: `assert False` moved to the `else` clause so its AssertionError
    # is not swallowed by the except handler (which would mis-report the
    # failure as a message mismatch). The try body is also narrowed to the
    # call under test only.
    try:
        verify_mdx_server(data)
    except Exception as ex:
        assert 'Expected corresponding JSX closing tag' in str(ex)
    else:
        assert False, 'should fail verify'
def test_fix_mdx():
    """fix_mdx self-closes void tags, strips HTML comments, keeps valid pairs."""
    # void tags get self-closed
    fixed = fix_mdx('this<br>is<hr>')
    assert '<br/>' in fixed
    assert '<hr/>' in fixed
    # HTML comments are removed entirely; surrounding text survives
    fixed = fix_mdx('<!-- html comment \n here --> some text <!-- another comment here-->')
    assert 'some text ' in fixed
    assert '<!--' not in fixed
    assert '-->' not in fixed
    assert 'html comment' not in fixed
    # every occurrence is rewritten, not just the first
    fixed = fix_mdx('multiple<br>values<br>')
    assert '<br>' not in fixed
    # an already-balanced pair is left untouched
    fixed = fix_mdx('valide br: <br></br>')
    assert '<br></br>' in fixed
def test_fix_relative_images(tmp_path):
    """Relative image links are rewritten and the image files copied out."""
    readme = f'{SAMPLE_CONTENT}/Packs/GoogleCalendar/Integrations/GoogleCalendar/README.md'
    with open(readme, 'r') as f:
        content = f.read()
    res = fix_relative_images(content, f'{SAMPLE_CONTENT}/Packs/GoogleCalendar/Integrations/GoogleCalendar',
                              'google-calendar', str(tmp_path), 'relative-test')
    target_img_name = 'google-calendar-_-__-__-doc_files-add-scope-admin-3.png'
    assert f'relative-test/{target_img_name}' in res
    # Bug fix: the isfile() check was computed but never asserted, so a
    # missing copied image could not fail the test.
    assert os.path.isfile(tmp_path / target_img_name)
    # test a readme that shouldn't change
    readme = f'{SAMPLE_CONTENT}/Integrations/Gmail/README.md'
    with open(readme, 'r') as f:
        content = f.read()
    res = fix_relative_images(content, f'{SAMPLE_CONTENT}/Integrations/Gmail', 'google-calendar', str(tmp_path), 'relative-test')
    assert res == content
def test_fix_relative_images_html_img(tmp_path):
    """Relative image paths inside raw HTML <img> tags are also rewritten."""
    readme = f'{SAMPLE_CONTENT}/Packs/ProofpointServerProtection/Integrations/ProofpointProtectionServerV2/README.md'
    with open(readme, 'r') as f:
        content = f.read()
    res = fix_relative_images(content, f'{SAMPLE_CONTENT}/Packs/ProofpointServerProtection/Integrations/ProofpointProtectionServerV2',
                              'proofpoint-test', str(tmp_path), 'relative-test')
    target_img_name = 'proofpoint-test-_-__-__-doc_imgs-api_role.png'
    assert f'relative-test/{target_img_name}' in res
    # Bug fix: the isfile() check was computed but never asserted, so a
    # missing copied image could not fail the test.
    assert os.path.isfile(tmp_path / target_img_name)
def test_findfiles():
    """findfiles locates integration readmes in every supported layout."""
    found = findfiles(INTEGRATION_DOCS_MATCH, SAMPLE_CONTENT)
    expected_paths = (
        f'{SAMPLE_CONTENT}/Packs/CortexXDR/Integrations/PaloAltoNetworks_XDR/README.md',
        f'{SAMPLE_CONTENT}/Integrations/SMIME_Messaging/readme.md',
        f'{SAMPLE_CONTENT}/Integrations/PhishLabsIOC_DRP/README.md',
        f'{SAMPLE_CONTENT}/Beta_Integrations/SymantecDLP/README.md',
        f'{SAMPLE_CONTENT}/Integrations/integration-F5_README.md',
    )
    for path in expected_paths:
        assert path in found
def test_process_readme_doc(tmp_path):
    """End-to-end check of process_readme_doc on several sample readmes."""
    res = process_readme_doc(str(tmp_path), SAMPLE_CONTENT, 'integrations',
                             str(tmp_path), "dummy-relative", f'{SAMPLE_CONTENT}/Integrations/DomainTools_Iris/README.md')
    assert res.id == 'domain-tools-iris'
    assert res.description
    assert res.name == 'DomainTools Iris'
    # the generated markdown must start with docusaurus front matter
    with open(str(tmp_path / f'{res.id}.md'), 'r') as f:
        assert f.readline().startswith('---')
        assert f.readline().startswith(f'id: {res.id}')
        assert f.readline().startswith(f'title: "{res.name}"')
        assert f.readline().startswith('custom_edit_url: https://github.com/demisto/content/')
        content = f.read()
        # the dummy relative-images prefix must not leak into the output
        assert 'dummy-relative' not in content
    # a readme without an accompanying yml is reported via error_msg
    res = process_readme_doc(str(tmp_path), BASE_DIR, 'integrations', str(tmp_path), "dummy-relative", f'{BASE_DIR}/test_data/empty-readme.md')
    assert 'no yml file found' in res.error_msg
    # these two should simply process without raising
    process_readme_doc(str(tmp_path), SAMPLE_CONTENT,
                       'integrations', str(tmp_path), "dummy-relative",
                       f'{SAMPLE_CONTENT}/Integrations/SlashNextPhishingIncidentResponse/README.md')
    process_readme_doc(str(tmp_path), SAMPLE_CONTENT, 'integrations',
                       str(tmp_path), "dummy-relative", f'{SAMPLE_CONTENT}/Integrations/Gmail/README.md')
def test_process_readme_doc_same_dir(tmp_path):
    """A *_README.md sitting next to its yml file is processed correctly."""
    doc_info = process_readme_doc(str(tmp_path), SAMPLE_CONTENT, 'integrations',
                                  str(tmp_path), "dummy-relative",
                                  f'{SAMPLE_CONTENT}/Integrations/integration-F5_README.md')
    assert doc_info.id == 'f5-firewall'
    assert doc_info.description
    assert doc_info.name == 'F5 firewall'
    with open(str(tmp_path / f'{doc_info.id}.md'), 'r') as f:
        # front-matter header lines, in order
        expected_prefixes = (
            '---',
            f'id: {doc_info.id}',
            f'title: "{doc_info.name}"',
            'custom_edit_url: https://github.com/demisto/content/',
        )
        for prefix in expected_prefixes:
            assert f.readline().startswith(prefix)
        assert 'dummy-relative' not in f.read()
def test_process_readme_doc_edl(tmp_path):
    """The EDL management integration resolves to its full display name."""
    doc_info = process_readme_doc(
        str(tmp_path), SAMPLE_CONTENT, 'integrations', str(tmp_path), "dummy-relative",
        f'{SAMPLE_CONTENT}/Integrations/PaloAltoNetworks_PAN_OS_EDL_Management/README.md')
    assert doc_info.name == 'Palo Alto Networks PAN-OS EDL Management'
def test_process_readme_doc_playbookl(tmp_path):
    """A playbook readme yields the playbook's display name and description."""
    # (the function name keeps its historic 'playbookl' typo so test ids stay stable)
    doc_info = process_readme_doc(
        str(tmp_path), SAMPLE_CONTENT, 'integrations', str(tmp_path), "dummy-relative",
        f'{SAMPLE_CONTENT}/Playbooks/playbook-lost_stolen_device_README.md')
    assert doc_info.name == 'Lost / Stolen Device Playbook'
    assert 'Initial incident details should be the name of the reporting person' in doc_info.description
def test_process_code_script(tmp_path):
    """A script readme is processed like an integration readme."""
    res = process_readme_doc(str(tmp_path), SAMPLE_CONTENT,
                             'integrations', str(tmp_path), "dummy-relative",
                             f'{SAMPLE_CONTENT}/Scripts/script-IsIPInRanges_README.md')
    assert res.id == 'is-ip-in-ranges'
    assert res.description
    assert res.name == 'IsIPInRanges'
    # the generated markdown must start with docusaurus front matter
    with open(str(tmp_path / f'{res.id}.md'), 'r') as f:
        assert f.readline().startswith('---')
        assert f.readline().startswith(f'id: {res.id}')
        assert f.readline().startswith(f'title: "{res.name}"')
        assert f.readline().startswith('custom_edit_url: https://github.com/demisto/content/')
        content = f.read()
        # the dummy relative-images prefix must not leak into the output
        assert 'dummy-relative' not in content
def test_table_doc_info():
    """index_doc_infos renders newlines as <br/> and links to the doc id."""
    infos = [
        DocInfo('test', 'Test Integration', "this is a description\nwith new line", "test/README.md"),
    ]
    table = index_doc_infos(infos, 'integrations')
    assert '<br/>' in table
    assert '(integrations/test)' in table  # verify link
def test_gen_html():
    """gen_html_doc escapes each newline into a literal backslash-n."""
    html = gen_html_doc("""
This is line 1
This is line 2
""")
    assert 'This is line 1\\n' in html
def test_bad_html():
    """gen_html_doc must escape raw '>=' sequences from the source readme."""
    # Bug fix: close the file handle -- the bare open(...).read() leaked the
    # descriptor until interpreter shutdown.
    with open(f'{SAMPLE_CONTENT}/Integrations/Vectra_v2/README.md', encoding='utf-8') as f:
        bad_html = f.read()
    assert '>=' in bad_html
    res = gen_html_doc(bad_html)
    assert '>=' not in res
def test_normalize_id():
    """normalize_id strips punctuation/slashes and joins words with dashes."""
    cases = {
        "that's not good": 'thats-not-good',
        "have i been pwned? v2": 'have-i-been-pwned-v2',
        "path/with/slash/and..-dots": 'pathwithslashand-dots',
    }
    for raw, expected in cases.items():
        assert normalize_id(raw) == expected
def test_process_release_doc(tmp_path, mdx_server):
    """A release doc dated last month is processed with correct front matter."""
    last_month = datetime.now() + dateutil.relativedelta.relativedelta(months=-1)
    # NOTE(review): '%-m' (no zero padding) is a glibc strftime extension and
    # fails on Windows -- assumes CI runs on Linux/macOS; confirm.
    version = last_month.strftime('%y.%-m.0')
    release_file = f'{os.path.dirname(os.path.abspath(__file__))}/extra-docs/releases/{version}.md'
    res = process_release_doc(str(tmp_path), release_file)
    assert res.id == version
    assert res.description.startswith('Published on')
    assert res.name == f'Content Release {version}'
    # the generated markdown must start with docusaurus front matter
    with open(str(tmp_path / f'{res.id}.md'), 'r') as f:
        assert f.readline().startswith('---')
        assert f.readline().startswith(f'id: {res.id}')
        assert f.readline().startswith(f'sidebar_label: "{res.id}"')
        assert f.readline().startswith('custom_edit_url: https://github.com/demisto/content-docs/blob/master/content-repo/extra-docs/releases')
def test_process_release_doc_old(tmp_path, mdx_server):
    """Release notes older than the cutoff are skipped (None returned)."""
    old_release = f'{os.path.dirname(os.path.abspath(__file__))}/extra-docs/releases/18.9.1.md'
    assert process_release_doc(str(tmp_path), old_release) is None
def test_process_extra_doc(tmp_path, mdx_server):
    """An extra-docs article gets an id/description/name and front matter."""
    release_file = f'{os.path.dirname(os.path.abspath(__file__))}/extra-docs/integrations/remote-access.md'
    res = process_extra_readme_doc(str(tmp_path), INTEGRATIONS_PREFIX, release_file)
    assert not res.error_msg
    assert res.id == 'remote-access'
    assert res.description.startswith('File transfer and execute commands')
    assert res.name == 'Remote Access'
    # the generated markdown must start with docusaurus front matter
    with open(str(tmp_path / f'{res.id}.md'), 'r') as f:
        assert f.readline().startswith('---')
        assert f.readline().startswith(f'id: {res.id}')
        assert f.readline().startswith(f'title: "{res.name}"')
        assert f.readline().startswith('custom_edit_url: https://github.com/demisto/content-docs/blob/master/content-repo/extra-docs/integrations')
def test_process_private_doc(tmp_path, mdx_server):
    """Private-pack readmes (private_packs=True) are processed normally."""
    readme_file_path = (f'{SAMPLE_CONTENT}/Packs/HelloWorldPremium/Playbooks'
                        f'/playbook-Handle_Hello_World_Premium_Alert_README.md')
    doc_info = process_extra_readme_doc(str(tmp_path), 'Playbooks', readme_file_path, private_packs=True)
    assert not doc_info.error_msg
    assert doc_info.id == 'handle-hello-world-premium-alert'
    assert doc_info.description.startswith('This is a playbook which will handle the alerts')
    assert doc_info.name == 'Handle Hello World Premium Alert'
    with open(str(tmp_path / f'{doc_info.id}.md'), 'r') as f:
        # front-matter header lines, in order
        for prefix in ('---', f'id: {doc_info.id}', f'title: "{doc_info.name}"'):
            assert f.readline().startswith(prefix)
def test_get_deprecated_data():
    """get_deprecated_data extracts the replacement hint from descriptions."""
    res = get_deprecated_data({"deprecated": True}, "Deprecated - We recommend using ServiceNow v2 instead.", "README.md")
    assert "We recommend using ServiceNow v2 instead." in res
    # a non-deprecated yml yields an empty string
    assert get_deprecated_data({"deprecated": False}, "stam", "README.md") == ""
    # lowercase 'use ...' is capitalized in the extracted note
    res = get_deprecated_data({"deprecated": True}, "Deprecated: use Shodan v2 instead. Search engine for Internet-connected devices.", "README.md")
    assert "Use Shodan v2 instead" in res
    res = get_deprecated_data({"deprecated": True}, "Deprecated. Use The Generic SQL integration instead.", "README.md")
    assert "Use The Generic SQL integration instead" in res
    # a playbook in the DeprecatedContent pack: the trailing text is dropped
    res = get_deprecated_data({}, "Deprecated. Add information about the vulnerability.", "Packs/DeprecatedContent/Playbooks/test-README.md")
    assert "Add information" not in res
@pytest.mark.parametrize("test_input, expected", [
    # fromversion above 5.0 produces a "Supported versions" admonition
    ({'fromversion': '5.5.0'},
     ':::info Supported versions\nSupported '
     'Cortex XSOAR versions: 5.5.0 and later.\n:::\n\n'),
    # 5.0.0, missing, or older versions produce nothing
    ({'fromversion': '5.0.0'}, ''),
    ({}, ''),
    ({'fromversion': '4.0.0'}, '')])
def test_get_fromversion_data(test_input, expected):
    """get_fromversion_data renders the supported-versions admonition."""
    res = get_fromversion_data(test_input)
    assert res == expected
def test_insert_approved_tags_and_usecases(tmp_path):
    """
    Given:
        - Approved tags and usecases lists
        - Content docs article
    When:
        - Inserting approved tags and usecases to the content docs article
    Then:
        - Ensure the approved tags and use cases are added to the content docs article as expected
    """
    documentation_dir = tmp_path / 'docs' / 'documentation'
    documentation_dir.mkdir(parents=True)
    pack_docs = documentation_dir / 'pack-docs.md'
    pack_docs.write_text("""
***Use-case***
***Tags***
""")
    content_repo_dir = tmp_path / 'content-repo'
    content_repo_dir.mkdir()
    approved_usecases = content_repo_dir / 'approved_usecases.json'
    approved_usecases.write_text(json.dumps({
        'approved_list': [
            'Hunting',
            'Identity And Access Management'
        ]
    }))
    approved_tags = content_repo_dir / 'approved_tags.json'
    approved_tags.write_text(json.dumps({
        'approved_list': [
            'IoT',
            'Machine Learning'
        ]
    }))
    # Bug fix: restore the original working directory afterwards. The bare
    # os.chdir() leaked the cwd change into every test that ran after this
    # one (and into tmp_path cleanup).
    prev_cwd = os.getcwd()
    os.chdir(str(content_repo_dir))
    try:
        insert_approved_tags_and_usecases()
    finally:
        os.chdir(prev_cwd)
    with open(str(pack_docs), 'r') as pack_docs_file:
        pack_docs_file_content = pack_docs_file.read()
        assert '***Use-case***' in pack_docs_file_content
        assert '<details>' in pack_docs_file_content
        assert '<summary>Pack Use-cases</summary>' in pack_docs_file_content
        assert 'Hunting' in pack_docs_file_content
        assert 'Identity And Access Management' in pack_docs_file_content
        assert '***Tags***' in pack_docs_file_content
        assert '<summary>Pack Tags</summary>' in pack_docs_file_content
        assert 'IoT' in pack_docs_file_content
        assert 'Machine Learning' in pack_docs_file_content
        assert '</details>' in pack_docs_file_content
def test_get_blame_date():
    """git blame on line 6 of the deprecated AlienVault yml dates to Jan 2021."""
    blame_date = get_blame_date(
        SAMPLE_CONTENT,
        f'{SAMPLE_CONTENT}/Packs/DeprecatedContent/Integrations/integration-AlienVaultOTX.yml',
        6)
    assert (blame_date.year, blame_date.month) == (2021, 1)
SAMPLE_CONTENT_DEP_INTEGRATIONS_COUNT = 7
def test_find_deprecated_integrations():
    """All deprecated integrations in the sample content are discovered."""
    infos = find_deprecated_integrations(SAMPLE_CONTENT)
    assert len(infos) == SAMPLE_CONTENT_DEP_INTEGRATIONS_COUNT
    # every entry carries a 2021 maintenance start date
    assert all('2021' in info['maintenance_start'] for info in infos)
def test_add_deprected_integrations_info(tmp_path):
    """Deprecated-integration info is rendered into the md doc and dumped to JSON.

    (The 'deprected' typo mirrors the function name in gendocs.)
    """
    deprecated_doc = tmp_path / "deprecated_test.md"
    deprecated_info = tmp_path / "deprecated_info_test.json"
    # start from an empty curated-info file
    with open(deprecated_info, "wt") as f:
        json.dump({"integrations": []}, f)
    add_deprected_integrations_info(SAMPLE_CONTENT, str(deprecated_doc), str(deprecated_info), str(tmp_path))
    with open(deprecated_doc, "rt") as f:
        dep_content = f.read()
    # one section per deprecated integration in the generated markdown
    assert len(re.findall('Maintenance Mode Start Date', dep_content)) == SAMPLE_CONTENT_DEP_INTEGRATIONS_COUNT
    # the JSON dump next to the doc holds the same number of entries
    with open(tmp_path / "deprecated_test.json", 'r') as f:
        dep_json = json.load(f)
    assert len(dep_json['integrations']) == SAMPLE_CONTENT_DEP_INTEGRATIONS_COUNT
def test_merge_deprecated_info():
    """Curated entries from DEPRECATED_INFO_FILE win over generated ones."""
    generated = [
        DeprecatedInfo(id="mssql", name="test1 name"),
        DeprecatedInfo(id="test2", name="test2 name"),
    ]
    merged = {info['id']: info for info in merge_deprecated_info(generated, DEPRECATED_INFO_FILE)}
    # curated name overrides the generated one
    assert merged['mssql']['name'] == 'SQL Server'
    # entries only in the generated list survive untouched
    assert merged['test2']['name'] == "test2 name"
    # curated-only entries are kept as well
    assert merged['slack']['name'] == "Slack"
def test_get_deprecated_display_dates():
    """Display dates derived from a Dec 30, 2020 deprecation timestamp."""
    start, end = get_deprecated_display_dates(datetime(2020, 12, 30))
    assert start == "Jan 01, 2021"
    assert end == "Jul 01, 2021"
def test_get_extracted_deprecated_note():
    """The text following the 'Deprecated.' marker is extracted as the note."""
    cases = [
        ('Human-vetted, Phishing-specific Threat Intelligence from Phishme. Deprecated. '
         'Use the Cofense Intelligence integration instead.',
         'Use the Cofense Intelligence integration instead.'),
        ('Deprecated. Vendor has stopped this service. No available replacement.',
         'Vendor has stopped this service. No available replacement.'),
    ]
    for description, expected_note in cases:
        assert get_extracted_deprecated_note(description) == expected_note
|
"""Settings and configuration
"""
import os
from copy import deepcopy
import yaml
HOME = os.path.expanduser('~')
"""Full path to your home directory."""
BASE = f"{HOME}/github"
"""Local directory where all your local clones of GitHub repositories are stored."""
ORG = "among"
"""Organization on GitHub in which this code/data repository resides.
This is also the name of the parent directory of your local clone
of this repo.
"""
REPO = "fusus"
"""Name of this code/data repository."""
REPO_DIR = f"{BASE}/{ORG}/{REPO}"
"""Directory of the local repo.
This is where this repo resides on your computer.
Note that we assume you have followed the convention
that it is in your home directory, and then
`github/among/fusus`.
"""
PROGRAM_DIR = f"{REPO_DIR}/{REPO}"
"""The subdirectory in the repo that contains the `fusus` Python package`.
"""
LOCAL_DIR = f"{REPO_DIR}/_local"
"""Subdirectory containing unpublished input material.
This is material that we cannot make public in this repo.
This directory is not pushed to the online repo,
by virtue of its being in the `.gitignore` of this repo.
See also `UR_DIR`.
"""
SOURCE_DIR = f"{LOCAL_DIR}/source"
"""Subdirectory containing source texts.
Here are the sources that we cannot make public in this repo.
See also `UR_DIR` and `LOCAL_DIR`.
"""
UR_DIR = f"{REPO_DIR}/ur"
"""Subdirectory containing the public source texts.
Here are the sources that we can make public in this repo.
See also `SOURCE_DIR`.
"""
ALL_PAGES = "allpages"
KRAKEN = dict(
modelPath=f"{REPO_DIR}/model/arabic_generalized.mlmodel"
)
COLORS = dict(
greyGRS=200,
blackGRS=0,
blackRGB=(0, 0, 0),
whiteGRS=255,
whiteRGB=(255, 255, 255),
greenRGB=(0, 255, 0),
orangeRGB=(255, 127, 0),
purpleRGB=(255, 0, 127),
blockRGB=(0, 255, 255),
letterRGB=(0, 200, 200),
upperRGB=(0, 200, 0),
lowerRGB=(200, 0, 0),
horizontalStrokeRGB=(0, 128, 255),
verticalStrokeRGB=(255, 128, 0),
marginGRS=255,
marginRGB=(200, 200, 200),
cleanRGB=(255, 255, 255),
cleanhRGB=(220, 220, 220),
boxDeleteRGB=(240, 170, 20),
boxDeleteNRGB=(140, 70, 0),
boxRemainRGB=(170, 240, 40),
boxRemainNRGB=(70, 140, 0),
)
"""Named colors. """
BAND_COLORS = dict(
main=(40, 40, 40),
inter=(255, 200, 200),
broad=(0, 0, 255),
high=(128, 128, 255),
mid=(128, 255, 128),
low=(255, 128, 128),
)
"""Band colors.
Each band will be displayed in its own color.
"""
STAGES = dict(
orig=("image", True, None, None, None),
gray=("image", False, None, None, None),
blurred=("image", False, None, None, None),
normalized=("image", False, None, "proofDir", ""),
normalizedC=("image", True, None, None, None),
layout=("image", True, None, None, None),
histogram=("image", True, None, None, None),
demargined=("image", False, None, None, None),
demarginedC=("image", True, None, None, None),
markData=("data", None, "tsv", None, None),
boxed=("image", True, None, None, None),
cleanh=("image", False, None, None, None),
clean=("image", False, None, "cleanDir", ""),
binary=("image", False, None, None, None),
char=("data", None, "tsv", "proofDir", None),
word=("data", None, "tsv", "outDir", ""),
line=("data", None, "tsv", "proofDir", "line"),
proofchar=("link", True, "html", "proofDir", "char"),
proofword=("link", True, "html", "proofDir", ""),
)
"""Stages in page processing.
When we process a scanned page,
we produce named intermediate stages,
in this order.
The stage data consists of the following bits of information:
* kind: image or data (i.e. tab separated files with unicode data).
* colored: True if colored, False if grayscale, None if not an image
* extension: None if an image file, otherwise the extension of a data file, e.g. `tsv`
"""
SETTINGS = dict(
debug=0,
inDir="in",
outDir="out",
interDir="inter",
cleanDir="clean",
proofDir="proof",
textDir="text",
marksDir="marks",
blurX=21,
blurY=21,
marginThresholdX=1,
contourFactor=0.3,
contourOffset=0.04,
peakProminenceY=5,
peakSignificant=0.1,
peakTargetWidthFraction=0.5,
valleyProminenceY=5,
outerValleyShiftFraction=0.3,
blockMarginX=12,
accuracy=0.8,
connectBorder=4,
connectThreshold=200 * 200,
connectRatio=0.1,
boxBorder=3,
maxHits=5000,
bandMain=(5, -5),
bandInter=(5, 5),
bandBroad=(-15, 10),
bandMid=(10, -5),
bandHigh=(10, 30),
bandLow=(-10, -10),
defaultLineHeight=200,
)
"""Customizable settings.
These are the settings that can be customized in several ways.
The values here are the default values.
When the pipeline is run in a book directory, it will look
for a file `parameters.yaml` in the toplevel directory of the book
where these settings can be overridden.
In a program or notebook you can also make last-minute changes to these parameters by
calling the `fusus.book.Book.configure` method which calls the
`Config.configure` method.
The default values can be inspected by expanding the source code.
!!! caution "Two-edged sword"
When you change a parameter to improve a particular effect on a particular page,
it may wreak havoc with many other pages.
When you tweak, take care that you do it locally,
on a single book, or a single page.
debug
: Whether to show (intermediate) results.
If `0`: shows nothing, if `1`: shows end result, if `2`: shows intermediate
results.
inDir
: name of the subdirectory with page scans
outDir
: name of the subdirectory with the final results of the workflow
interDir
: name of the subdirectory with the intermediate results of the workflow
cleanDir
: name of the subdirectory with the cleaned, blockwise images of the workflow
marksDir
: name of the subdirectory with the marks
skewBorder
: the width of the page margins that will be whitened in order to
suppress the sharp black triangles introduces by skewing the page
blurX
: the amount of blur in the X-direction.
Blurring is needed to get better histograms
Too much blurring will hamper the binarization, see e.g. page 102 in the
examples directory: if you blur with 41, 41 binarization fails.
blurY
: the amount of blur in the Y-direction.
Blurring is needed to get better skewing and histograms.
!!! caution "Amount of Y-blurring"
Too much vertical blurring will cause the disappearance of horizontal
bars from the histogram. Footnote bars will go undetected.
Too little vertical blurring will result in ragged histograms,
from which it is difficult to get vertical line boundaries.
marginThresholdX
: used when interpreting horizontal histograms.
When histograms for horizontal lines cross marginThresholdX, it will be taken as an
indication that a line boundary (upper or lower) has been reached.
contourFactor
: used when computing left and right contour lines of a page.
Each horizontal line as a left most black pixel and a rightmost one.
Together they form the left contour and the right contour of the page.
The length of each line is the distance between the left contour and right contour
points on that line.
However, to be useful, the contour lines must be smoothed.
We look up and down from each contour point and replace it by the median value of
the contour points above and below that point.
How far do we have to look?
We want to neutralize the interline spaces, so we look up and down for a fraction
of the line height.
That fraction is specified by this parameter.
A proxy for the line height is the peak distance.
peakSignificant
: used when interpreting histograms for line detection
When we look for significant peaks in a histogram, we determine the max peak height.
Significant peaks are those that have a height greater than a specific fraction
of the max peak height. This parameter states that fraction.
peakTargetWidthFraction
: used when interpreting histograms for line detection
When we have studied the significant peaks and found the regular distance between
successive peaks, we use that to pass as the `distance` parameter to the SciPy
[find_peaks](https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html#scipy.signal.find_peaks)
algorithm. We get the best results if we do not pass the line height itself, but a
fraction of it. This parameter is that fraction.
peakProminenceY, valleyProminenceY
: used when interpreting histograms for line detection
We detect peaks and valleys in the histogram by means of a SciPy algorithm,
to which we pass a prominence parameter.
This will leave out minor peaks and valleys.
outerValleyShiftFraction
: used when interpreting histograms for line detection
The valleys at the outer ends of the histogram tend to be very broad and hence
the valleys will be located too far from the actual ink.
We correct for that by shifting those valleys a fraction of their plateau sizes
towards the ink. This parameter is that fraction.
defaultLineHeight
: used for line detection
After line detection, a value for the line height is found and stored in this
parameter. The parameter is read when there is only one line on a page, in
which case the line detection algorithm has too little information.
If this occurs at the very first calculation of line heights, a fixed
default value is used.
accuracy
: When marks are searched for in the page, we get the result in the form
of a grayscale page where the value in each point reflects how much
the page in that area resembles the mark.
Only hits above the value of *accuracy* will be considered.
connectBorder
: When marks are found, each hit will be inspected:
is the ink in the hit connected to the ink outside the hit?
This will be measured in an inner and outer border of the page,
whose thickness is given by this parameter.
connectThreshold
: After computing inner and outer borders,
they will be inverted, so that black has the maximum
value. Then the inside and outside borders are multiplied pixel wise,
so that places where they are both black get very high values.
All places where this product is higher than the value of *connectThreshold*
are retained for further calculation.
connectRatio
: After computing the places where inner and outer borders contain
joint ink, the ratio of such places with respect to the total border size
is calculated. If that ratio is greater than *connectRatio*,
the hit counts as connected to its surroundings.
We have not found a true instance of the mark, and the mark will not be cleaned.
boxBorder
: The hits after searching for marks will be indicated on the `boxed` stage
of the page by means of a small coloured border around each hit.
The width of this border is *boxBorder*.
maxHits
: When searching for marks, there are usually multiple hits: the place where
the mark occurs, and some places very nearby.
The cleaning algorithm will cluster nearby hits and pick the best
hit per cluster.
But if the number of hits is very large, it is a sign that the mark
is not searched with the right accuracy, and clustering will be
prevented. It would become very expensive, and useless anyway.
A warning will be issued in such cases.
bandMain
: Offsets for the `main` band. Given as `(top, bottom)`, with
`top` and `bottom` positive or negative integers.
This band covers most of the ink in a line.
The `main` band is computed from the histogram after which the
height of the top and bottom boundaries are adjusted relative to the
values obtained by the histogram algorithm.
You can adjust these values: higher values move the boundaries down,
lower values move them up.
In practice, the adjustments are zero for the main band, while
all other bands are derived from the main band by applying adjustments.
bandInter
: Offsets for the `inter` band.
This band covers most of the white between two lines.
The `inter` band is computed from the histogram.
bandBroad
: Offsets for the `broad` band.
This band is like `main` but covers even more ink in a line.
bandMid
: Offsets for the `mid` band.
This band is like `main` but covers the densest part in a line.
bandHigh
: Offsets for the `high` band.
This band is like `inter` but covers the upper part of the letters and the white
space above it.
bandLow
: Offsets for the `low` band.
This band is like `inter` but covers the lower part of the letters and the white
space below it.
"""
MARK_PARAMS = dict(acc="accuracy", bw="connectBorder", r="connectRatio")
CONFIG_FILE = "parameters.yaml"
LINE_CLUSTER_FACTOR = 0.46
"""Clustering characters into lines in the Lakhnawi PDF.
When the characters on a page are divided into lines based on their height,
this parameter determines which heights can be clustered together.
Heights that differ less than the estimated average line height times this
factor, can be clustered together.
!!! caution "tolerance"
When you choose a value of `0.44` or lower for this parameter,
line 8 on the title page will go wrong.
When you choose higher values, you run the risk of clustering
the characters of multiple lines into one line.
"""
class Config:
    def __init__(self, tm, **params):
        """Settings manager.
        It will expose all settings as attributes to the rest
        of the application.
        It has methods to collect modified settings from the
        user and apply them.
        The default settings are kept as a separate copy that
        will not be changed in any way.
        User modifications act on the current settings,
        which have been obtained by deep-copying the defaults.
        Parameters
        ----------
        tm: object
            Can display timed info/error messages to the display
        params: dict
            key-value pairs that act as updates for the settings.
            If a value is `None`, the original value will be reinstated.
        """
        self.tm = tm
        # ocr settings
        for (k, v) in KRAKEN.items():
            setattr(self, k, v)
        # colors are fixed
        for (k, v) in COLORS.items():
            setattr(self, k, v)
        # bands
        setattr(self, "colorBand", BAND_COLORS)
        # stages
        setattr(self, "stageOrder", tuple(STAGES))
        setattr(self, "stages", STAGES)
        # marks
        setattr(self, "markParams", MARK_PARAMS)
        # settings: deep copy so the module-level defaults stay pristine
        self.settings = deepcopy(SETTINGS)
        if os.path.exists(CONFIG_FILE):
            with open(CONFIG_FILE) as fh:
                overrides = yaml.load(fh, Loader=yaml.FullLoader)
            # Robustness fix: yaml.load() returns None for an empty file,
            # and `dict |= None` raises TypeError -- merge real data only.
            if overrides:
                # python 3.9 feature
                self.settings |= overrides
        # configure
        self.configure(reset=False, **params)

    def configure(self, reset=False, **params):
        """Updates current settings based on new values.
        User modifications act on the current settings,
        which have been obtained by deep-copying the defaults.
        Parameters
        ----------
        reset: boolean, optional `False`
            If `True`, a fresh deep copy of the defaults will be made
            and that will be the basis for the new current settings.
        params: dict
            key-value pairs that act as updates for the settings.
            If a value is `None`, the original value will be reinstated.
        """
        error = self.tm.error
        if reset:
            self.settings = deepcopy(SETTINGS)
        settings = self.settings
        for (k, v) in params.items():
            if k in SETTINGS:
                # None means: reinstate the default value
                settings[k] = SETTINGS[k] if v is None else v
            else:
                error(f"Unknown setting: {k}")
        # band offsets: derive the offsetBand mapping from the band* settings
        offsetBand = {}
        settings["offsetBand"] = offsetBand
        for band in self.colorBand:
            bandOff = f"band{band[0].upper()}{band[1:]}"
            offsetBand[band] = settings[bandOff]
        # deliver as attributes (raw band* entries stay dict-only)
        for (k, v) in settings.items():
            if not k.startswith("band"):
                setattr(self, k, v)

    def show(self, params=None):
        """Display current settings.
        Parameters
        ----------
        params: str, optional `None`
            If `None`, all settings will be displayed.
            Else it should be a comma-separated string of legal
            parameter names whose values are to be displayed.
        """
        tm = self.tm
        error = tm.error
        info = tm.info
        settings = self.settings
        params = sorted(set(params.split(',')) if params else settings)
        for k in params:
            # derived entries (present in settings but not in SETTINGS,
            # e.g. offsetBand) are not user-facing: skip silently
            if k in settings and k not in SETTINGS:
                continue
            if k not in SETTINGS:
                error(f"No such setting: {k}", tm=False)
                # Bug fix: without this `continue` the lookup below raised
                # a KeyError for unknown parameter names.
                continue
            info(f"{k:<30} = {settings[k]}", tm=False)
|
from .algorithm1 import Algorithm
from .cp_ortools import CPModel1
from .Milp1 import Milp1
from .Iterator1 import Iterator1
# Registry mapping a public solver name to its implementing class.
solvers = \
    dict(default=Algorithm,
         Milp_LP_HL = Milp1,
         ortools=CPModel1,
         Iterator_HL = Iterator1
         )

# factory of solvers
def get_solver(name='default'):
    """Return the solver class registered under *name* (None if unknown)."""
    return solvers.get(name)
|
import sys
import os
from pathlib import Path
cwd = str(os.getcwd())
parent_dir = str(Path(os.getcwd()).parent)
sys.path.append(f'{parent_dir}/short_text_tagger/') # if testing from within tests/
sys.path.append(f'{cwd}/short_text_tagger/') # if testing from parent directory
from short_text_tagger.edgelist import EdgeList
import pytest
import pandas as pd
corpus1 = pd.Series([
["store","love","hair","products"],
[],
["communication","key","1"]
])
corpus2 = pd.Series([],dtype=str)
def test_raw_directed_edgelist():
    """Directed, unweighted edge lists pair each token with every later token."""
    directed_full = EdgeList(corpus1, weighted=False, directed=True)
    directed_empty = EdgeList(corpus2, weighted=False, directed=True)
    assert directed_full is not None, "edge list needs to exist"
    assert directed_empty is not None, "edge list needs to exist"
    empty_frame = pd.DataFrame({"source": [], "target": []})
    assert directed_empty.edgelist.equals(empty_frame), "edge lists do not match"
    expected = pd.DataFrame({
        "source": ["store", "store", "store", "love", "love", "hair",
                   "communication", "communication", "key"],
        "target": ["love", "hair", "products", "hair", "products", "products",
                   "key", "1", "1"],
    })
    assert directed_full.edgelist.equals(expected), "edge lists do not match"
def test_raw_undirected_edgelist():
    """Undirected edge lists are ordered pairs with source <= target lexically."""
    undirected_full = EdgeList(corpus1, weighted=False, directed=False)
    undirected_empty = EdgeList(corpus2, weighted=False, directed=False)
    assert undirected_full is not None, "edge list needs to exist"
    assert undirected_empty is not None, "edge list needs to exist"
    empty_frame = pd.DataFrame({"source": [], "target": []})
    assert undirected_empty.edgelist.equals(empty_frame), "edge lists do not match"
    expected = pd.DataFrame({
        "source": ["hair", "hair", "hair", "love", "love", "products",
                   "1", "1", "communication"],
        "target": ["love", "products", "store", "products", "store", "store",
                   "communication", "key", "key"],
    })
    assert undirected_full.edgelist.equals(expected), "edge lists do not match"
|
import bob.bio.base

# Configuration entry point: a bob.bio "Filename" preprocessor.
# NOTE(review): presumably this passes the original data file through
# unmodified (by filename) — verify against the bob.bio.base documentation.
preprocessor = bob.bio.base.preprocessor.Filename()
|
# -*- coding:UTF-8 -*-
import json
class Vin:
    'Decode VIN codes for all car manufacturers. Arguments: a VIN-rule JSON file and a VIN string; returns the decoded result.'
    __content = None  # dict with the content of the loaded JSON rule file

    def __init__(self, filename, vin):
        # filename: path of the JSON file holding the decoding rules.
        # vin: the VIN string to decode.
        self.filename = filename
        self.vin = vin
        try:
            fo = open(filename, 'r')
            self.__content = json.load(fo)
        except Exception, e:
            print "JSON file loading failed!"+e.message

    def __del__(self):
        # Debug aid: log when an instance is destroyed.
        class_name = self.__class__.__name__
        print class_name, "del"

    # Fetch a value from the loaded JSON data; returns 'null' on any lookup error.
    def getContent(self, str1, str2):
        try:
            result = self.__content[str1][str2]
        except Exception, e:
            result = 'null'
            print 'Illegal parameters:' + e.message
        return result
        # for items in self.__content:
        #     for item in items:
        #         print(self.__content[items][item])
        # This approach does not work; it cannot iterate properly.

    # After the first three VIN characters, each position maps to one attribute.
    def singleMarkVin(self, vin_dict, vinArray):
        result = {vin_dict["1to3"]: self.getContent(vin_dict["1to3"], vinArray[:3])}
        dict_length = 3 + vin_dict.__len__() - 1
        for i in range(3, dict_length):
            # i == 8 (9th character) is skipped — presumably the check digit.
            if i != 8:
                result[vin_dict[str(i + 1)]] = self.getContent(vin_dict[str(i + 1)], vinArray[i])
            else:
                pass
        result[vin_dict[str(dict_length + 1)]] = self.getContent(vin_dict[str(dict_length + 1)], vinArray[dict_length])
        return result

    # VIN positions 7 and 8 are interpreted together.
    def mergeMarkVin(self, vin_dict, vinArray):
        result = {vin_dict["1to3"]: self.getContent(vin_dict["1to3"], vinArray[:3])}
        dict_length = 3 + vin_dict.__len__() + 1
        for i in range(3, dict_length):
            if i > 8 or i < 6:
                temp = self.getContent(vin_dict[str(i + 1)], vinArray[i])
                if type(temp) is dict:
                    # A dict entry is further keyed by the combined chars 7-8.
                    try:
                        result[vin_dict[str(i + 1)]] = temp[vinArray[6:8]]
                    except Exception, e:
                        result[vin_dict[str(i + 1)]] = 'null'
                        print 'Illegal parameters:' + e.message
                else:
                    result[vin_dict[str(i + 1)]] = temp
            elif i == 6:
                result[vin_dict["7to8"]] = self.getContent(vin_dict["7to8"], vinArray[6:8])
            else:
                pass
        return result

    # Decode result for Dongfeng passenger vehicles.
    def getDFCYC(self):
        vinArray = bytes(self.vin)
        vin_dict = {"1to3":"WMI", "4": "BRAND", "5": "VTYPE", "6": "ENGINE", "7": "RESTRAINT", "8": "TRANSMISSION", "10": "YEAR",
                    "11": "ASSEMBLY"}
        return self.singleMarkVin(vin_dict, vinArray)

    # Decode result for GAC Honda.
    def getGQBT(self):
        vinArray = bytes(self.vin)
        vin_dict = {"1to3":"WMI", "4": "VDS", "10": "YEAR", "11": "ASSEMBLY"}
        result = {vin_dict["1to3"]: self.getContent(vin_dict["1to3"], vinArray[:3])}
        # Characters 4-8 form the VDS block, decoded as a whole.
        dictVDS = self.getContent(vin_dict["4"], vinArray[3:8])
        for items in dictVDS:
            if type(dictVDS[items]) is dict:
                for item in sorted(dictVDS[items]):
                    result[item] = dictVDS[items][item]
            else:
                result[items] = dictVDS[items]
        result[vin_dict["10"]] = self.getContent(vin_dict["10"], vinArray[9])
        result[vin_dict["11"]] = self.getContent(vin_dict["11"], vinArray[10])
        return result

    # Decode result for SAIC Volkswagen.
    def getSQDZ(self):
        vinArray = bytes(self.vin)
        vin_dict = {"1to3": "WMI", "4": "KAROSSERIE", "5": "MOTOR", "6": "RUECKHALTSYSTEM", "7to8": "FZG-KLASSE", "10": "YEAR", "11": "ASSEMBLY"}
        result = {vin_dict["1to3"]: self.getContent(vin_dict["1to3"], vinArray[:3])}
        result[vin_dict["4"]] = self.getContent(vin_dict["4"], vinArray[3])
        temp = self.getContent(vin_dict["7to8"], vinArray[6:8])
        print temp
        if temp == 'null':
            result[vin_dict["7to8"]] = 'null'
            result[vin_dict["5"]] = 'null'
        else:
            # temp is a single-key dict: key is the vehicle class, value maps
            # the 5th character to the engine (Py2: keys() is a list).
            result[vin_dict["7to8"]] = temp.keys()[0]
            try:
                result[vin_dict["5"]] = temp[temp.keys()[0]][vinArray[4]]
            except Exception, e:
                result[vin_dict["5"]] = 'null'
        result[vin_dict["6"]] = self.getContent(vin_dict["6"], vinArray[5])
        result[vin_dict["10"]] = self.getContent(vin_dict["10"], vinArray[9])
        result[vin_dict["11"]] = self.getContent(vin_dict["11"], vinArray[10])
        return result

    # Decode result for Beiqi Foton Daimler.
    def getBQFTDML(self):
        vinArray = bytes(self.vin)
        vin_dict = {"1to3": "WMI", "4": "VSERIES", "5": "TMASS", "6": "BTYPE", "7": "ENGINE", "8": "WHEELBASE", "10": "YEAR", "11": "ASSEMBLY"}
        return self.singleMarkVin(vin_dict, vinArray)

    # Decode result for Beijing Benz.
    def getBJBC(self):
        vinArray = bytes(self.vin)
        vin_dict = {"1to3": "WMI", "4": "VSERIES", "5": "BTYPE", "6to7": "ENGINE", "8": "RESTRAINT", "10": "YEAR"}
        result = {vin_dict["1to3"] : self.getContent(vin_dict["1to3"], vinArray[:3])}
        for i in range(3, 5):
            result[vin_dict[str(i + 1)]] = self.getContent(vin_dict[str(i + 1)], vinArray[i])
        # Positions 6-7 together select the engine.
        result[vin_dict["6to7"]] = self.getContent(vin_dict["6to7"], vinArray[5:7])
        result[vin_dict["8"]] = self.getContent(vin_dict["8"], vinArray[7])
        result[vin_dict["10"]] = self.getContent(vin_dict["10"], vinArray[9])
        return result

    # Decode result for BAIC Group (Beijing Automotive Group Co., Ltd.).
    def getBJQC(self):
        vinArray = bytes(self.vin)
        # Each WMI prefix uses a different meaning for positions 4-8.
        vin_dict_LNB_LPB = {"1to3": "WMI", "4": "VTYPE", "5": "LENGTH&SEATING", "6": "BTYPE", "7": "ENGINE", \
                            "8": "RESTRAINT","10": "YEAR", "11": "ASSEMBLY"}
        vin_dict_LHB = {"1to3": "WMI", "4": "VTYPE", "5": "TCARGO&CCARGO", "6": "BTYPE", "7": "ENGINE",\
                        "8": "WHEELBASE","10": "YEAR", "11": "ASSEMBLY"}
        vin_dict_LMB = {"1to3": "WMI", "4": "PPOSITION", "5": "TCARGO&CCARGO", "6": "BTYPE", "7": "ENGINE",\
                        "8": "WHEELBASE","10": "YEAR", "11": "ASSEMBLY"}
        vin_dict_LJB = {"1to3": "WMI", "4": "VTYPE", "5": "TCARGO&CCARGO", "6": "BTYPE", "7": "BRAKE", \
                        "8": "WHEELBASE","10": "YEAR", "11": "ASSEMBLY"}
        wmi = vinArray[:3]
        if wmi == 'LHB':
            vin_dict = vin_dict_LHB
        elif wmi == 'LMB':
            vin_dict = vin_dict_LMB
        elif wmi == 'LJB':
            vin_dict = vin_dict_LJB
        elif wmi == 'LNB' or wmi == 'LPB':
            vin_dict = vin_dict_LNB_LPB
        else:
            return 'Illegal VIN'
        temp = self.__content[wmi]
        result= {vin_dict["1to3"] : self.getContent(vin_dict["1to3"], wmi)}
        # This loop does not call getContent because the data layout differs.
        # NOTE(review): `i != 8` is always true for range(3, 8) — dead branch?
        for i in range(3, 8):
            if i != 8:
                try:
                    result[vin_dict[str(i + 1)]] = temp[vin_dict[str(i + 1)]][vinArray[i]]
                except Exception, e:
                    result[vin_dict[str(i + 1)]] = 'null'
                    print 'Illegal parameters:' + e.message
            else:
                pass
        result[vin_dict["10"]] = self.getContent(vin_dict["10"], vinArray[9])
        result[vin_dict["11"]] = self.getContent(vin_dict["11"], vinArray[10])
        return result

    # Decode result for Dongfeng commercial vehicles.
    def getDFSYC(self):
        vinArray = bytes(self.vin)
        # One rule table per vehicle category (car / bus / bus chassis /
        # trailer / truck / truck chassis), selected from the VTYPE lookup.
        vin_dict_car = {"1to3": "WMI", "4": "VTYPE", "5": "CAR_BODY_STYLE", "6": "CAR_ENGINE", "7": "CAR_BUS_TYPE",
                        "8": "NON_TRAILER_AXLEBASE", "10": "YEAR", "11": "ASSEMBLY"}
        vin_dict_bus = {"1to3": "WMI", "4": "VTYPE", "5": "BUS_CHASSIS_LENGTH", "6": "TRUCK_BUS_ENGINE", "7": "CAR_BUS_TYPE",
                        "8": "NON_TRAILER_AXLEBASE", "10": "YEAR", "11": "ASSEMBLY"}
        vin_dict_buschassis = {"1to3": "WMI", "4": "VTYPE", "5": "BUS_CHASSIS_LENGTH", "6": "TRUCK_BUS_ENGINE", "7": "BUS_CHASSIS",
                               "8": "NON_TRAILER_AXLEBASE", "10": "YEAR", "11": "ASSEMBLY"}
        vin_dict_trailer = {"1to3": "WMI", "4": "VTYPE", "5": "NON_CAR_BUS_TMASS", "6": "TRAILER_AXLE_V", "7": "TRAILER_STYLE",
                            "8": "TRAILER_LENGTH", "10": "YEAR", "11": "ASSEMBLY"}
        vin_dict_truck = {"1to3": "WMI", "4": "VTYPE", "5": "NON_CAR_BUS_TMASS", "6": "TRUCK_BUS_ENGINE", "7": "TRUCK_DRIVE",
                          "8": "NON_TRAILER_AXLEBASE", "10": "YEAR", "11": "ASSEMBLY"}
        vin_dict_truckchassis = {"1to3": "WMI", "4": "VTYPE", "5": "NON_CAR_BUS_TMASS", "6": "TRUCK_BUS_ENGINE", "7": "TRUCK_DRIVE",
                                 "8": "NON_TRAILER_AXLEBASE", "10": "YEAR", "11": "ASSEMBLY"}
        vType = self.getContent("VTYPE", vinArray[3])
        # The decoded vehicle type is a space-separated Chinese label,
        # e.g. "<category> <completeness>".
        temp_vType = vType.split(' ')
        temp0 = temp_vType[0].encode('utf-8')
        if temp0 == '乘用车':
            vin_dict = vin_dict_car
        elif temp0 == '客车':
            vin_dict = vin_dict_bus
        elif temp0 == '客车底盘':
            vin_dict = vin_dict_buschassis
        elif temp0 == '挂车':
            vin_dict = vin_dict_trailer
        elif temp_vType[1].encode('utf-8') == '完整车辆':
            vin_dict = vin_dict_truck
        else:
            vin_dict = vin_dict_truckchassis
        return self.singleMarkVin(vin_dict, vinArray)

    # Decode result for FAW-Volkswagen.
    def getYQDZ(self):
        vinArray = bytes(self.vin)
        vin_dict = {"1to3": "WMI", "4": "ENGINE_MOTOR_CAPACITY", "5": "CAR_BODY_STYLE", "6": "ENGINE", "7to8": "VTYPE", "10": "YEAR",
                    "11": "ASSEMBLY"}
        return self.mergeMarkVin(vin_dict, vinArray)

    # Decode result for imported Volkswagen vehicles.
    def getDZJK(self):
        vinArray = bytes(self.vin)
        vin_dict = {"1to3": "WMI", "4": "VMODEL", "5": "MOTOR", "6": "RESTRAINT",
                    "7to8": "VTYPE", "10": "YEAR", "11": "ASSEMBLY"}
        return self.mergeMarkVin(vin_dict, vinArray)
|
import torch
import matplotlib.pyplot as plt
def show_frame(frame: torch.Tensor):
    """Render a single 2-D frame as an image with bicubic interpolation.

    Annotation corrected from ``torch.tensor`` (the factory function) to
    ``torch.Tensor`` (the type). Assumes ``frame`` lives on the CPU — TODO confirm.
    """
    fig = plt.figure(figsize = (16,12))  # 16 x 12 inch figure
    ax = fig.add_subplot(111)
    ax.imshow(frame.numpy(), interpolation='bicubic')
def show_frames(frames: torch.Tensor, nrows=10, ncols=10, figsize=(20,16)):
    """Display a batch of frames on an ``nrows`` x ``ncols`` grid of axes.

    Assumes ``len(frames) <= nrows * ncols`` (extra frames would index past
    the grid) and that frames are CPU tensors — TODO confirm.
    Annotation corrected from ``torch.tensor`` to ``torch.Tensor``.
    """
    fig, ax = plt.subplots(nrows, ncols, figsize = figsize)
    for i, frame in enumerate(frames):
        # Row-major placement: frame i goes to cell (i // ncols, i % ncols).
        ax[i//ncols][i%ncols].imshow(frame.numpy(), interpolation='bicubic')
        ax[i//ncols][i%ncols].axis('off')
    plt.tight_layout()
# Generated by Django 3.1.5 on 2021-03-16 14:02
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add a database index to ``created_at`` on both currency tables.

    Auto-generated by Django; the field definitions are re-declared with
    ``db_index=True`` while keeping the ``timezone.now`` default.
    """

    dependencies = [
        ("currencies", "0001_initial"),
    ]

    operations = [
        migrations.AlterField(
            model_name="currency",
            name="created_at",
            field=models.DateTimeField(
                db_index=True, default=django.utils.timezone.now
            ),
        ),
        migrations.AlterField(
            model_name="currencyvalue",
            name="created_at",
            field=models.DateTimeField(
                db_index=True, default=django.utils.timezone.now
            ),
        ),
    ]
|
from datetime import datetime, timedelta
from django.test import TestCase
from freezegun import freeze_time
from tests.models import TimeStamp, TimeStampWithStatusModel
class TimeStampedModelTests(TestCase):
    """Behavioural tests for the auto-managed ``created``/``modified`` fields.

    Uses ``freezegun`` to pin the clock so timestamp values are exact.
    """

    def test_created(self):
        """``created`` is set to the current time when the object is created."""
        with freeze_time(datetime(2016, 1, 1)):
            t1 = TimeStamp.objects.create()
        self.assertEqual(t1.created, datetime(2016, 1, 1))

    def test_created_sets_modified(self):
        '''
        Ensure that on creation that modifed is set exactly equal to created.
        '''
        t1 = TimeStamp.objects.create()
        self.assertEqual(t1.created, t1.modified)

    def test_modified(self):
        """``modified`` advances to the current time on each save."""
        with freeze_time(datetime(2016, 1, 1)):
            t1 = TimeStamp.objects.create()
        with freeze_time(datetime(2016, 1, 2)):
            t1.save()
        self.assertEqual(t1.modified, datetime(2016, 1, 2))

    def test_modified_is_not_cached(self):
        """Each instance computes its own ``modified`` (no shared default)."""
        with freeze_time(datetime(2016, 1, 1)):
            t1 = TimeStamp()
        with freeze_time(datetime(2017, 1, 1)):
            t2 = TimeStamp()
        self.assertNotEqual(t1.modified, t2.modified)

    def test_overriding_created_via_object_creation_also_uses_creation_date_for_modified(self):
        """
        Setting the created date when first creating an object
        should be permissable.
        """
        different_date = datetime.today() - timedelta(weeks=52)
        t1 = TimeStamp.objects.create(created=different_date)
        self.assertEqual(t1.created, different_date)
        self.assertEqual(t1.modified, different_date)

    def test_overriding_modified_via_object_creation(self):
        """
        Setting the modified date explicitly should be possible when
        first creating an object, but not thereafter.
        """
        different_date = datetime.today() - timedelta(weeks=52)
        t1 = TimeStamp.objects.create(modified=different_date)
        self.assertEqual(t1.modified, different_date)
        self.assertNotEqual(t1.created, different_date)

    def test_overriding_created_after_object_created(self):
        """
        The created date may be changed post-create
        """
        t1 = TimeStamp.objects.create()
        different_date = datetime.today() - timedelta(weeks=52)
        t1.created = different_date
        t1.save()
        self.assertEqual(t1.created, different_date)

    def test_overriding_modified_after_object_created(self):
        """
        The modified date should always be updated when the object
        is saved, regardless of attempts to change it.
        """
        t1 = TimeStamp.objects.create()
        different_date = datetime.today() - timedelta(weeks=52)
        t1.modified = different_date
        t1.save()
        self.assertNotEqual(t1.modified, different_date)

    def test_overrides_using_save(self):
        """
        The first time an object is saved, allow modification of both
        created and modified fields.
        After that, only created may be modified manually.
        """
        t1 = TimeStamp()
        different_date = datetime.today() - timedelta(weeks=52)
        t1.created = different_date
        t1.modified = different_date
        t1.save()
        self.assertEqual(t1.created, different_date)
        self.assertEqual(t1.modified, different_date)
        different_date2 = datetime.today() - timedelta(weeks=26)
        t1.created = different_date2
        t1.modified = different_date2
        t1.save()
        self.assertEqual(t1.created, different_date2)
        # Second save: the explicit modified value is ignored and replaced.
        self.assertNotEqual(t1.modified, different_date2)
        self.assertNotEqual(t1.modified, different_date)

    def test_save_with_update_fields_overrides_modified_provided_within_a(self):
        """
        Tests if the save method updated modified field
        accordingly when update_fields is used as an argument
        and modified is provided
        """
        # The same behaviour must hold for every iterable type.
        tests = (
            ['modified'],   # list
            ('modified',),  # tuple
            {'modified'},   # set
        )
        for update_fields in tests:
            with self.subTest(update_fields=update_fields):
                with freeze_time(datetime(2020, 1, 1)):
                    t1 = TimeStamp.objects.create()
                with freeze_time(datetime(2020, 1, 2)):
                    t1.save(update_fields=update_fields)
                self.assertEqual(t1.modified, datetime(2020, 1, 2))

    def test_save_is_skipped_for_empty_update_fields_iterable(self):
        """An empty ``update_fields`` means nothing is written, not even modified."""
        tests = (
            [],     # list
            (),     # tuple
            set(),  # set
        )
        for update_fields in tests:
            with self.subTest(update_fields=update_fields):
                with freeze_time(datetime(2020, 1, 1)):
                    t1 = TimeStamp.objects.create()
                with freeze_time(datetime(2020, 1, 2)):
                    t1.test_field = 1
                    t1.save(update_fields=update_fields)
                # The in-memory change was never persisted.
                t1.refresh_from_db()
                self.assertEqual(t1.test_field, 0)
                self.assertEqual(t1.modified, datetime(2020, 1, 1))

    def test_save_updates_modified_value_when_update_fields_explicitly_set_to_none(self):
        """``update_fields=None`` behaves like a full save: modified updates."""
        with freeze_time(datetime(2020, 1, 1)):
            t1 = TimeStamp.objects.create()
        with freeze_time(datetime(2020, 1, 2)):
            t1.save(update_fields=None)
        self.assertEqual(t1.modified, datetime(2020, 1, 2))

    def test_model_inherit_timestampmodel_and_statusmodel(self):
        """``modified`` is appended to update_fields even on a mixin subclass."""
        with freeze_time(datetime(2020, 1, 1)):
            t1 = TimeStampWithStatusModel.objects.create()
        with freeze_time(datetime(2020, 1, 2)):
            t1.save(update_fields=['test_field', 'status'])
        self.assertEqual(t1.modified, datetime(2020, 1, 2))
|
# A solution to the British Informatics Olympiad 2012 Question 1
# Scores 24/24 Marks
from math import sqrt
# Read the target number.
n = int(input())
# Only primes up to sqrt(n) are needed as trial divisors.
a = int(sqrt(n))
# Sieve of Eratosthenes over 2..a-1.
# NOTE(review): range(2, a) excludes a itself, so when a is prime it is never
# tried as a divisor — confirm against the intended specification.
numbers = set(range(2, a))
primes = set()
while numbers:
    # The smallest remaining number is always prime.
    curr = min(numbers)
    primes.add(curr)
    numbers.discard(curr)
    # Remove all multiples of the new prime.
    for i in range(curr*2, a, curr):
        numbers.discard(i)
# Product of the distinct small prime factors of n.
factors = 1
for prime in primes:
    if n % prime == 0:
        factors = factors * prime
# No small prime divides n: report n itself.
if factors == 1:
    factors = n
# NOTE(review): Python 2 print statement in an otherwise version-neutral
# script — the file mixes dialects.
print factors
|
from setuptools import setup, find_packages
# Minimal packaging metadata for the openassetio distribution.
setup(
    name='openassetio',
    version="0.0.0",
    # Python sources live under the python/ directory, not the repo root.
    package_dir={'': 'python'},
    packages=find_packages(where='python'),
    python_requires='>=3.7',
)
|
"""
The best way to open and close the file automatically is the with(type)
"""
# Demonstrate that `with` closes the file automatically, first reading the
# file and then appending a line to it.
with open('with.text', 'r') as handle:
    contents = handle.read()

with open('with.text', 'a') as handle:
    written = handle.write('\nthis is write')
    print(written)
|
import math
import weakref
import inspect
import arrow
import numpy as np
from numbers import Number
from collections.abc import Sequence, Mapping
from intervalpy import Interval
from .const import GOLD
from . import util
MIN_STEP = 1e-5
# TODO: Implement Duration and use its next ad previous methods
# Or make a super class which is not tied to a time interval.
_func_obj = None
class Curve:
_token_counter = 0
@classmethod
def empty(cls):
    """Return a curve with an empty domain (the Empty sentinel curve)."""
    from .empty import Empty
    return Empty()
@property
def min_step(self):
return self._min_step
@min_step.setter
def min_step(self, value):
self._min_step = value
@property
def domain(self):
if self.needs_domain_update:
self._domain = self.get_domain()
return self._domain
@property
def update_interval(self):
return self._begin_update_interval
@property
def is_updating(self):
return not self.update_interval.is_empty
def __init__(self, min_step=None):
self.name = None
self._domain = None
self._observer_data = {}
self._ordered_observer_tokens = []
self._begin_update_interval = Interval.empty()
self._end_update_interval = Interval.empty()
self.min_step = min_step
def __call__(self, *args):
return self.y(args[0])
def __repr__(self):
try:
if bool(self.name):
return self.name
return f'{type(self).__name__}("{self.domain}")'
except Exception as e:
return super().__repr__() + f'({e})'
def y(self, x):
    """Return the curve's value at ``x``.

    Abstract: subclasses must override this. Raises ``NotImplementedError``
    (a subclass of ``Exception``, so existing ``except Exception`` handlers
    still apply).
    """
    raise NotImplementedError("y() must be implemented by a Curve subclass")
def y_start(self):
return self.y(self.domain.start)
def y_end(self):
return self.y(self.domain.end)
def first_point(self):
    """Return ``(x, y)`` at the start of the domain, or ``None`` if empty."""
    domain = self.domain
    if domain.is_empty:
        return None
    x_start = domain.start
    return (x_start, self.y(x_start))
def last_point(self):
    """Return ``(x, y)`` at the end of the domain, or ``None`` if empty."""
    domain = self.domain
    if domain.is_empty:
        return None
    x_end = domain.end
    return (x_end, self.y(x_end))
def d_y(self, x, forward=False, min_step=MIN_STEP, limit=None):
    """Finite-difference slope at ``x``.

    Steps to the next (``forward=True``) or previous sample position and
    returns ``(y1 - y) / (x1 - x)``. Returns ``None`` when either point is
    outside the domain or evaluates to ``None``.
    """
    min_step = self.resolve_min_step(min_step)
    if forward:
        x1 = self.x_next(x, min_step=min_step, limit=limit)
    else:
        x1 = self.x_previous(x, min_step=min_step, limit=limit)
    if x1 is None:
        return None
    y1 = self.y(x1)
    if y1 is None:
        return None
    y = self.y(x)
    if y is None:
        return None
    if x1 == x:
        # Zero step: report a signed infinity instead of dividing by zero.
        dy = math.inf if y1 >= y else -math.inf
        if not forward:
            # Backward difference flips the sign convention.
            dy = -dy
    else:
        dy = (y1 - y) / (x1 - x)
    return dy
def x(self, y):
    """Return an ``x`` at which the curve takes the value ``y``.

    Abstract: subclasses must override this. Raises ``NotImplementedError``
    (a subclass of ``Exception``, so existing ``except Exception`` handlers
    still apply).
    """
    raise NotImplementedError("x() must be implemented by a Curve subclass")
def x_next(self, x, min_step=MIN_STEP, limit=None):
    """Return the next sample position after ``x``, or ``None`` if outside the domain.

    An infinite ``min_step`` jumps straight to the domain end; ``limit``
    clamps the result.
    """
    min_step = self.resolve_min_step(min_step)
    if math.isinf(min_step):
        x1 = self.domain.end
    else:
        x1 = x + min_step
    if limit is not None and x1 > limit:
        x1 = limit
    # enforce_start=False: presumably allows x1 on an open start boundary —
    # verify against intervalpy's Interval.contains semantics.
    if not self.domain.contains(x1, enforce_start=False):
        return None
    return x1
def x_previous(self, x, min_step=MIN_STEP, limit=None):
    """Return the previous sample position before ``x``, or ``None`` if outside the domain.

    Mirror image of :meth:`x_next`: an infinite ``min_step`` jumps to the
    domain start; ``limit`` clamps the result from below.
    """
    min_step = self.resolve_min_step(min_step)
    if math.isinf(min_step):
        x1 = self.domain.start
    else:
        x1 = x - min_step
    if limit is not None and x1 < limit:
        x1 = limit
    # enforce_end=False: presumably allows x1 on an open end boundary —
    # verify against intervalpy's Interval.contains semantics.
    if not self.domain.contains(x1, enforce_end=False):
        return None
    return x1
def previous_point(self, x, min_step=MIN_STEP):
x1 = self.x_previous(x, min_step=min_step)
if x1 is None:
return None
y1 = self.y(x1)
return (x1, y1)
def next_point(self, x, min_step=MIN_STEP):
x1 = self.x_next(x, min_step=min_step)
if x1 is None:
return None
y1 = self.y(x1)
return (x1, y1)
def get_domain(self):
return Interval.empty()
def resolve_min_step(self, min_step):
    """Combine a caller-supplied ``min_step`` with the curve's own.

    Returns the larger of the two when both are set, whichever one is set
    when only one is, and ``None`` when neither is.
    """
    own = self.min_step
    if min_step is None:
        return own
    if own is None:
        return min_step
    return max(min_step, own)
def sample_points(self, domain=None, min_step=MIN_STEP, step=None):
min_step = self.resolve_min_step(min_step)
if domain is None:
domain = self.domain
else:
domain = Interval.intersection([self.domain, domain])
if domain.is_empty:
return []
elif not domain.is_finite:
raise Exception("Cannot sample points on an infinite domain {}. Specify a finite domain.".format(domain))
x_start, x_end = domain
x_end_bin = round(x_end / min_step) if min_step is not None else x_end
if domain.start_open:
points = []
else:
points = [(x_start, self.y(x_start))]
if step is not None:
x = x_start + step
while x <= x_end:
y = self.y(x)
points.append((x, y))
x += step
elif min_step is not None and min_step > 0:
x = self.x_next(x_start, min_step=min_step, limit=x_end)
while x is not None and x <= x_end:
y = self.y(x)
points.append((x, y))
x_bin = round(x / min_step) if min_step is not None else x
if x_bin == x_end_bin:
break
x1 = self.x_next(x, min_step=min_step, limit=x_end)
if x1 is not None:
x1_bin = round(x1 / min_step) if min_step is not None else x1
if x1_bin <= x_bin:
raise Exception('Next x value {} should be greater than the previous x value {} by at least the minimum step of {}'.format(x1, x, min_step))
x = x1
if not domain.end_open and points[-1][0] != x_end:
points.append((x_end, self.y(x_end)))
else:
raise Exception("Bad functions sample parameters.")
return points
def sample_points_from_x(self, x, limit, backward=False, open=False, min_step=None):
    """Collect up to ``limit`` sampled ``(x, y)`` points walking from ``x``.

    A negative ``limit`` reverses the walk direction. With ``open=True`` the
    starting point itself is excluded from consideration.
    """
    assert limit is not None
    if limit < 0:
        limit = -limit
        backward = not backward
    min_step = self.resolve_min_step(min_step)
    points = []
    x1 = x
    i = 0
    if not open:
        if x is None:
            return points
        y = self.y(x)
        if y is None:
            return points
        # NOTE(review): the starting point (x, y) is validated and counted
        # (i += 1) but never appended to `points` — looks like a bug; confirm
        # whether the closed start point should be included in the result.
        i += 1
    while limit is None or i < limit:
        if not backward:
            x1 = self.x_next(x1, min_step=min_step)
        else:
            x1 = self.x_previous(x1, min_step=min_step)
        if x1 is None:
            break
        y1 = self.y(x1)
        if y1 is None:
            break
        points.append((x1, y1))
        i += 1
    return points
def get_range(self, domain=None, **kwargs):
    """Return an Interval spanning the sampled y-values over ``domain``.

    Extra keyword arguments are forwarded to :meth:`sample_points`.
    Returns an empty interval when no points were sampled.
    """
    sampled = self.sample_points(domain=domain, **kwargs)
    lo = None
    hi = None
    for _, y_val in sampled:
        if lo is None or y_val < lo:
            lo = y_val
        if hi is None or y_val > hi:
            hi = y_val
    if lo is None or hi is None:
        return Interval.empty()
    return Interval(lo, hi)
def minimise(self, x, min_step=MIN_STEP, step=None, max_iterations=1000):
x_min = x
x_min_previous = None
iterations = 0
while iterations < max_iterations:
iterations += 1
y = self.y(x_min)
if y is None:
return x_min_previous
dy0 = self.d_y(x_min, forward=False)
dy1 = self.d_y(x_min, forward=True)
forward = True
if dy0 is None and dy1 is None:
return x_min
elif dy0 is None:
if dy1 <= 0:
forward = True
else:
# Sloping into null value
return None
elif dy1 is None:
if dy0 >= 0:
forward = False
else:
# Sloping into null value
return None
else:
if dy0 * dy1 < 0 and dy0 <= 0 and dy1 >= 0:
# Found minimum
return x_min
if dy0 * dy1 < 0:
# Found maximum
forward = abs(dy0) < abs(dy1)
else:
# On slope
forward = dy1 < 0
x_min_previous = x_min
if forward:
if step is not None:
x_min += step
else:
x_min = self.x_next(x_min, min_step=min_step)
else:
if step is not None:
x_min -= step
else:
x_min = self.x_previous(x_min, min_step=min_step)
return x_min
def maximise(self, x, min_step=MIN_STEP, step=None, max_iterations=1000):
x_max = x
x_max_previous = None
iterations = 0
while iterations < max_iterations:
iterations += 1
y = self.y(x_max)
if y is None:
return x_max_previous
dy0 = self.d_y(x_max, forward=False)
dy1 = self.d_y(x_max, forward=True)
forward = True
if dy0 is None and dy1 is None:
return x_max
elif dy0 is None:
if dy1 >= 0:
forward = True
else:
# Sloping into null value
return None
elif dy1 is None:
if dy0 <= 0:
forward = False
else:
# Sloping into null value
return None
else:
if dy0 * dy1 < 0 and dy0 >= 0 and dy1 <= 0:
# Found maximum
return x_max
if dy0 * dy1 < 0:
# Found minimum
forward = abs(dy0) < abs(dy1)
else:
# On slope
forward = dy1 > 0
x_max_previous = x_max
if forward:
if step is not None:
x_max += step
else:
x_max = self.x_next(x_max, min_step=min_step)
else:
if step is not None:
x_max -= step
else:
x_max = self.x_previous(x_max, min_step=min_step)
return x_max
def regression(self, domain=None, min_step=MIN_STEP, step=None):
points = self.sample_points(domain=domain, min_step=min_step, step=step)
for p in points:
if p[1] is None:
return None
count = len(points)
if count < 2:
return None
from .line import Line
if count == 2:
return Line(p1=points[0], p2=points[1])
xy = np.vstack(points)
x = xy[:,0]
y = xy[:,1]
A = np.array([x, np.ones(count)])
# Regression
w = np.linalg.lstsq(A.T, y, rcond=None)[0]
m = w[0]
c = w[1]
return Line(const=c, slope=m)
def add_observer(self, *obj, domain=None, begin=None, end=None, autoremove=False, prioritize=False):
if begin is None and end is None:
return 0
Curve._token_counter += 1
token = Curve._token_counter
domain = Interval.parse(domain, default_inf=True)
obj_ref = None
if len(obj) != 0:
if autoremove:
# Remove observer automatically
obj_ref = weakref.ref(obj[0], lambda _: self.remove_observer(token))
else:
# Calling remove_observer() is required
obj_ref = weakref.ref(obj[0])
elif autoremove:
raise Exception('Autoremoving an observer requires an object')
# Do the callback functions require the domain?
begin_with_interval = False
end_with_interval = False
if begin:
begin_with_interval = util.count_positional_args(begin) == 1
if end:
end_with_interval = util.count_positional_args(end) == 1
# TODO: does saving strong references to callbacks create a retain cycle?
self._observer_data[token] = (obj_ref, domain, begin, end, begin_with_interval, end_with_interval)
if prioritize:
self._ordered_observer_tokens.insert(0, token)
else:
self._ordered_observer_tokens.append(token)
return token
def remove_observer(self, token_or_obj):
    """Remove observers by registration token or by observed object.

    Numbers are treated as tokens; any other argument removes every observer
    whose weakly-referenced object equals it or has already been collected.
    """
    if isinstance(token_or_obj, Number):
        if token_or_obj in self._observer_data:
            del self._observer_data[token_or_obj]
            self._ordered_observer_tokens.remove(token_or_obj)
    else:
        # Iterate over a copy: the token list is mutated during the scan.
        for token in list(self._ordered_observer_tokens):
            obj_ref = self._observer_data[token][0]
            if obj_ref is not None:
                obj = obj_ref()
                # A dead weakref (obj is None) is cleaned up as well.
                if obj is None or obj == token_or_obj:
                    del self._observer_data[token]
                    self._ordered_observer_tokens.remove(token)
def begin_update(self, domain):
    """Notify observers that values over ``domain`` are about to change.

    Domains already covered by a pending begin-notification are ignored,
    so nested/overlapping updates fire each begin callback once.
    """
    if domain.is_empty or self._begin_update_interval.is_superset_of(domain):
        return
    self._begin_update_interval = Interval.union([self._begin_update_interval, domain])
    for token in self._ordered_observer_tokens:
        # Observer tuple layout: (obj_ref, domain, begin_cb, end_cb,
        # begin_wants_interval, end_wants_interval) — begin parts used here.
        _, callback_interval, callback, _, callback_with_interval, _ = self._observer_data[token]
        if callback_interval is None or domain.intersects(callback_interval):
            if callback is not None:
                if callback_with_interval:
                    callback(domain)
                else:
                    callback()
def end_update(self, domain):
    """Notify observers that updates over ``domain`` have finished.

    End notifications are deferred until every begun interval has been
    ended; only then do the end callbacks fire, with the merged interval.
    """
    if domain.is_empty or self._end_update_interval.is_superset_of(domain):
        return
    self._end_update_interval = Interval.union([self._end_update_interval, domain])
    if not self._end_update_interval.is_superset_of(self._begin_update_interval):
        # Keep collecting updates
        return
    # Updates complete
    update_interval = self._end_update_interval
    self._begin_update_interval = Interval.empty()
    self._end_update_interval = Interval.empty()
    self.set_needs_interval_update()
    # Iterate a copy: callbacks may add/remove observers while we loop.
    for token in list(self._ordered_observer_tokens):
        # Observer tuple layout: (obj_ref, domain, begin_cb, end_cb,
        # begin_wants_interval, end_wants_interval) — end parts used here.
        _, callback_interval, _, callback, _, callback_with_interval = self._observer_data[token]
        if callback_interval is None or update_interval.intersects(callback_interval):
            if callback is not None:
                if callback_with_interval:
                    callback(update_interval)
                else:
                    callback()
@property
def needs_domain_update(self):
return self._domain is None
def set_needs_interval_update(self):
self._domain = None
def map(self, tfm, skip_none=False, name=None, **kwargs):
from .map import Map
return Map(self, tfm, skip_none=skip_none, name=name, **kwargs)
def accumulator_map(self, tfm, degree, is_period=False, interpolation=None, min_step=MIN_STEP, uniform=True):
from .accumulator_map import AccumulatorMap
return AccumulatorMap(
self,
tfm,
degree,
is_period=is_period,
interpolation=interpolation,
min_step=min_step,
uniform=uniform
)
def offset(self, x, duration=None):
from .offset import Offset
return Offset(self, x, duration=duration)
def add(self, func):
return Curve.add_many([self, func])
def subtract(self, func):
return Curve.subtract_many([self, func])
def multiply(self, func):
return Curve.multiply_many([self, func])
def divide(self, func):
return Curve.divide_many([self, func])
def pow(self, power):
return type(self).pow_many([self, power])
def raised(self, base):
return type(self).pow_many([base, self])
def log(self, base=math.e):
return type(self).log_many([self, base])
def integral(self, const=0, interpolation=None, uniform=True):
from .integral import Integral
return Integral(self, const=const, interpolation=interpolation, uniform=uniform)
def additive_inverse(self):
return self.map(_additive_inverse)
def multiplicative_inverse(self):
return self.map(_multiplicative_inverse)
def abs(self):
return self.map(_abs)
def blend(self, func, x_blend_start, x_blend_stop):
from .aggregate import Aggregate
from .piecewise import Piecewise
x_blend_period = x_blend_stop - x_blend_start
def blend_f(x, ys):
u = (x - x_blend_start) / x_blend_period
return (1.0 - u) * ys[0] + u * ys[1]
c = Aggregate([self, func], tfm=blend_f, name='blend')
funcs = [self, c, func]
domains = self.domain.partition([x_blend_start, x_blend_stop])
return Piecewise(funcs, domains)
def extension(self, name, start=False, end=True, raise_on_empty=False, **kwds):
from .extension import ConstantExtension
from .extension import TangentExtension
from .extension import SinExtension
classes = [
ConstantExtension,
TangentExtension,
SinExtension,
]
for c in classes:
if c.name == name:
return c(self, start=start, end=end, raise_on_empty=raise_on_empty, **kwds)
raise Exception('Unknown extension type')
# def wave_extended(self, ref_func, min_deviation=0, start=None, step=None, min_step=MIN_STEP):
# if self.domain.is_positive_infinite:
# return self
# ref_func = Curve.parse(ref_func)
# extremas = Extremas(self, ref_func, min_deviation=min_deviation, start=start, step=step, min_step=min_step)
# def mom(self, degree, duration, **kwargs):
# """
# Returns the momentum of the reciever.
# The degree corresponds to the number of steps to take.
# """
# degree = int(degree)
# if degree < 1:
# raise ValueError(f'Momentum requires a positive degree, got: {degree}')
# from pyduration import Duration
# duration = Duration.parse(duration)
# def _mom(x, y):
# if y is None:
# return None
# # step back
# x0 = duration.step(x, -degree)
# y0 = self.y(x0)
# if y0 is None:
# return None
# return y - y0
# return self.map(_mom, name=f'mom({degree})', **kwargs)
def sma(self, degree, is_period=False, **kwargs):
from .sma import SMA
return SMA(self, degree, is_period=is_period, **kwargs)
def ema(self, degree, is_period=False, init=None, **kwargs):
from .ema import EMA
return EMA(self, degree, is_period=is_period, init=init, **kwargs)
def smma(self, degree, **kwargs):
from .sma import SMA
from .ema import EMA
sma = SMA(self, degree, is_period=False, **kwargs)
ema = EMA(self, 1 / degree, is_period=False, init=sma, **kwargs)
return ema
def harmonic_smas(self, base_degree, count, stride=1, is_period=False, **kwargs):
"""
Returns `count` SMAs from small to large. Their degrees
are proportional to the golden ratio.
"""
periods = []
smas = []
step = stride + 1
for i in range(count):
period = base_degree * GOLD ** float(i * step)
period = round(period / base_degree) * base_degree
periods.append(period)
for i in range(count):
period = periods[i]
sma = self.sma(period, is_period=is_period, **kwargs)
smas.append(sma)
return smas
def centered_macs(self, base_degree, count, stride=1, is_period=False, **kwargs):
periods = []
smas = []
step = stride + 1
for i in range(count):
period = base_degree * GOLD ** float(i * step)
period = round(period / base_degree) * base_degree
periods.insert(0, period)
for i in range(count):
period = periods[i]
sma = self.sma(period, is_period=is_period, **kwargs)
smas.append(sma)
return smas
def rsi(self, degree, **kwargs):
d = self.differential()
du = Curve.max([d, 0], ignore_empty=False)
dd = Curve.max([-d, 0], ignore_empty=False)
rs = du.ema(1 / degree, **kwargs) / dd.ema(1 / degree, **kwargs)
rsi = 100 - 100 / (1 + rs)
rsi.name = f'rsi({degree})'
return rsi
def trailing_min(self, degree, is_period=False, interpolation=None, min_step=MIN_STEP, uniform=True):
return self.accumulator_map(
min,
degree,
is_period=is_period,
interpolation=interpolation,
min_step=min_step,
uniform=uniform
)
def trailing_max(self, degree, is_period=False, interpolation=None, min_step=MIN_STEP, uniform=True):
return self.accumulator_map(
max,
degree,
is_period=is_period,
interpolation=interpolation,
min_step=min_step,
uniform=uniform
)
def differential(self, forward=False):
    """Return a curve of this curve's finite-difference derivative."""
    from .map import Map

    def _derivative(x, y):
        # Map supplies y, but the derivative only depends on x.
        return self.d_y(x, forward=forward)

    diff_curve = Map(self, _derivative)
    diff_curve.name = 'diff'
    return diff_curve
def subset(self, domain):
    """Return a view of this curve restricted to ``domain``."""
    from .generic import Generic
    restricted = Generic(self, domain=domain, min_step=self.min_step)
    return restricted
@classmethod
def first(cls, funcs, *args):
    """
    Combine `funcs` into a curve that returns, at each x, the first
    non-None value among the curves (evaluated in the given order).
    """
    if not isinstance(funcs, Sequence):
        funcs = [funcs] + list(args)
    from .aggregate import Aggregate
    def first_defined(x, vals):
        # Scan in order; fall back to None when every value is None.
        return next((v for v in vals if v is not None), None)
    curves = Curve.parse_many(funcs)
    return Aggregate(curves, tfm=first_defined, union=True, name='first')
@classmethod
def min(cls, funcs, *args, ignore_empty=False):
    """
    Pointwise minimum of several curves.

    With ignore_empty=True the aggregate spans the union of domains and
    None values are skipped; otherwise the intersection is used and None
    values are only tolerated defensively.
    """
    if not isinstance(funcs, Sequence):
        funcs = [funcs] + list(args)
    from .aggregate import Aggregate
    def lowest(x, vals):
        best = None
        for candidate in vals:
            if candidate is None:
                continue
            if best is None or candidate < best:
                best = candidate
        return best
    def lowest_ignoring_empty(x, vals):
        defined = [v for v in vals if v is not None]
        return min(defined, default=None)
    curves = Curve.parse_many(funcs)
    tfm = lowest_ignoring_empty if ignore_empty else lowest
    return Aggregate(curves, tfm=tfm, union=ignore_empty, name='min')
@classmethod
def max(cls, funcs, *args, ignore_empty=False):
    """
    Pointwise maximum of several curves.

    With ignore_empty=True the aggregate spans the union of domains and
    None values are skipped; otherwise the intersection is used and None
    values are only tolerated defensively.
    """
    if not isinstance(funcs, Sequence):
        funcs = [funcs] + list(args)
    from .aggregate import Aggregate
    def highest(x, vals):
        best = None
        for candidate in vals:
            if candidate is None:
                continue
            if best is None or candidate > best:
                best = candidate
        return best
    def highest_ignoring_empty(x, vals):
        defined = [v for v in vals if v is not None]
        return max(defined, default=None)
    curves = Curve.parse_many(funcs)
    tfm = highest_ignoring_empty if ignore_empty else highest
    return Aggregate(curves, tfm=tfm, union=ignore_empty, name='max')
@classmethod
def add_many(cls, funcs, *args):
    """
    Sum of several curves. The result is None (undefined) at any x
    where one of the operands is undefined.
    """
    if not isinstance(funcs, Sequence):
        funcs = [funcs] + list(args)
    from .aggregate import Aggregate
    def add_f(x, ys):
        # Propagate None: an undefined operand makes the sum undefined.
        for y in ys:
            if y is None:
                return None
        return sum(ys)
    # Parse operands (numbers -> Constant, callables -> Generic) for
    # consistency with first/min/max; parsing is idempotent for Curves.
    funcs = Curve.parse_many(funcs)
    return Aggregate(funcs, tfm=add_f, name='add', operator='+')
@classmethod
def subtract_many(cls, funcs, *args):
    """
    Left-to-right subtraction: ys[0] - ys[1] - ... The result is None
    at any x where one of the operands is undefined.
    """
    if not isinstance(funcs, Sequence):
        funcs = [funcs] + list(args)
    from .aggregate import Aggregate
    def sub_f(x, ys):
        result = 0
        for i, y in enumerate(ys):
            if y is None:
                return None
            if i == 0:
                result = y
            else:
                result -= y
        return result
    # Parse operands for consistency with first/min/max (idempotent).
    funcs = Curve.parse_many(funcs)
    return Aggregate(funcs, tfm=sub_f, name='sub', operator='-')
@classmethod
def multiply_many(cls, funcs, *args):
    """
    Product of several curves. The result is None at any x where one
    of the operands is undefined.
    """
    if not isinstance(funcs, Sequence):
        funcs = [funcs] + list(args)
    from .aggregate import Aggregate
    def mult_f(x, ys):
        # Start from 1.0 so the product is always a float.
        product = 1.0
        for y in ys:
            if y is None:
                return None
            product *= y
        return product
    # Parse operands for consistency with first/min/max (idempotent).
    funcs = Curve.parse_many(funcs)
    return Aggregate(funcs, tfm=mult_f, name='mult', operator='*')
@classmethod
def divide_many(cls, funcs, *args):
    """
    Left-to-right division: ys[0] / ys[1] / ... Returns None where an
    operand is undefined; a zero divisor saturates to +/-inf (sign
    taken from the running result) instead of raising.
    """
    if not isinstance(funcs, Sequence):
        funcs = [funcs] + list(args)
    from .aggregate import Aggregate
    def div_f(x, ys):
        result = 0
        for i, y in enumerate(ys):
            if y is None:
                return None
            if i == 0:
                result = y
            elif y == 0:
                # Saturate rather than raise ZeroDivisionError.
                result = math.inf if result >= 0 else -math.inf
            else:
                result /= y
        return result
    # Parse operands for consistency with first/min/max (idempotent).
    funcs = Curve.parse_many(funcs)
    return Aggregate(funcs, tfm=div_f, name='div', operator='/')
@classmethod
def pow_many(cls, funcs, *args):
    """
    Iterated exponentiation applied left to right, i.e.
    ((ys[0] ** ys[1]) ** ys[2]) ... Returns None where an operand is
    undefined.
    """
    if not isinstance(funcs, Sequence):
        funcs = [funcs] + list(args)
    from .aggregate import Aggregate
    # Renamed from `log_f` (copy-paste remnant of log_many): this
    # transform performs exponentiation, not logarithms.
    def pow_f(x, ys):
        result = 0
        for i, y in enumerate(ys):
            if y is None:
                return None
            if i == 0:
                result = y
            else:
                result = result ** y
        return result
    # Parse operands for consistency with first/min/max (idempotent).
    funcs = Curve.parse_many(funcs)
    return Aggregate(funcs, tfm=pow_f, name='pow', operator='^')
@classmethod
def log_many(cls, funcs, *args):
    """
    Iterated logarithm: starting from ys[0], each subsequent operand is
    used as a log base, i.e. log(log(ys[0], ys[1]), ys[2]) ... Returns
    None where an operand is undefined.
    """
    if not isinstance(funcs, Sequence):
        funcs = [funcs] + list(args)
    from .aggregate import Aggregate
    def log_f(x, ys):
        result = 0
        for i, y in enumerate(ys):
            if y is None:
                return None
            if i == 0:
                result = y
            else:
                # `y` is the base for this step.
                result = math.log(result, y)
        return result
    # Parse operands for consistency with first/min/max (idempotent).
    funcs = Curve.parse_many(funcs)
    return Aggregate(funcs, tfm=log_f, name='log')
@classmethod
def zero(cls, value=None):
    """
    Return the constant zero curve.

    `value` is accepted for backward compatibility but ignored; it now
    defaults to None so the method can be called with no arguments.
    """
    from .constant import Constant
    return Constant.zero()
@classmethod
def const(cls, value):
    """Return a constant curve that always evaluates to `value`."""
    from .constant import Constant
    return Constant(value)
@classmethod
def parse(cls, func):
    """
    Coerce `func` into a Curve.

    Accepts: None (passed through), an existing Curve, any callable
    (wrapped in Generic), a number (wrapped in Constant), an empty
    sequence or a sequence of 2-element (x, y) pairs (wrapped in
    Points), or a mapping (treated as a descriptor, see
    parse_descriptor). Anything else raises.
    """
    from .generic import Generic
    from .constant import Constant
    from .points import Points
    if func is None:
        return None
    elif isinstance(func, Curve):
        return func
    elif callable(func):
        return Generic(func)
    elif isinstance(func, Number):
        return Constant(func)
    elif isinstance(func, Sequence):
        # Parse points
        if len(func) == 0:
            return Points(func)
        else:
            if isinstance(func[0], Sequence):
                if len(func[0]) == 2:
                    return Points(func)
            # NOTE(review): sequences whose first element is not a
            # 2-element sequence fall through to the raise below —
            # presumably intentional rejection of malformed point lists.
    elif isinstance(func, Mapping):
        return cls.parse_descriptor(func)
    raise Exception('Unable to parse function')
@classmethod
def parse_descriptor(cls, d, fragment=False, current_func=None, decorators=None):
    """
    Build a Curve from a JSON-style descriptor mapping.

    Keys prefixed '$' name a constructor or (chained) Curve method;
    keys prefixed '@' are decorators applied to the parsed value
    (e.g. '@date', '@log' / '@log10', '@args'). Other keys form plain
    argument fragments. `current_func` threads the partially-built
    curve through chained '$' entries; `decorators` is the active
    decorator stack during recursion.

    Example:
    {
        "$line": {
            "points": [
                ["2020-02-12 01:23+1200", 8765.56],
                ["2020-02-30 04:50+1200", 6765.56]
            ]
        }
    }
    """
    if decorators is None:
        decorators = []
    def next_func_constructor(fname):
        # Resolve `fname` against the current curve: prefer the
        # '<fname>_many' classmethod variant, then a classmethod,
        # then an instance method; the returned flag reports which.
        f = current_func or _func_obj
        assert isinstance(f, Curve)
        ftype = type(f)
        fconstructor = None
        fconstructor_from_instance = False
        type_method_names = list(map(lambda x: x[0], inspect.getmembers(ftype, predicate=inspect.ismethod)))
        f_method_names = list(map(lambda x: x[0], inspect.getmembers(f, predicate=inspect.ismethod)))
        if f'{fname}_many' in type_method_names:
            fname = f'{fname}_many'
        if fname in type_method_names:
            def _create_class_fconstructor(fname):
                # Factory binds fname now, avoiding the late-binding
                # closure pitfall.
                def _class_fconstructor(*args, **kwargs):
                    f = current_func or _func_obj
                    fmethod = getattr(type(f), fname)
                    return fmethod(*args, **kwargs)
                return _class_fconstructor
            fconstructor = _create_class_fconstructor(fname)
            fconstructor_from_instance = False
        elif fname in f_method_names:
            def _create_fconstructor(fname):
                def _fconstructor(*args, **kwargs):
                    f = current_func or _func_obj
                    fmethod = getattr(f, fname)
                    return fmethod(*args, **kwargs)
                return _fconstructor
            fconstructor = _create_fconstructor(fname)
            fconstructor_from_instance = True
        else:
            raise ValueError(f'Bad function name: {fname}')
        return fconstructor, fconstructor_from_instance
    if isinstance(d, Mapping):
        fragment_vals = {}
        for k, v in d.items():
            if k.startswith('@'):
                # This is a decorator descriptor.
                oname = k[1:]
                decorator_i = len(decorators)
                decorators.insert(decorator_i, oname)
                v = cls.parse_descriptor(v,
                    fragment=fragment,
                    current_func=current_func,
                    decorators=decorators
                )
                del decorators[decorator_i]
                if oname.startswith('log'):
                    # Log space has ended; leave it by raising the
                    # base to the parsed value.
                    base_str = oname[3:]
                    base = int(base_str) if bool(base_str) else math.e
                    v = base ** v
                if isinstance(v, Curve):
                    # Allow chaining
                    current_func = v
                    continue
                if oname != 'args':
                    # Only let @args pass through to parent
                    if len(d) != 1:
                        raise ValueError(f'A decorator (@...) can only have siblings in a fragment')
                    return v
            elif k.startswith('$'):
                # This is a function descriptor.
                fname = k[1:]
                fconstructor = None
                fconstructor_from_instance = False
                if fname == 'const' or fname == 'constant':
                    from .constant import Constant
                    fconstructor = Constant
                elif fname == 'line':
                    from .line import Line
                    fconstructor = Line
                elif fname.startswith('log'):
                    base_str = fname[3:]
                    base = int(base_str) if bool(base_str) else math.e
                    def _dot_log(*args, **kwargs):
                        return current_func.log(**util.extend({ "base": base }, kwargs))
                    fconstructor = _dot_log
                    fconstructor_from_instance = True
                else:
                    fconstructor, fconstructor_from_instance = next_func_constructor(fname)
                func_args = cls.parse_descriptor(v,
                    fragment=True,
                    decorators=decorators
                )
                args = []
                kwargs = {}
                if isinstance(func_args, dict):
                    kwargs = func_args
                elif isinstance(func_args, list):
                    args = func_args
                else:
                    args = [func_args]
                # Check for nested args
                if '@args' in kwargs:
                    args = kwargs['@args']
                    del kwargs['@args']
                if fconstructor_from_instance and current_func is None:
                    # First positional argument becomes the receiver.
                    current_func = Curve.parse(args[0])
                    del args[0]
                elif not fconstructor_from_instance and current_func is not None:
                    # Add current function as first argument or to
                    # list at first argument
                    if bool(args) and isinstance(args[0], list):
                        args[0][0:0] = [current_func]
                    else:
                        args[0:0] = [current_func]
                current_func = fconstructor(*args, **kwargs)
                continue
            if current_func is not None:
                raise Exception(f'Unexpected key after a function: {k}')
            if isinstance(v, Mapping):
                fragment_vals[k] = cls.parse_descriptor(v,
                    fragment=True,
                    decorators=decorators
                )
            elif isinstance(v, Sequence) and not isinstance(v, (str, bytes)):
                fragment_vals[k] = cls.parse_descriptor(v,
                    fragment=True,
                    decorators=decorators
                )
            else:
                fragment_vals[k] = v
        return current_func or fragment_vals
    elif fragment:
        # NOTE(review): this Mapping branch appears unreachable — any
        # Mapping is consumed by the branch above; confirm before removal.
        if isinstance(d, Mapping):
            return {k: cls.parse_descriptor(v,
                fragment=True,
                decorators=decorators
            ) for k, v in d.items()}
        elif isinstance(d, Sequence) and not isinstance(d, (str, bytes)):
            return [cls.parse_descriptor(v,
                fragment=True,
                decorators=decorators
            ) for v in d]
        elif 'date' in decorators:
            return arrow.get(d).timestamp
        elif isinstance(d, Number):
            # Numeric leaves entering a log decorator are mapped into
            # log space here; the '@log...' handler exits it later.
            if 'log' in decorators:
                return math.log(d)
            elif 'log2' in decorators:
                return math.log(d, 2)
            elif 'log10' in decorators:
                return math.log(d, 10)
            else:
                return d
        else:
            return d
    else:
        raise TypeError('Unexpected type found while parsing a function')
@classmethod
def parse_many(cls, funcs, *args):
    """Parse every element of `funcs` (or of the varargs) into a Curve."""
    if not isinstance(funcs, Sequence):
        funcs = [funcs] + list(args)
    return [cls.parse(f) for f in funcs]
@classmethod
def count_positional_args(cls, f, default=1):
    """
    Count the positional parameters of callable `f`.

    Builtins expose no signature, so `default` is returned for them.
    """
    if not callable(f):
        raise Exception('Expected callable function')
    if inspect.isbuiltin(f):
        return default
    positional_kinds = (
        inspect.Parameter.POSITIONAL_ONLY,
        inspect.Parameter.POSITIONAL_OR_KEYWORD,
    )
    params = inspect.signature(f).parameters.values()
    return sum(1 for p in params if p.kind in positional_kinds)
# Operator overloads: arithmetic delegates to the *_many aggregate
# constructors, so numbers and callables work as operands.
def __add__(self, other):
    return Curve.add_many([self, other])
def __sub__(self, other):
    return Curve.subtract_many([self, other])
def __mul__(self, other):
    return Curve.multiply_many([self, other])
def __truediv__(self, other):
    return Curve.divide_many([self, other])
def __pow__(self, other):
    return Curve.pow_many([self, other])
# Reflected variants preserve operand order (e.g. 2 - curve).
def __radd__(self, other):
    return Curve.add_many([other, self])
def __rsub__(self, other):
    return Curve.subtract_many([other, self])
def __rmul__(self, other):
    return Curve.multiply_many([other, self])
def __rtruediv__(self, other):
    return Curve.divide_many([other, self])
def __rpow__(self, other):
    return Curve.pow_many([other, self])
# Unary operators delegate to curve transforms.
def __neg__(self):
    return self.additive_inverse()
def __pos__(self):
    return self
def __abs__(self):
    return self.abs()
def _additive_inverse(x, y):
if y is None:
return None
return -y
def _multiplicative_inverse(x, y):
if y is None:
return None
return 1 / y
def _abs(x, y):
if y is None:
return None
return abs(y)
def _callable_arg_len(f, vararg_ret_val):
args, varargs, _, _ = inspect.getargspec(f)
if varargs is not None:
return vararg_ret_val
arg_len = len(args)
if arg_len == 0:
return 0
if args[0] == 'self':
arg_len -= 1
return arg_len
# Anchor instance used by parse_descriptor to resolve method names when
# no partially-built curve exists yet.
_func_obj = Curve()
|
/home/runner/.cache/pip/pool/71/fb/dc/ddd9d4c4d76c4a0959ece9968a38e3b0429b9b6157ec9afb70da86d584 |
"""mhc_dashboard URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from user_dashboard import views as dashboard
# URL routes for the user_dashboard app. Django resolves patterns in
# order: the first match wins, so more specific 'company-lists/...'
# sub-paths must stay above the bare 'company-lists/' entry.
urlpatterns = [
    path('entry-employees/', dashboard.add_user_dashboard, name='entry-employees'),
    path('entry-companies/', dashboard.add_company, name='entry-companies'),
    path('company-lists/result-company/<path:tipe_filter>/<path:keywords>', dashboard.company_lists, name='result-company'),
    path('company-lists/result-message/', dashboard.message_lists, name='result-message'),
    path('remove-company/<path:dash_id>', dashboard.remove_company, name='remove-company'),
    path('employee-lists/result-employee/<path:tipe_filter>/<path:keywords>', dashboard.employee_lists, name='result-employee'),
    path('remove-employee/<path:my_email>', dashboard.remove_employee, name='remove-employee'),
    path('company-lists/', dashboard.filter_company, name='company-lists'),
    path('detail-company/<path:dash_id>', dashboard.comp_detail_view, name='detail-company'),
    path('detail-user/<path:email>', dashboard.user_detail_view, name='detail-user'),
    path('account-user/', dashboard.my_detail_view, name='account-user'),
    path('employee-lists/', dashboard.filter_employee, name='employee-lists'),
    path('user-lists/', dashboard.user_lists, name='user-lists'),
    path('change-password/', dashboard.change_password_view, name='change-password'),
    path('login/', dashboard.login_view, name='login'),
    path('logout/', dashboard.logout_view, name='logout'),
    # NOTE(review): a second 'company-lists/' route pointing at
    # dashboard.company_lists was removed: Django uses the first
    # matching pattern, so it could never be reached and it also
    # reused the reversal name 'company-lists'.
    path('register/', dashboard.register_view, name='register'),
]
# Custom error handlers (dotted view paths, per Django convention).
handler404 = 'user_dashboard.views.handler404'
handler500 = 'user_dashboard.views.handler500'
|
# Package identifier for the gMLPhase project.
name='gMLPhase'
# Re-export the model, trainer and tester at package level; the
# remaining modules are kept commented out until they are ready.
# from .gMLP_utils import *
from .gMLP_torch import *
from .trainer import trainer
from .tester import tester
# from .tester import tester
# from .predictor import predictor
# from .mseed_predictor import mseed_predictor
|
from tempfile import TemporaryDirectory
from finntk.utils import ResourceMan, urlretrieve
import os
# If we don't ensure this, we might end up with a relative path for LEMMA_FILENAME
os.makedirs(os.path.expanduser("~/.conceptnet5"), exist_ok=True)
from conceptnet5.language.lemmatize import LEMMA_FILENAME # noqa
class ConceptNetWiktionaryResMan(ResourceMan):
    """Resource manager that downloads and installs the ConceptNet
    Wiktionary lemma database."""
    RESOURCE_NAME = "conceptnet_wiktionary"
    URL = "https://archive.org/download/wiktionary.db/wiktionary.db.gz"
    def __init__(self):
        # Memoized bootstrap state; None means "not checked yet".
        self._cached_is_bootstrapped = None
        super().__init__()
    def _is_bootstrapped(self):
        """Report whether the lemma DB exists, memoizing the check."""
        if self._cached_is_bootstrapped is not None:
            return self._cached_is_bootstrapped
        bootstrapped = os.path.exists(LEMMA_FILENAME)
        self._cached_is_bootstrapped = bootstrapped
        return bootstrapped
    def _bootstrap(self, _res=None):
        """Download the gzipped DB and decompress it to LEMMA_FILENAME."""
        from plumbum.cmd import gunzip
        tempdir = TemporaryDirectory()
        archive = urlretrieve(
            self.URL, filename=os.path.join(tempdir.name, "enwiktionary.gz")
        )
        try:
            gunzip(archive, LEMMA_FILENAME)
        finally:
            # Always drop the download and the temp dir, even on failure.
            os.remove(archive)
            tempdir.cleanup()
        self._cached_is_bootstrapped = True
conceptnet_wiktionary = ConceptNetWiktionaryResMan()
|
# -*- coding: utf-8 -*-
"""HDF5 Dataset Generators
The generator class is responsible for yielding batches of images and labels from our HDF5 database.
Attributes:
dataset_path (str):
Path to the HDF5 database that stores our images and corresponding class labels.
batch_size (int):
Size of mini-batches to yield when training our network.
preprocessors (list):
List of image preprocessors we are going to apply (default: None)
augmentation (boole):
If True, then a Keras ImageDataGenerator will be supplied to augment the data directly
inside our HDF5DatasetGenerator (default: None).
binarize (int):
If True, then the labels will be binarized as one-hot encoded vector (default: True)
classes (int):
Number of unique class labels in our database (default: 2).
"""
from keras.utils import np_utils
import numpy as np
import h5py
class HDF5DatasetGenerator:
    """Yield mini-batches of images and class labels from an HDF5 database."""
    def __init__(self, dataset_path, batch_size, preprocessors=None, augmentation=None, binarize=True, classes=2):
        """Initialize the dataset generator.
        Arguments:
            dataset_path {str} -- path to the HDF5 database
            batch_size {int} -- size of mini-batches when training the network
        Keyword Arguments:
            preprocessors {list} -- list of image preprocessors (default: {None})
            augmentation {object} -- Keras ImageDataGenerator used to augment batches (default: {None})
            binarize {bool} -- labels will be encoded as one-hot vector (default: {True})
            classes {int} -- number of unique class labels in the database (default: {2})
        """
        # store the batch size, preprocessors, and data augmentor, whether or
        # not the labels should be binarized, along with the total number of classes
        self.batch_size = batch_size
        self.preprocessors = preprocessors
        self.augmentation = augmentation
        self.binarize = binarize
        self.classes = classes
        # Open read-only: this class only reads from the database. Recent
        # h5py versions require an explicit mode; the old bare
        # h5py.File(path) call relied on a deprecated default.
        self.database = h5py.File(dataset_path, "r")
        self.num_images = self.database["labels"].shape[0]
    def generator(self, passes=np.inf):
        """Yield (images, labels) batches for Keras .fit_generator.
        Keyword Arguments:
            passes {int} -- total number of epochs to loop over the data
                (default: {np.inf}, i.e. loop forever)
        """
        # initialize the epoch count
        epochs = 0
        # keep looping until we reach the desired number of epochs
        while epochs < passes:
            # loop over the HDF5 database in batch-sized strides
            for i in np.arange(0, self.num_images, self.batch_size):
                # extract the images and labels from the HDF database
                images = self.database["images"][i : i + self.batch_size]
                labels = self.database["labels"][i : i + self.batch_size]
                # one-hot encode the labels if requested
                if self.binarize:
                    labels = np_utils.to_categorical(labels, self.classes)
                # apply each preprocessor to every image in order
                if self.preprocessors is not None:
                    processed_images = []
                    for image in images:
                        for preprocessor in self.preprocessors:
                            image = preprocessor.preprocess(image)
                        processed_images.append(image)
                    images = np.array(processed_images)
                # if the data augmentor exists, apply it
                if self.augmentation is not None:
                    (images, labels) = next(self.augmentation.flow(images, labels, batch_size=self.batch_size))
                # yield a tuple of images and labels
                yield (images, labels)
            # increment the total number of epochs
            epochs += 1
    def close(self):
        """Close the underlying HDF5 file handle."""
        self.database.close()
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
k = 15  # Number of neighbors used by the classifier
h = .02  # step size in the mesh used for the decision-boundary plot
# Color maps: light for the decision regions, bold for the data points
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
def visualize_knn(n_samples=100, n_features=2):
    """Plot k-NN decision boundaries on a random 3-class dataset.

    One figure is produced per weighting scheme ('uniform', 'distance').
    """
    dataset_kwargs = {
        'n_samples': n_samples,
        'n_features': n_features,
        'n_classes': 3,
        'n_redundant': 0,
        'n_clusters_per_class': 1,
        'class_sep': .45,
    }
    X, y = datasets.make_classification(**dataset_kwargs)
    for weights in ['uniform', 'distance']:
        # Fit a k-NN classifier with the current weighting scheme.
        classifier = neighbors.KNeighborsClassifier(k, weights=weights)
        classifier.fit(X, y)
        # Predict over a dense mesh covering the data, padded by one
        # unit on each side, to paint the decision regions.
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                             np.arange(y_min, y_max, h))
        Z = classifier.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
        plt.figure()
        plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
        # Overlay the training points.
        plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
        plt.xlim(xx.min(), xx.max())
        plt.ylim(yy.min(), yy.max())
        plt.title("3-Class classification (k = %i, weights = '%s')"
                  % (k, weights))
    plt.show()
if __name__ == '__main__':
    visualize_knn()
|
__all__ = ["cli", "sf2heat"] |
# following PEP 386
__version__ = "0.6.7"
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Seismic phase picking on continuous data
Input: SAC
Output: SAC
@author: wuyu
"""
import os
import logging
import numpy as np
import tensorflow as tf
from glob import glob
from obspy import read
from ARRU_tools.multitask_build_model import unets
from ARRU_tools.data_utils import PhasePicker
from ARRU_tools.data_utils import sac_len_complement
# Pin computation to the first GPU and let TensorFlow grow device
# memory on demand instead of pre-allocating it all.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
gpu_devices = tf.config.list_physical_devices('GPU')
for device in gpu_devices:
    tf.config.experimental.set_memory_growth(device, True)
logging.basicConfig(level=logging.INFO,
        format='%(levelname)s : %(asctime)s : %(message)s')
mdl_hdr = 'ARRU_multitask_20s'
### define waveform and output directories
datadir = './wf_data/Ridgecrest_WFs'
outdir = f'./out_data_example/ARRU_pred'
# NOTE(review): contrary to the original comment ("uncomment this line
# to remove prediction directories"), this destructive cleanup IS
# active — comment it out if previous predictions must be kept.
os.system(f'rm -rf {outdir}')
### load self-defined model and data process framework
model_h5 = os.path.join(f'pretrained_model/{mdl_hdr}', 'train.hdf5')
### prediction parameters and information
# delta, we use broadband seismometers with sample rate of 100 Hz
dt = 0.01
# data length of model input (samples)
pred_npts = 2001
# sliding window length (seconds) for making predictions
pred_interval_sec = 4
# set `bandpass=None` to disable waveform bandpass filtering (Hz)
bandpass = [1, 45]
# load model and weights
frame = unets()
model = frame.build_attR2unet(model_h5,
        input_size=(pred_npts, 3))
# Post-processing parameters; defined but only used if passed to
# PhasePicker below (currently it receives postprocess_config=None).
postprocess_config={
    'mask_trigger': [0.1, 0.1],
    'mask_len_thre': 0.5,
    'mask_err_win':0.5,
    'trigger_thre':0.3
    }
### initialize continuous data processing framework
# you could specify `postprocess_config=postprocess_config`
# to enable prediction postprocessing
picker = PhasePicker(model=model, pred_npts=pred_npts,
        dt=dt, postprocess_config=None)
## find waveform directories (one per day: ????.???.?? pattern)
Ddir = np.unique(glob(os.path.join(datadir, '????.???.??')))
for D in range(len(Ddir)):
    print(f"Directory: {D+1}/{len(Ddir)}")
    ## Build per-station wildcard indexes so obspy.read picks up all
    ## three channels of a station at once in the next loop.
    sacs = glob(os.path.join(Ddir[D], '*.sac'))
    wf_idx = np.unique([ '.'.join(
        os.path.basename(s).split('.')[:3])[:-1]+'?.'+\
        '.'.join(os.path.basename(s).split('.')[3:])
            for s in sacs])
    for ct, p in enumerate(wf_idx):
        logging.info(f"Processing {os.path.join(Ddir[D], wf_idx[ct])}:"
            f" {ct+1}/{len(wf_idx)} | Directory: {D+1}/{len(Ddir)}")
        wf = read(os.path.join(Ddir[D], wf_idx[ct]))
        if bandpass:
            wf = wf.detrend('demean').filter('bandpass',
                freqmin=bandpass[0], freqmax=bandpass[1])
        ### complement sac data when the length is not consistent across channels
        wf = sac_len_complement(wf)
        ### phase picking and detection
        array_P_med, array_S_med, array_M_med = picker.predict(
            wf, postprocess=False)
        ### Sanity check for inf/NaN in predictions.
        ### NOTE(review): asserts are stripped under `python -O`; raise
        ### an explicit exception if this check must always run.
        assert np.any(np.isinf(array_P_med))==False
        assert np.any(np.isnan(array_P_med))==False
        ### write continuous predictions into sac format, one trace per
        ### output (P probability, S probability, detection mask)
        outDdir = os.path.join(outdir, os.path.basename(Ddir[D]))
        if not os.path.exists(outDdir):
            os.makedirs(outDdir)
        W_name = wf_idx[ct].replace('?', '')
        W_out = [W_name+'.P', W_name+'.S', W_name+'.mask']
        W_data = [array_P_med, array_S_med, array_M_med]
        W_chn = ['P', 'S', 'mask']
        for k in range(3):
            out_name = os.path.join(outDdir, W_out[k])
            W = wf[0].copy()
            W.data = W_data[k]
            W.stats.channel = W_chn[k]
            W.write(out_name, format='SAC')
|
from .shadow import Shadow
__all__ = ["Shadow"]
|
# Generated from Dynabuffers.g4 by ANTLR 4.7
from antlr4 import *
if __name__ is not None and "." in __name__:
from .DynabuffersParser import DynabuffersParser
else:
from DynabuffersParser import DynabuffersParser
# This class defines a complete generic visitor for a parse tree produced by DynabuffersParser.
class DynabuffersVisitor(ParseTreeVisitor):
    """Generic ANTLR visitor for DynabuffersParser parse trees.

    Every visit method simply descends into the node's children;
    subclass and override the methods you need. This file is
    auto-generated by ANTLR — regenerate rather than hand-edit logic.
    """
    # Visit a parse tree produced by DynabuffersParser#compilation.
    def visitCompilation(self, ctx:DynabuffersParser.CompilationContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by DynabuffersParser#enumType.
    def visitEnumType(self, ctx:DynabuffersParser.EnumTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by DynabuffersParser#classType.
    def visitClassType(self, ctx:DynabuffersParser.ClassTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by DynabuffersParser#unionType.
    def visitUnionType(self, ctx:DynabuffersParser.UnionTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by DynabuffersParser#fieldType.
    def visitFieldType(self, ctx:DynabuffersParser.FieldTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by DynabuffersParser#dataType.
    def visitDataType(self, ctx:DynabuffersParser.DataTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by DynabuffersParser#arrayType.
    def visitArrayType(self, ctx:DynabuffersParser.ArrayTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by DynabuffersParser#optionType.
    def visitOptionType(self, ctx:DynabuffersParser.OptionTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by DynabuffersParser#namespaceType.
    def visitNamespaceType(self, ctx:DynabuffersParser.NamespaceTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by DynabuffersParser#classOptions.
    def visitClassOptions(self, ctx:DynabuffersParser.ClassOptionsContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by DynabuffersParser#unionOptions.
    def visitUnionOptions(self, ctx:DynabuffersParser.UnionOptionsContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by DynabuffersParser#fieldOptions.
    def visitFieldOptions(self, ctx:DynabuffersParser.FieldOptionsContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by DynabuffersParser#annotation.
    def visitAnnotation(self, ctx:DynabuffersParser.AnnotationContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by DynabuffersParser#value.
    def visitValue(self, ctx:DynabuffersParser.ValueContext):
        return self.visitChildren(ctx)
del DynabuffersParser |
class Track:
    """A named track with a file path.

    The class-level `_counter` records how many Track instances have
    ever been constructed.
    """
    _counter = 0
    def __init__(self, name, path):
        # Count every construction, shared across all instances.
        Track._counter += 1
        self._name = name
        self._path = path
    def get_name(self):
        """Return the track's name."""
        return self._name
    def get_path(self):
        """Return the track's file path."""
        return self._path
    @staticmethod
    def get_counter():
        """Return how many Track objects have been constructed."""
        return Track._counter
|
# coding: utf-8
"""
ELEMENTS API
The version of the OpenAPI document: 2
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from elements_sdk.configuration import Configuration
class TimelineExportRequest(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'project': 'object',
        'sequence': 'str',
        'format': 'str'
    }
    attribute_map = {
        'project': 'project',
        'sequence': 'sequence',
        'format': 'format'
    }
    def __init__(self, project=None, sequence=None, format=None, local_vars_configuration=None):  # noqa: E501
        """TimelineExportRequest - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._project = None
        self._sequence = None
        self._format = None
        self.discriminator = None
        # Assign through the property setters so validation runs.
        self.project = project
        self.sequence = sequence
        self.format = format
    @property
    def project(self):
        """Gets the project of this TimelineExportRequest.  # noqa: E501
        :return: The project of this TimelineExportRequest.  # noqa: E501
        :rtype: object
        """
        return self._project
    @project.setter
    def project(self, project):
        """Sets the project of this TimelineExportRequest.
        :param project: The project of this TimelineExportRequest.  # noqa: E501
        :type: object
        """
        if self.local_vars_configuration.client_side_validation and project is None:  # noqa: E501
            raise ValueError("Invalid value for `project`, must not be `None`")  # noqa: E501
        self._project = project
    @property
    def sequence(self):
        """Gets the sequence of this TimelineExportRequest.  # noqa: E501
        :return: The sequence of this TimelineExportRequest.  # noqa: E501
        :rtype: str
        """
        return self._sequence
    @sequence.setter
    def sequence(self, sequence):
        """Sets the sequence of this TimelineExportRequest.
        :param sequence: The sequence of this TimelineExportRequest.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and sequence is None:  # noqa: E501
            raise ValueError("Invalid value for `sequence`, must not be `None`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                sequence is not None and len(sequence) < 1):
            raise ValueError("Invalid value for `sequence`, length must be greater than or equal to `1`")  # noqa: E501
        self._sequence = sequence
    @property
    def format(self):
        """Gets the format of this TimelineExportRequest.  # noqa: E501
        :return: The format of this TimelineExportRequest.  # noqa: E501
        :rtype: str
        """
        return self._format
    @format.setter
    def format(self, format):
        """Sets the format of this TimelineExportRequest.
        :param format: The format of this TimelineExportRequest.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and format is None:  # noqa: E501
            raise ValueError("Invalid value for `format`, must not be `None`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                format is not None and len(format) < 1):
            raise ValueError("Invalid value for `format`, length must be greater than or equal to `1`")  # noqa: E501
        self._format = format
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, TimelineExportRequest):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, TimelineExportRequest):
            return True
        return self.to_dict() != other.to_dict()
|
import os
import unittest
from sakuyaclient.NotificationCentre import NotificationCentre
class NotificationCentreTest(unittest.TestCase):
    """Smoke tests for NotificationCentre.poll()."""
    def setUp(self):
        # 300 is presumably the polling interval/window — TODO confirm
        # against NotificationCentre's constructor signature.
        self._notifications = NotificationCentre(300)
    def test_poll(self):
        results = self._notifications.poll()
        self.assertIsNotNone(results)
    def test_selective_poll(self):
        # NOTE(review): identical to test_poll — likely a placeholder;
        # a selective-poll argument was probably intended here.
        results = self._notifications.poll()
        self.assertIsNotNone(results)
    def test_multi_poll(self):
        # First poll should return diffs
        results = self._notifications.poll()
        for key in results.keys():
            self.assertNotEqual(len(results[key][1]), 0)
        # Second poll should return no diffs
        results = self._notifications.poll()
        for key in results.keys():
            self.assertEqual(len(results[key][1]), 0)
if __name__ == '__main__':
unittest.main()
|
import responses
import pytest
from urllib.parse import urlencode
from tests.util import random_str
from tests.util import mock_http_response
from binance.spot import Spot as Client
from binance.error import ParameterRequiredError
mock_item = {"key_1": "value_1", "key_2": "value_2"}
key = random_str()
secret = random_str()
params = {"loanCoin": "BNB", "collateralCoin": "BTC"}
parameterized_test_data = [
({"loanCoin": "BNB", "collateralCoin": ""}),
({"loanCoin": "", "collateralCoin": "BTC"}),
]
@pytest.mark.parametrize("params", parameterized_test_data)
def test_futures_loan_calc_max_adjust_amount_with_missing_field(params):
    """calcMaxAdjustAmount should reject requests missing loanCoin or collateralCoin."""
    client = Client(key, secret)
    client.futures_loan_calc_max_adjust_amount.when.called_with(**params).should.throw(
        ParameterRequiredError
    )
@mock_http_response(
    responses.GET,
    "/sapi/v2/futures/loan/calcMaxAdjustAmount\\?" + urlencode(params),
    mock_item,
    200,
)
def test_futures_loan_calc_max_adjust_amount():
    """Happy path: GET calcMaxAdjustAmount returns the mocked payload."""
    client = Client(key, secret)
    response = client.futures_loan_calc_max_adjust_amount(**params)
    response.should.equal(mock_item)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import collections
import json
import os
import time
from functools import wraps
import flask
from flask import (Flask, request, render_template, json as flask_json,
redirect, session, url_for)
from launchpadlib.credentials import Credentials, AccessToken
from launchpadlib.uris import LPNET_WEB_ROOT
from launchpad_reporting import sla_reports
from launchpad_reporting.db import db
from launchpad_reporting.launchpad import (LaunchpadClient,
LaunchpadAnonymousClient)
from launchpad_reporting.launchpad.lpdata import (authorization_url,
SimpleLaunchpad)
# Directory containing this file; used to locate the bundled JSON data files.
path_to_data = "/".join(os.path.abspath(__file__).split('/')[:-1])
# Milestone-id lookup table: data[project][milestone] -> Launchpad milestone id
# (see its use in statistic_for_project_by_milestone below).
with open('{0}/data.json'.format(path_to_data)) as data_file:
    data = json.load(data_file)
# Team -> members mapping; insertion order preserved for stable UI ordering.
with open('{0}/file.json'.format(path_to_data)) as teams_file:
    teams_data = json.load(teams_file, object_pairs_hook=collections.OrderedDict)
# Anonymous (unauthenticated) Launchpad client used for read-only queries.
launchpad = LaunchpadAnonymousClient()
app = Flask(__name__)
app_config = sla_reports.read_config_file()
# NOTE(review): hard-coded session secret — should come from config/env.
app.secret_key = "lei3raighuequic3Pephee8duwohk8"
def print_select(dct, param, val):
    """Return the HTML ``selected`` attribute when *val* is an active choice.

    Emits ``selected="selected"`` if *dct* maps *param* to a collection
    containing *val*; otherwise returns an empty string. Used from Jinja
    templates to pre-select filter options.
    """
    if param in dct and val in dct[param]:
        return "selected=\"selected\""
    return ""
def get_report_by_name(name):
    """Return the first configured report entry whose name matches, else raise."""
    matching = [r for r in app_config['reports'] if r['name'] == name]
    if matching:
        return matching[0]
    raise RuntimeError('Can not find report %s!' % name)
def filter(request, bugs):
    """Apply the query-string filters from *request* to *bugs*.

    NOTE(review): shadows the builtin ``filter`` within this module.

    :param request: the Flask request whose ``args`` carry the filter values
    :param bugs: iterable of bugs to narrow down
    :returns: (filtered bugs, the filter dict that was applied)
    """
    filters = {
        'status': request.args.getlist('status'),
        'importance': request.args.getlist('importance'),
        'assignee': request.args.getlist('assignee'),
        'criteria': request.args.getlist('criteria'),
        'tags': request.args.getlist('tags'),
        'created_from': request.args.get('created_from'),
        'created_to': request.args.get('created_to'),
        'triaged_from': request.args.get('triaged_from'),
        'triaged_to': request.args.get('triaged_to'),
        'fix_committed_from': request.args.get('fix_committed_from'),
        'fix_committed_to': request.args.get('fix_committed_to'),
        'fix_released_from': request.args.get('fix_released_from'),
        'fix_released_to': request.args.get('fix_released_to'),
    }
    # NOTE(review): mutates the module-level teams_data on every call by
    # injecting a synthetic 'Unknown' team.
    teams_data['Unknown'] = {'unknown': []}
    # A recognised team tab overrides any explicit assignee filter with that
    # team's member list.
    if 'tab_name' in request.args and request.args['tab_name'] in teams_data:
        filters['assignee'] = teams_data[request.args['tab_name']]
    bugs = launchpad.lpdata.filter_bugs(bugs, filters, teams_data)
    return bugs, filters
# Milestone highlighted by default across the UI.
KEY_MILESTONE = "6.1"
# All known milestones, read once from the bugs DB at import time.
MILESTONES = db.bugs.milestones.find_one()["Milestone"]
# NOTE(review): `flag` appears unused in this module — confirm before removing.
flag = False
# Cache of authenticated LaunchpadClient instances, keyed by OAuth token key.
user_agents = {}
# Expose helpers and shared data to all Jinja templates.
app.jinja_env.globals.update(print_select=print_select,
                             get_report_by_name=get_report_by_name,
                             app_config=app_config,
                             key_milestone=KEY_MILESTONE,
                             get_update_time=launchpad.get_update_time)
def get_access_token(credentials):
    """Exchange the session's request token for a Launchpad access token.

    On success, caches an authenticated LaunchpadClient keyed by the access
    token, stores the token parts in the session and marks it authorized.
    Fix: removed the unused ``request_token_key`` local and the unused
    exception binding.

    :param credentials: launchpadlib Credentials with the consumer set
    :returns: (should_redirect, redirect_url, is_authorized) tuple
    """
    global user_agents
    credentials._request_token = AccessToken.from_params(
        session['request_token_parts'])
    try:
        credentials.exchange_request_token_for_access_token(LPNET_WEB_ROOT)
    except Exception:
        # The user may have declined authorization or the token expired;
        # treat any failure as "not authorized" rather than erroring out.
        return (False, None, False)
    user_agents[credentials.access_token.key] = LaunchpadClient(credentials)
    session['access_token_parts'] = {
        'oauth_token': credentials.access_token.key,
        'oauth_token_secret': credentials.access_token.secret,
        'lp.context': credentials.access_token.context
    }
    session['is_authorized'] = True
    # The request token is single-use; drop it now that it has been exchanged.
    del session['request_token_parts']
    return (False, None, True)
def use_access_token(credentials):
    """Reuse the session's access token, rebuilding the client cache entry if missing.

    :returns: (should_redirect, redirect_url, is_authorized) tuple
    """
    global user_agents
    token_key = session['access_token_parts']['oauth_token']
    if token_key not in user_agents:
        credentials.access_token = AccessToken.from_params(
            session['access_token_parts'])
        user_agents[credentials.access_token.key] = LaunchpadClient(credentials)
    session['is_authorized'] = True
    return (False, None, True)
def get_and_authorize_request_token(credentials):
    """Obtain a fresh request token, stash it in the session, build the approval URL.

    :returns: (should_redirect, redirect_url, is_authorized); the caller must
        redirect the user to Launchpad to approve the token.
    """
    credentials.get_request_token(web_root=LPNET_WEB_ROOT)
    token = credentials._request_token
    session['request_token_parts'] = {
        'oauth_token': token.key,
        'oauth_token_secret': token.secret,
        'lp.context': token.context
    }
    auth_url = authorization_url(LPNET_WEB_ROOT, request_token=token.key)
    session['is_authorized'] = False
    return (True, auth_url, False)
def process_launchpad_authorization():
    """Drive the three-step Launchpad OAuth dance based on session state.

    :returns: (should_redirect, redirect_url, is_authorized) tuple
    """
    global user_agents
    credentials = Credentials()
    SimpleLaunchpad.set_credentials_consumer(credentials,
                                             "launchpad-reporting-www")
    # Nothing to do until the user explicitly asked to log in.
    if not ('should_authorize' in session and session['should_authorize']):
        return (False, None, False)
    if 'request_token_parts' in session:
        return get_access_token(credentials)
    if 'access_token_parts' in session:
        return use_access_token(credentials)
    return get_and_authorize_request_token(credentials)
def handle_launchpad_auth(f):
    """Decorator: run the OAuth flow before *f* and inject ``is_authorized``.

    Redirects to Launchpad while user approval is pending. If the wrapped
    view raises because the access token expired, the cached token parts are
    dropped and the view is retried once in anonymous mode.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        should_redirect, lp_url, is_authorized = process_launchpad_authorization()
        if should_redirect:
            return redirect(lp_url)
        try:
            kwargs.update({'is_authorized': is_authorized})
            return f(*args, **kwargs)
        except Exception as e:
            # launchpadlib HTTP errors expose the response body as `content`.
            # NOTE(review): on Python 3 `content` may be bytes, which would
            # make the `in` test raise TypeError — confirm target runtime.
            if hasattr(e, "content") and "Expired token" in e.content:
                if 'access_token_parts' in session:
                    del session['access_token_parts']
                session['should_authorize'] = False
                # Retry once without authorization.
                kwargs.update({'is_authorized': False})
                return f(*args, **kwargs)
            else:
                raise e
    return decorated
@app.route('/project/<project_name>/bug_table_for_status/<bug_type>/'
           '<milestone_name>/bug_list/')
@handle_launchpad_auth
def bug_list(project_name, bug_type, milestone_name, is_authorized=False):
    """Render the list of bugs for one status group and milestone."""
    project = launchpad.get_project(project_name)
    tags = request.args['tags'].split(',') if 'tags' in request.args else None
    # "New" bugs are not tied to any milestone.
    if bug_type == "New":
        milestone_name = None
    bugs = launchpad.get_bugs(project_name=project_name,
                              statuses=launchpad.BUG_STATUSES[bug_type],
                              milestone_name=milestone_name,
                              tags=tags)
    return render_template(
        "bug_list.html",
        is_authorized=is_authorized,
        project=project,
        bugs=bugs,
        bug_type=bug_type,
        milestone_name=milestone_name,
        selected_bug_table=True,
        prs=list(db.prs),
    )
@app.route('/project/<project_name>/bug_list_for_sbpr/<milestone_name>/'
           '<bug_type>/<sbpr>')
@handle_launchpad_auth
def bug_list_for_sbpr(project_name, bug_type, milestone_name, sbpr, is_authorized=False):
    """Render bugs for one subproject (or all) filtered by a report category."""
    subprojects = list(db.subprs) if sbpr == 'all' else [sbpr]
    # Map the URL category onto (display label, status group, importance filter).
    categories = {
        "done": ("Closed", "Closed", []),
        "total": ("Total", "All", []),
        "high": ("High and Critical", "NotDone", ["High", "Critical"]),
        "incomplete": ("Incomplete", "Incomplete", []),
    }
    bugs_type_to_print, bug_statuses, bug_importance = categories.get(
        bug_type, ("", "", []))
    # De-duplicate: the same bug may carry several subproject tags.
    bugs = list(set(launchpad.get_bugs(
        project_name=project_name,
        statuses=launchpad.BUG_STATUSES[bug_statuses],
        milestone_name=milestone_name,
        tags=subprojects,
        importance=bug_importance)))
    return render_template("bug_table_sbpr.html",
                           is_authorized=is_authorized,
                           project=project_name,
                           prs=list(db.prs),
                           bugs=bugs,
                           sbpr=sbpr,
                           milestone_name=milestone_name,
                           milestones=MILESTONES,
                           bugs_type_to_print=bugs_type_to_print)
@app.route('/project/<project_name>/api/release_chart_trends/'
           '<milestone_name>/get_data')
@handle_launchpad_auth
def bug_report_trends_data(project_name, milestone_name, is_authorized=False):
    """JSON payload for the release-chart trends graph."""
    chart = launchpad.release_chart(project_name, milestone_name)
    return flask_json.dumps(chart.get_trends_data())
@app.route('/project/<project_name>/api/release_chart_incoming_outgoing/'
           '<milestone_name>/get_data')
@handle_launchpad_auth
def bug_report_get_incoming_outgoing_data(project_name, milestone_name, is_authorized=False):
    """JSON payload for the incoming/outgoing release chart."""
    chart = launchpad.release_chart(project_name, milestone_name)
    return flask_json.dumps(chart.get_incoming_outgoing_data())
@app.route('/project/<project_name>/bug_table_for_status/'
           '<bug_type>/<milestone_name>')
@handle_launchpad_auth
def bug_table_for_status(project_name, bug_type, milestone_name, is_authorized=False):
    """Render the per-status bug table page."""
    project = launchpad.get_project(project_name)
    # "New" bugs are milestone-agnostic.
    milestone = None if bug_type == "New" else milestone_name
    return render_template("bug_table.html",
                           is_authorized=is_authorized,
                           project=project,
                           prs=list(db.prs),
                           milestone_name=milestone)
@app.route('/project/<project_name>/bug_trends/<milestone_name>/')
@handle_launchpad_auth
def bug_trends(project_name, milestone_name, is_authorized=False):
    """Render the bug-trends chart page for a project and milestone."""
    proj = launchpad.get_project(project_name)
    context = {
        "is_authorized": is_authorized,
        "project": proj,
        "milestone_name": milestone_name,
        "selected_bug_trends": True,
        "prs": list(db.prs),
    }
    return render_template("bug_trends.html", **context)
def milestone_based_report(report):
    """Build a view function that renders *report* for a given milestone.

    Registered dynamically (see the loop over app_config['reports'] below)
    for every report whose 'parameter' is 'milestone'.
    """
    @handle_launchpad_auth
    def handle_report(milestone_name, is_authorized):
        # Use the authenticated client when available so restricted bugs
        # are included in the report data.
        user_agent = None
        if is_authorized:
            oauth_token = session['access_token_parts']['oauth_token']
            user_agent = user_agents[oauth_token]
        bugs = sla_reports.get_reports_data(report['name'], ['mos', 'fuel'],
                                            milestone_name, user_agent)
        bugs, filters = filter(request, bugs)
        return flask.render_template(
            "bugs_lifecycle_report.html",
            is_authorized=is_authorized,
            report=report,
            milestone_name=milestone_name,
            milestones=MILESTONES,
            all_bugs=bugs,
            teams=teams_data,
            filters=filters,
        )
    return handle_report
def project_based_report(report):
    """Build a view function that renders *report* for a given project.

    Registered dynamically (see the loop over app_config['reports'] below)
    for every report whose 'parameter' is 'project'.
    """
    @handle_launchpad_auth
    def handle_report(project, is_authorized):
        # Use the authenticated client when available so restricted bugs
        # are included in the report data.
        user_agent = None
        if is_authorized:
            oauth_token = session['access_token_parts']['oauth_token']
            user_agent = user_agents[oauth_token]
        bugs = sla_reports.get_reports_data(report['name'], [project], None, user_agent)
        bugs, filters = filter(request, bugs)
        return flask.render_template(
            "bugs_lifecycle_report.html",
            is_authorized=is_authorized,
            report=report,
            all_bugs=bugs,
            teams=teams_data,
            filters=filters,
        )
    return handle_report
# Register one URL rule per configured report, dispatching on whether the
# report is parameterised by milestone or by project.
for report in app_config['reports']:
    if report['parameter'] == 'milestone':
        handler = milestone_based_report(report)
        url = '/%s/<milestone_name>' % report['name']
    elif report['parameter'] == 'project':
        handler = project_based_report(report)
        url = '/%s/<project>' % report['name']
    else:
        raise RuntimeError('Invalid report parameter: %s' % report['parameter'])
    app.add_url_rule(url, report['name'], handler)
@app.route('/project/<project_name>/<milestone_name>/project_statistic/<tag>/')
@handle_launchpad_auth
def statistic_for_project_by_milestone_by_tag(project_name, milestone_name,
                                              tag, is_authorized=False):
    """Per-milestone statistics page for one subproject tag.

    Fix: removed the dead ``if project_name == "fuel"`` branch that repeated
    the identical ``milestone["id"]`` assignment, and the redundant
    ``display`` local (it was always True here).
    """
    project = launchpad.get_project(project_name)
    project.display_name = project.display_name.capitalize()
    page_statistic = launchpad.common_statistic_for_project(
        project_name=project_name,
        tag=tag,
        milestone_name=[milestone_name])
    # Resolve the Launchpad milestone id for deep links in the template.
    milestone = {"name": milestone_name,
                 "id": data[project_name][milestone_name]}
    return render_template("project.html",
                           is_authorized=is_authorized,
                           project=project,
                           selected_overview=True,
                           display_subprojects=True,
                           prs=list(db.prs),
                           subprs=list(db.subprs),
                           page_statistic=page_statistic,
                           milestone=milestone,
                           flag=True,
                           tag=tag)
@app.route('/project/<project_name>/<milestone_name>/project_statistic/')
@handle_launchpad_auth
def statistic_for_project_by_milestone(project_name, milestone_name, is_authorized=False):
    """Per-milestone statistics page for a whole project.

    Fix: removed the dead ``if project_name == "fuel"`` branch that repeated
    the identical ``milestone["id"]`` assignment.
    """
    # Only the umbrella projects break statistics down by subproject.
    display = project_name in ("mos", "fuel")
    project = launchpad.get_project(project_name)
    project.display_name = project.display_name.capitalize()
    page_statistic = launchpad.common_statistic_for_project(
        project_name=project_name,
        tag=None,
        milestone_name=[milestone_name])
    # Resolve the Launchpad milestone id for deep links in the template.
    milestone = {"name": milestone_name,
                 "id": data[project_name][milestone_name]}
    return render_template("project.html",
                           is_authorized=is_authorized,
                           project=project,
                           selected_overview=True,
                           display_subprojects=display,
                           prs=list(db.prs),
                           subprs=list(db.subprs),
                           page_statistic=page_statistic,
                           milestone=milestone,
                           flag=True)
@app.route('/project/fuelplusmos/<milestone_name>/')
@handle_launchpad_auth
def fuel_plus_mos_overview(milestone_name, is_authorized=False):
    """Combined fuel+mos statistics page for one milestone.

    Builds three tables: per-subproject/per-project counts, aggregated
    fuel+mos counts per subproject, and a "tagged vs. others" summary.

    Fixes: ``dict.fromkeys("summary")`` and ``dict.fromkeys("fuel", "mos")``
    iterated over the *characters* of the string, polluting the dicts with
    junk single-letter keys ('s', 'u', 'm', ... / 'f', 'u', 'e', 'l');
    both now use real key lists. Also dropped the no-op
    ``"{0}".format(...)`` wrappers around keys that are already strings.
    """
    milestones = db.bugs.milestones.find_one()["Milestone"]
    subprojects = list(db.subprs)
    projects = ("fuel", "mos")

    # Per subproject and project: closed / total / high+critical-open counts.
    page_statistic = {}
    for sbpr in subprojects:
        page_statistic[sbpr] = {}
        for pr in projects:
            page_statistic[sbpr][pr] = {
                "done": len(launchpad.get_bugs(
                    project_name=pr,
                    statuses=launchpad.BUG_STATUSES["Closed"],
                    milestone_name=milestone_name,
                    tags=[sbpr])),
                "total": len(launchpad.get_bugs(
                    project_name=pr,
                    statuses=launchpad.BUG_STATUSES["All"],
                    milestone_name=milestone_name,
                    tags=[sbpr])),
                "high": len(launchpad.get_bugs(
                    project_name=pr,
                    statuses=launchpad.BUG_STATUSES["NotDone"],
                    milestone_name=milestone_name,
                    tags=[sbpr],
                    importance=["High", "Critical"])),
            }

    # Aggregated fuel+mos counts per subproject.
    fuel_plus_mos = {}
    for subpr in subprojects:
        summary = launchpad.bugs_ids([subpr], milestone_name)
        fuel_plus_mos[subpr] = {"done": summary["done"],
                                "total": summary["total"],
                                "high": summary["high"]}

    # Summary split into bugs carrying a subproject tag ("tags") vs. all
    # others; `condition` toggles that split in get_bugs.
    summary_statistic = {"summary": {}}
    for criterion in ("tags", "others"):
        condition = criterion == "others"
        per_project = {}
        for pr in projects:
            per_project[pr] = {
                "done": len(launchpad.get_bugs(
                    project_name=pr,
                    statuses=launchpad.BUG_STATUSES["Closed"],
                    milestone_name=milestone_name,
                    tags=subprojects,
                    condition=condition)),
                "total": len(launchpad.get_bugs(
                    project_name=pr,
                    statuses=launchpad.BUG_STATUSES["All"],
                    milestone_name=milestone_name,
                    tags=subprojects,
                    condition=condition)),
                "high": len(launchpad.get_bugs(
                    project_name=pr,
                    statuses=launchpad.BUG_STATUSES["NotDone"],
                    milestone_name=milestone_name,
                    tags=subprojects,
                    importance=["High", "Critical"],
                    condition=condition)),
            }
        summary_statistic["summary"][criterion] = per_project

    # Fold in the combined fuel+mos column for both criteria.
    for criterion in ("tags", "others"):
        summary_statistic["summary"][criterion]["fuel_mos"] = {
            "done": 0, "total": 0, "high": 0}
    for state in ("done", "total", "high"):
        for subpr in subprojects:
            summary_statistic["summary"]["tags"]["fuel_mos"][state] += \
                fuel_plus_mos[subpr][state]
        summary_statistic["summary"]["others"]["fuel_mos"][state] = (
            summary_statistic["summary"]["others"]["fuel"][state] +
            summary_statistic["summary"]["others"]["mos"][state])

    # Incomplete bug counts per project.
    incomplete = {}
    for pr in projects:
        incomplete[pr] = len(launchpad.get_bugs(
            project_name=pr,
            statuses=["Incomplete"],
            milestone_name=milestone_name,
            tags=subprojects))

    return render_template("project_fuelmos.html",
                           is_authorized=is_authorized,
                           milestones=milestones,
                           current_milestone=milestone_name,
                           prs=list(db.prs),
                           subprs=list(db.subprs),
                           fuel_milestone_id=data["fuel"][milestone_name],
                           mos_milestone_id=data["mos"][milestone_name],
                           page_statistic=page_statistic,
                           summary_statistic=summary_statistic,
                           fuel_plus_mos=fuel_plus_mos,
                           all_tags="+".join(db.subprs),
                           incomplete=incomplete)
@app.route('/project/<project_name>/')
@handle_launchpad_auth
def project_overview(project_name, is_authorized=False):
    """Overview page for a single project.

    Fix: dropped the duplicate ``process_launchpad_authorization()`` call —
    ``@handle_launchpad_auth`` has already run the OAuth flow (and performed
    the redirect, if any) before this view is invoked, so the second call
    only repeated work and re-derived the same ``is_authorized`` value.
    """
    project_name = project_name.lower()
    # The pseudo-project gets its own combined page at the key milestone.
    if project_name == "fuelplusmos":
        return redirect(
            "/project/fuelplusmos/{0}/".format(KEY_MILESTONE), code=302)
    project = launchpad.get_project(project_name)
    project.display_name = project.display_name.capitalize()
    page_statistic = launchpad.common_statistic_for_project(
        project_name=project_name,
        milestone_name=project.active_milestones,
        tag=None)
    return render_template("project.html",
                           is_authorized=is_authorized,
                           project=project,
                           selected_overview=True,
                           prs=list(db.prs),
                           subprs=list(db.subprs),
                           page_statistic=page_statistic,
                           milestone=[])
@app.route('/project/<global_project_name>/<tag>/')
@handle_launchpad_auth
def mos_project_overview(global_project_name, tag, is_authorized=False):
    """Overview page for one subproject tag inside an umbrella project."""
    name = global_project_name.lower()
    subproject_tag = tag.lower()
    project = launchpad.get_project(name)
    page_statistic = launchpad.common_statistic_for_project(
        project_name=name,
        milestone_name=project.active_milestones,
        tag=subproject_tag)
    return render_template("project.html",
                           is_authorized=is_authorized,
                           project=project,
                           tag=subproject_tag,
                           page_statistic=page_statistic,
                           selected_overview=True,
                           display_subprojects=True,
                           prs=list(db.prs),
                           subprs=list(db.subprs),
                           milestone=[])
@app.route('/logout', methods=['GET', 'POST'])
def logout():
    """Discard any OAuth tokens and disable authorization for this session."""
    session.pop('request_token_parts', None)
    session.pop('access_token_parts', None)
    session['should_authorize'] = False
    return redirect(url_for('main_page'))
@app.route('/login', methods=["GET", "POST"])
def login(is_authorized=False):
    """Start the Launchpad OAuth flow on the next authenticated request."""
    # Drop any stale request token so the flow starts from scratch.
    session.pop('request_token_parts', None)
    session['should_authorize'] = True
    return redirect(url_for('main_page'))
@app.route('/common_statistic', methods=["GET", "POST"])
@handle_launchpad_auth
def common_statistic_page(is_authorized=False):
    """Global per-project bug statistics page.

    Fix: ``global_statistic.keys()[:]`` raises TypeError on Python 3
    (dict views are not sliceable); iterate a list snapshot of the keys
    instead.
    """
    global_statistic = dict.fromkeys(db.prs)
    for pr in list(global_statistic):
        types = {
            "total": len(launchpad.get_bugs(
                project_name=pr, statuses=launchpad.BUG_STATUSES["All"])),
            "critical": len(launchpad.get_bugs(
                project_name=pr,
                statuses=launchpad.BUG_STATUSES["NotDone"],
                importance=["Critical"])),
            "unresolved": len(launchpad.get_bugs(
                project_name=pr,
                statuses=launchpad.BUG_STATUSES["NotDone"])),
        }
        global_statistic[pr] = types
    return render_template("common_statistic.html",
                           key_milestone=KEY_MILESTONE,
                           is_authorized=is_authorized,
                           statistic=global_statistic,
                           prs=list(db.prs))
@app.route('/', methods=["GET", "POST"])
@handle_launchpad_auth
def main_page(is_authorized=False):
    """Render the landing page."""
    context = {"is_authorized": is_authorized}
    return render_template("main.html", **context)
if __name__ == "__main__":
    # CLI entry point: `<script> run [-p PORT] [-H HOST]`.
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(
        dest="action", help='actions'
    )
    run_parser = subparsers.add_parser(
        'run', help='run application locally'
    )
    run_parser.add_argument(
        '-p', '--port', dest='port', action='store', type=str,
        help='application port', default='80'
    )
    run_parser.add_argument(
        '-H', '--host', dest='host', action='store', type=str,
        help='application host', default='0.0.0.0'
    )
    # parse_known_args tolerates unrecognised extra arguments.
    params, args = parser.parse_known_args()
    # NOTE(review): debug=True plus the reloader is unsafe outside local dev.
    app.run(
        debug=True,
        host=params.host,
        port=int(params.port),
        use_reloader=True,
        threaded=True
    )
|
#!/usr/bin/env python3
import uavcan, time
# Waiting until new nodes stop appearing online.
# That would mean that all nodes that are connected to the bus are now online and ready to work.
def wait_for_all_nodes_to_become_online():
    """Spin until the allocation table stops growing (with more than one entry).

    A stable table size across one 10-second spin is taken to mean every
    node on the bus has come online.
    """
    previous_count = 0
    while True:
        node.spin(timeout=10)
        current_count = len(dynamic_node_id_allocator.get_allocation_table())
        if current_count == previous_count and previous_count > 1:
            return
        previous_count = current_count
# Determining how many ESC nodes are present.
# In real use cases though the number of ESC should be obtained from elsewhere, e.g. from control mixer settings.
# There is a helper class in PyUAVCAN that allows one to automate what we're doing here,
# but we're not using it for the purposes of greater clarity of what's going on on the protocol level.
def detect_esc_nodes():
    """Collect the node IDs of every node broadcasting esc.Status.

    Listens on the bus for ~3 seconds; any node publishing esc.Status in
    that window is treated as an ESC.
    """
    discovered = set()

    def record_source(event):
        discovered.add(event.transfer.source_node_id)

    handle = node.add_handler(uavcan.equipment.esc.Status, record_source)
    try:
        node.spin(timeout=3)  # Collecting ESC status messages, thus determining which nodes are ESC
    finally:
        handle.remove()
    return discovered
# ----- Removed: enumeration ----
if __name__ == '__main__':
    # Initializing a UAVCAN node instance.
    # In this example we're using an SLCAN adapter on the port '/dev/ttyACM0'.
    # PyUAVCAN also supports other types of adapters, refer to its docs to learn more.
    node = uavcan.make_node('/dev/ttyACM0', node_id=10, bitrate=1000000)
    # Initializing a dynamic node ID allocator.
    # This would not be necessary if the nodes were configured to use static node ID.
    node_monitor = uavcan.app.node_monitor.NodeMonitor(node)
    dynamic_node_id_allocator = uavcan.app.dynamic_node_id.CentralizedServer(node, node_monitor)
    # Block until the set of online nodes stabilises (see helper above).
    print('Waiting for all nodes to appear online, this should take less than a minute...')
    wait_for_all_nodes_to_become_online()
    # Allocation table entries unpack as (_, node_id) pairs.
    print('Online nodes:', [node_id for _, node_id in dynamic_node_id_allocator.get_allocation_table()])
    print('Detecting ESC nodes...')
    esc_nodes = detect_esc_nodes()
    print('ESC nodes:', esc_nodes)
|
#################################################################
# #
# Wilfred #
# Copyright (C) 2020-2021, Vilhelm Prytz, <vilhelm@prytznet.se> #
# #
# Licensed under the terms of the MIT license, see LICENSE. #
# https://github.com/wilfred-dev/wilfred #
# #
#################################################################
from functools import wraps
from wilfred.message_handler import error
from wilfred.api.config_parser import Config, NoConfiguration
# Global CLI configuration, loaded once at import time.
config = Config()
try:
    config.read()
except NoConfiguration:
    # A missing configuration file is tolerated here; commands that need it
    # are expected to guard at call time.
    pass
def configuration_present(f):
    """Decorator that aborts the command when Wilfred has no configuration."""
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if config.configuration:
            return f(*args, **kwargs)
        error("Wilfred has not been configured", exit_code=1)
        return f(*args, **kwargs)
    return decorated_function
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.