content stringlengths 5 1.05M |
|---|
import argparse
import csv
import os
from utilities import create_folder
def dcase2017task4(args):
    """Create a black list of audio ids that will be skipped in training.

    The black list is the union of YouTube ids appearing in the DCASE 2017
    Task 4 testing and evaluation sets, written one id per line to
    ``<workspace>/black_list/dcase2017task4.csv``.

    Args:
        args: argparse namespace with a ``workspace`` attribute (directory
            under which the black list is written).
    """
    workspace = args.workspace

    # Black list sources from DCASE 2017 Task 4.
    test_weak_csv = 'metadata/black_list/groundtruth_weak_label_testing_set.csv'
    evaluation_weak_csv = 'metadata/black_list/groundtruth_weak_label_evaluation_set.csv'

    black_list_csv = os.path.join(workspace, 'black_list', 'dcase2017task4.csv')
    create_folder(os.path.dirname(black_list_csv))

    def get_id_sets(csv_path):
        """Return the unique YouTube ids found in a weak-label csv."""
        with open(csv_path, 'r') as fr:
            reader = csv.reader(fr, delimiter='\t')
            # line: ['-5QrBL6MzLg_60.000_70.000.wav', '60.000', '70.000', 'Train horn']
            # The first 11 characters of the file name are the YouTube id.
            ids = set(line[0][0 : 11] for line in reader)
        return list(ids)

    test_ids_set = get_id_sets(test_weak_csv)
    evaluation_ids_set = get_id_sets(evaluation_weak_csv)
    full_ids_set = test_ids_set + evaluation_ids_set

    # Write black list. Use a context manager so the handle is closed;
    # the original opened the file and never called close(). Also avoid
    # shadowing the builtin `id`.
    with open(black_list_csv, 'w') as fw:
        for audio_id in full_ids_set:
            fw.write('{}\n'.format(audio_id))

    print('Write black list to {}'.format(black_list_csv))
if __name__ == '__main__':
    # Command-line entry point: dispatch on the chosen sub-command.
    main_parser = argparse.ArgumentParser(description='')
    sub = main_parser.add_subparsers(dest='mode')

    dcase_parser = sub.add_parser('dcase2017task4')
    dcase_parser.add_argument('--workspace', type=str, required=True)

    parsed = main_parser.parse_args()

    # Guard clause: anything but the known sub-command is an error.
    if parsed.mode != 'dcase2017task4':
        raise Exception('Error argument!')
    dcase2017task4(parsed)
{
"targets": [{
"target_name": "missing"
}]
}
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import logging
import os
import tempfile
import shutil
import subprocess
TOPIC_NAME = 'cron-stop-instances'
FUNCTION_SOURCE = """
import base64
import datetime
from dateutil.parser import parse
import googleapiclient.discovery
import os
def StopOldInstances(data, context):
if 'data' not in data or not data['data']:
print('StopAllInstances MUST receive the Compute zone in `data`')
return
project = os.environ['GCP_PROJECT']
zone = base64.b64decode(data['data']).decode()
print('Executing StopOldInstances: project=%s ; zone=%s' % (project, zone))
service = googleapiclient.discovery.build('compute', 'v1')
request = service.instances().list(project=project, zone=zone)
response = request.execute()
now = datetime.datetime.now(datetime.timezone.utc)
if 'items' in response:
for instance in response['items']:
name = instance['name']
age = now - parse(instance['creationTimestamp'])
if age > datetime.timedelta(days=1):
print("Stopping %s because it's old enough: %s" % (name, age))
r = service.instances().stop(project=project, zone=zone, instance=name)
r.execute()
else:
print("Skipping %s because it's not old enough: %s" % (name, age))
"""
def ParseArgs(argv=None):
    """Parse command line arguments.

    Args:
        argv: Optional list of argument strings. Defaults to None, which
            makes argparse read sys.argv[1:] — so the original no-argument
            call ParseArgs() is unchanged. Passing an explicit list makes
            the function testable without touching sys.argv.

    Returns:
        argparse.Namespace with a `project` attribute.
    """
    parser = argparse.ArgumentParser(
        description='Configures a GCP project to stop old compute instances automatically.'
    )
    parser.add_argument(
        '--project',
        metavar='<project>',
        dest="project",
        required=True,
        help='The id of the project which to configure.')
    return parser.parse_args(argv)
def ConfigureLogging(args):
    """Configure the root logger at INFO level (args is currently unused)."""
    log_format = '%(asctime)s %(filename)s:%(lineno)s: [%(levelname)s] %(message)s'
    date_format = '%Y/%m/%d %H:%M:%S'
    logging.basicConfig(level=logging.INFO, format=log_format, datefmt=date_format)
if __name__ == '__main__':
    args = ParseArgs()
    ConfigureLogging(args)
    logging.info("Arguments: %s" % args)
    env = dict(os.environ, CLOUDSDK_CORE_PROJECT=args.project)

    def RunGcloud(cmd):
        """Run a gcloud command, returning its (stdout, stderr) bytes.

        The original used Popen without stdout/stderr pipes, so
        communicate() always returned (None, None) and every log line below
        recorded "None -- None" instead of the command output. Capturing
        the pipes fixes that.
        """
        p = subprocess.Popen(
            cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        return p.communicate()

    # Enable required APIs: Scheduler, Pub/Sub & Functions
    services = ['appengine', 'cloudfunctions', 'cloudscheduler', 'pubsub']
    for service in services:
        output, err = RunGcloud(
            ['gcloud', 'services', 'enable', service + ".googleapis.com"])
        logging.info("Enable %s: %s -- %s" % (service, output, err))

    # Create a Pub/Sub topic to listen to
    output, err = RunGcloud(['gcloud', 'pubsub', 'topics', 'create', TOPIC_NAME])
    logging.info("Topics Create: %s -- %s" % (output, err))

    # Create the StopOldInstance function to execute
    tempDir = tempfile.mkdtemp()
    try:
        with open(os.path.join(tempDir, 'main.py'), 'w') as f:
            f.write(FUNCTION_SOURCE)
        with open(os.path.join(tempDir, 'requirements.txt'), 'w') as f:
            f.write("python-dateutil==2.7.5")
        output, err = RunGcloud([
            'gcloud', 'functions', 'deploy', 'StopOldInstances', '--trigger-topic',
            'cron-stop-instances', '--runtime', 'python37', '--source', tempDir,
            '--quiet'
        ])
        logging.info("Functions Deploy: %s -- %s" % (output, err))
    finally:
        # Clean the temp dir up even if the deploy step raises.
        shutil.rmtree(tempDir)

    # Create a job to execute this function once a day. Cloud Scheduler
    # requires an App Engine app to exist in the project.
    output, err = RunGcloud(
        ['gcloud', 'beta', 'app', 'create', '--region', 'us-east1'])
    logging.info("Create App for the Scheduler job: %s -- %s" % (output, err))

    # Cron to execute daily at midnight
    daily = '0 0 * * *'
    output, err = RunGcloud([
        'gcloud', 'beta', 'scheduler', 'jobs', 'create', 'pubsub',
        'StopAllInstances', '--message-body', 'us-east1-b', '--topic',
        'projects/%s/topics/%s' % (args.project, TOPIC_NAME), '--schedule', daily
    ])
    logging.info("Scheduler Jobs Create: %s -- %s" % (output, err))
|
from pathlib import Path
import multiprocessing
from trafficmonitor.trafficviewer import start
from trafficmonitor.helper_functions import create_path
class MitmProxy:
    """Configuration and lifecycle management for a mitmproxy instance."""

    def __init__(self):
        # Default command for the mitmdump lib.
        self.command = ""
        self.proxy_pid = str()
        # Directory where capture databases are stored.
        self.path = create_path()
        # Handle for the (not yet started) proxy process.
        self.proxy_process = None

    def start_proxy(self, value):
        """Launch the mitm proxy in its own daemonized process."""
        db_file = Path(f"{self.path}/{value['EXECUTION_NAME']}.db")
        proxy_data = {
            "IPAddress": value['IP_ADDRESS'],
            "DBPath": str(db_file),
        }
        # Forward upstream proxy settings only when both parts are present.
        if 'UPSTREAM_PROXY_IP' in value and 'UPSTREAM_PROXY_PORT' in value:
            proxy_data['UpstreamProxyAddress'] = value['UPSTREAM_PROXY_IP']
            proxy_data['UpstreamProxyPort'] = value['UPSTREAM_PROXY_PORT']
        worker = multiprocessing.Process(target=start, kwargs={**proxy_data})
        worker.daemon = True
        self.proxy_process = worker
        worker.start()

    def stop_proxy(self):
        """Terminate the mitm process; a no-op if it was never started."""
        try:
            self.proxy_process.terminate()
        except AttributeError:
            # proxy_process is still None: nothing to stop.
            pass
|
import msvcrt
from elftools.common import *
from elftools.elf.elffile import ELFFile
# Inspect a prebuilt shared library: dump the program-header fields of every
# segment, then exit. NOTE(review): everything after exit() below is dead
# code, apparently kept as scratch/debug material.
with open('jnilibs/libnative-lib.so', 'rb') as f:
    elf = ELFFile(f)
    print(type(elf))
    # Dump the program header of each segment.
    for seg in elf.iter_segments():
        print(seg['p_type'], hex(seg['p_offset']), hex(seg['p_vaddr']), hex(seg['p_paddr']))
    exit()
    # --- dead code below: exit() above always terminates the script ---
    sec_num = elf.num_sections()
    print('sec_num:' + str(sec_num))
    seg_num = elf.num_segments()
    print('seg_num:' + str(seg_num))
    for i in range(0, sec_num, 1):
        section = elf.get_section(i)
        print(section);print(type(section))
        print(section.data_alignment, section.data_size, str(section.data(), encoding='ISO-8859-1'))
        # msvcrt.getch() pauses until a key press (Windows-only module).
        msvcrt.getch()
    # for i in range(0, seg_num, 1):
    #     segment = elf.get_segment(i)
    #     print(str(segment.data(), encoding='ISO-8859-1'))
    #     print(segment)
    #     print(segment.data_alignment, section.data_size, str(section.data(), encoding='ISO-8859-1'))
    #     msvcrt.getch()
|
import re
import collections
import supybot.utils
# http://ircv3.net/specs/core/message-tags-3.2.html#escaping-values
# Mapping of raw characters to their escaped form in IRCv3 message tags.
TAG_ESCAPE = [
    ('\\', '\\\\'), # \ -> \\
    (' ', r'\s'),
    (';', r'\:'),
    ('\r', r'\r'),
    ('\n', r'\n'),
]

# Reverse mapping: turns escaped sequences back into the raw characters.
unescape_tag_value = supybot.utils.str.MultipleReplacer(
    dict(map(lambda x:(x[1],x[0]), TAG_ESCAPE)))

# TODO: validate host
# Matches a valid tag key: optional client-only prefix '+', optional vendor
# part ending in '/', then the key name proper.
tag_key_validator = re.compile(r'\+?(\S+/)?[a-zA-Z0-9-]+')
def parse_tags(s):
    """Parse an IRCv3 tag section (without the leading '@') into a dict."""
    parsed = {}
    for item in s.split(';'):
        if '=' in item:
            (key, value) = item.split('=', 1)
            assert tag_key_validator.match(key), \
                'Invalid tag key: {}'.format(key)
            parsed[key] = unescape_tag_value(value)
        else:
            # A tag without '=' is a flag: present, but valueless.
            parsed[item] = None
    return parsed
# Lightweight record for one parsed IRC message.
Message = collections.namedtuple('Message',
    'tags prefix command params')


def parse_message(s):
    """Parse a message according to
    http://tools.ietf.org/html/rfc1459#section-2.3.1
    and
    http://ircv3.net/specs/core/message-tags-3.2.html"""
    assert s.endswith('\r\n'), 'Message does not end with CR LF: {!r}'.format(s)
    s = s[0:-2]

    # Optional IRCv3 tag section, introduced by '@'.
    if s.startswith('@'):
        (tag_section, s) = s.split(' ', 1)
        tags = parse_tags(tag_section[1:])
    else:
        tags = {}

    # A trailing parameter (after ' :') may contain spaces, so split it off
    # before tokenizing the rest on single spaces.
    if ' :' in s:
        (head, trailing_param) = s.split(' :', 1)
        tokens = [tok for tok in head.split(' ') if tok]
        tokens.append(trailing_param)
    else:
        tokens = [tok for tok in s.split(' ') if tok]

    prefix = None
    if tokens[0].startswith(':'):
        prefix = tokens.pop(0)[1:]

    command = tokens.pop(0)
    return Message(
        tags=tags,
        prefix=prefix,
        command=command,
        params=tokens,
    )
|
from lark import Lark
# Lark grammar for a tiny toy language with `print` and `repeat` statements.
# NOTE(review): STRING is LETTER+, so "strings" here are bare words without
# quotes or spaces; `repeat N { ... }` nests via code_block.
print_grammar = """
start : instruction+
instruction : "print" STRING [STRING] -> print
            | "repeat" NUMBER code_block -> repeat
code_block : "{" instruction+ "}"
STRING: LETTER+
%import common.LETTER
%import common.INT -> NUMBER
%import common.WS
%ignore WS
"""
#!/usr/bin/env python
"""The EE Python library."""
__version__ = '0.1.172'
# Using lowercase function naming to match the JavaScript names.
# pylint: disable=g-bad-name
# pylint: disable=g-bad-import-order
import collections
import datetime
import inspect
import numbers
import os
import six
from . import batch
from . import data
from . import deserializer
from . import ee_types as types
from ._helpers import _GetPersistentCredentials
# Public re-exports.
from ._helpers import ServiceAccountCredentials
from ._helpers import apply # pylint: disable=redefined-builtin
from ._helpers import call
from ._helpers import profilePrinting
from .apifunction import ApiFunction
from .collection import Collection
from .computedobject import ComputedObject
from .customfunction import CustomFunction
from .dictionary import Dictionary
from .ee_date import Date
from .ee_exception import EEException
from .ee_list import List
from .ee_number import Number
from .ee_string import String
from .element import Element
from .encodable import Encodable
from .feature import Feature
from .featurecollection import FeatureCollection
from .filter import Filter
from .function import Function
from .geometry import Geometry
from .image import Image
from .imagecollection import ImageCollection
from .serializer import Serializer
from .terrain import Terrain
# A list of autogenerated class names added by _InitializeGeneratedClasses.
_generatedClasses = []
class _AlgorithmsContainer(dict):
"""A lightweight class that is used as a dictionary with dot notation.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
del self[name]
# A dictionary of algorithms that are not bound to a specific class.
# Populated by _InitializeUnboundMethods() during Initialize().
Algorithms = _AlgorithmsContainer()
def Initialize(
        credentials='persistent',
        opt_url=None,
        use_cloud_api=False,
        cloud_api_key=None):
    """Initialize the EE library.

    If this hasn't been called by the time any object constructor is used,
    it will be called then. If this is called a second time with a different
    URL, this doesn't do an un-initialization of e.g.: the previously loaded
    Algorithms, but will overwrite them and let point at alternate servers.

    Args:
        credentials: OAuth2 credentials. 'persistent' (default) means use
            credentials already stored in the filesystem, or raise an
            explanatory exception guiding the user to create those credentials.
        opt_url: The base url for the EarthEngine REST API to connect to.
        use_cloud_api: Whether the Cloud API should be used.
        cloud_api_key: An optional API key to use the Cloud API.
    """
    if credentials == 'persistent':
        credentials = _GetPersistentCredentials()
    data.initialize(
        credentials=credentials,
        api_base_url=(opt_url + '/api' if opt_url else None),
        tile_base_url=opt_url,
        use_cloud_api=use_cloud_api,
        cloud_api_base_url=opt_url,
        cloud_api_key=cloud_api_key)
    # Initialize the dynamically loaded functions on the objects that want them.
    ApiFunction.initialize()
    Element.initialize()
    Image.initialize()
    Feature.initialize()
    Collection.initialize()
    ImageCollection.initialize()
    FeatureCollection.initialize()
    Filter.initialize()
    Geometry.initialize()
    List.initialize()
    Number.initialize()
    String.initialize()
    Date.initialize()
    Dictionary.initialize()
    Terrain.initialize()
    # Dynamic classes and the unbound Algorithms container come last, since
    # they are built from the signatures loaded above.
    _InitializeGeneratedClasses()
    _InitializeUnboundMethods()
def Reset():
    """Reset the library. Useful for re-initializing to a different server."""
    data.reset()
    ApiFunction.reset()
    Element.reset()
    Image.reset()
    Feature.reset()
    Collection.reset()
    ImageCollection.reset()
    FeatureCollection.reset()
    Filter.reset()
    Geometry.reset()
    List.reset()
    Number.reset()
    String.reset()
    Date.reset()
    Dictionary.reset()
    Terrain.reset()
    _ResetGeneratedClasses()
    # Replace the algorithm container wholesale so stale entries disappear.
    global Algorithms
    Algorithms = _AlgorithmsContainer()
def _ResetGeneratedClasses():
    """Remove the dynamic classes."""
    global _generatedClasses
    for name in _generatedClasses:
        # Detach the class's bound API functions before dropping the class
        # itself from the module namespace.
        ApiFunction.clearApi(globals()[name])
        del globals()[name]
    _generatedClasses = []
    # Warning: we're passing all of globals() into registerClasses.
    # This is a) pass by reference, and b) a lot more stuff.
    types._registerClasses(globals())  # pylint: disable=protected-access
def _Promote(arg, klass):
    """Wrap an argument in an object of the specified class.

    This is used to e.g.: promote numbers or strings to Images and arrays
    to Collections.

    Args:
        arg: The object to promote.
        klass: The expected type, as a class-name string.

    Returns:
        The argument promoted if the class is recognized, otherwise the
        original argument.
    """
    if arg is None:
        return arg

    if klass == 'Image':
        return Image(arg)
    elif klass == 'Feature':
        if isinstance(arg, Collection):
            # TODO(user): Decide whether we want to leave this in. It can be
            # quite dangerous on large collections.
            return ApiFunction.call_(
                'Feature', ApiFunction.call_('Collection.geometry', arg))
        else:
            return Feature(arg)
    elif klass == 'Element':
        if isinstance(arg, Element):
            # Already an Element.
            return arg
        elif isinstance(arg, Geometry):
            # Geometries get promoted to Features.
            return Feature(arg)
        elif isinstance(arg, ComputedObject):
            # Try a cast.
            return Element(arg.func, arg.args, arg.varName)
        else:
            # No way to convert.
            raise EEException('Cannot convert %s to Element.' % arg)
    elif klass == 'Geometry':
        if isinstance(arg, Collection):
            return ApiFunction.call_('Collection.geometry', arg)
        else:
            return Geometry(arg)
    elif klass in ('FeatureCollection', 'Collection'):
        # For now Collection is synonymous with FeatureCollection.
        if isinstance(arg, Collection):
            return arg
        else:
            return FeatureCollection(arg)
    elif klass == 'ImageCollection':
        return ImageCollection(arg)
    elif klass == 'Filter':
        return Filter(arg)
    elif klass == 'Algorithm':
        if isinstance(arg, six.string_types):
            # An API function name.
            return ApiFunction.lookup(arg)
        elif callable(arg):
            # A native function that needs to be wrapped.
            # NOTE(review): inspect.getargspec was removed in Python 3.11;
            # kept here for the Python 2 compatibility this file maintains
            # (it uses six) — migrate to getfullargspec when 2.x is dropped.
            args_count = len(inspect.getargspec(arg).args)
            return CustomFunction.create(arg, 'Object', ['Object'] * args_count)
        elif isinstance(arg, Encodable):
            # An ee.Function or a computed function like the return value of
            # Image.parseExpression().
            return arg
        else:
            raise EEException('Argument is not a function: %s' % arg)
    elif klass == 'Dictionary':
        if isinstance(arg, dict):
            return arg
        else:
            return Dictionary(arg)
    elif klass == 'String':
        if (types.isString(arg) or
                isinstance(arg, ComputedObject) or
                isinstance(arg, String)):
            return String(arg)
        else:
            return arg
    elif klass == 'List':
        return List(arg)
    elif klass in ('Number', 'Float', 'Long', 'Integer', 'Short', 'Byte'):
        return Number(arg)
    elif klass in globals():
        cls = globals()[klass]
        ctor = ApiFunction.lookupInternal(klass)
        # Handle dynamically created classes.
        if isinstance(arg, cls):
            # Return unchanged.
            return arg
        elif ctor:
            # The client-side constructor will call the server-side constructor.
            return cls(arg)
        elif isinstance(arg, six.string_types):
            if hasattr(cls, arg):
                # arg is the name of a method in klass.
                return getattr(cls, arg)()
            else:
                raise EEException('Unknown algorithm: %s.%s' % (klass, arg))
        else:
            # Client-side cast.
            return cls(arg)
    else:
        return arg
def _InitializeUnboundMethods():
    """Attach the server's unbound top-level functions to `Algorithms`."""
    # Sort the items by length, so parents get created before children.
    items = sorted(
        ApiFunction.unboundFunctions().items(), key=lambda x: len(x[0]))

    for name, func in items:
        signature = func.getSignature()
        if signature.get('hidden', False):
            continue

        # Create nested objects as needed.
        name_parts = name.split('.')
        target = Algorithms
        while len(name_parts) > 1:
            first = name_parts[0]
            # Set the attribute if it doesn't already exist. The try/except block
            # works in both Python 2 & 3.
            try:
                getattr(target, first)
            except AttributeError:
                setattr(target, first, _AlgorithmsContainer())

            target = getattr(target, first)
            name_parts = name_parts[1:]

        # Attach the function.
        # We need a copy of the function to attach properties.
        def GenerateFunction(f):
            # Binding `f` as a closure parameter gives each wrapper its own
            # function object (avoids the late-binding loop-variable pitfall).
            return lambda *args, **kwargs: f.call(*args, **kwargs)  # pylint: disable=unnecessary-lambda
        bound = GenerateFunction(func)
        bound.signature = signature

        # Add docs. If there are non-ASCII characters in the docs, and we're in
        # Python 2, use a hammer to force them into a str.
        try:
            bound.__doc__ = str(func)
        except UnicodeEncodeError:
            bound.__doc__ = func.__str__().encode('utf8')

        setattr(target, name_parts[0], bound)
def _InitializeGeneratedClasses():
    """Generate classes for extra types that appear in the web API."""
    signatures = ApiFunction.allSignatures()
    # Collect the first part of all function names.
    names = set([name.split('.')[0] for name in signatures])
    # Collect the return types of all functions.
    returns = set([signatures[sig]['returns'] for sig in signatures])
    # A class is wanted for any name that is both a function-name prefix and
    # a return type, and that doesn't already exist in this module.
    want = [name for name in names.intersection(returns) if name not in globals()]

    for name in want:
        globals()[name] = _MakeClass(name)
        _generatedClasses.append(name)
        ApiFunction._bound_signatures.add(name)  # pylint: disable=protected-access

    # Warning: we're passing all of globals() into registerClasses.
    # This is a) pass by reference, and b) a lot more stuff.
    types._registerClasses(globals())  # pylint: disable=protected-access
def _MakeClass(name):
    """Generates a dynamic API class for a given name."""

    def init(self, *args):
        """Initializer for dynamically created classes.

        Args:
            self: The instance of this class. Listed to make the linter hush.
            *args: Either a ComputedObject to be promoted to this type, or
                arguments to an algorithm with the same name as this class.

        Returns:
            The new class.
        """
        klass = globals()[name]
        onlyOneArg = (len(args) == 1)
        # Are we trying to cast something that's already of the right class?
        if onlyOneArg and isinstance(args[0], klass):
            # NOTE(review): this path only records `result` and, as structured
            # here, does not reach a ComputedObject.__init__ call — confirm
            # against upstream that same-class casts are intended to skip
            # re-initialization.
            result = args[0]
        else:
            # Decide whether to call a server-side constructor or just do a
            # client-side cast.
            ctor = ApiFunction.lookupInternal(name)
            firstArgIsPrimitive = not isinstance(args[0], ComputedObject)
            shouldUseConstructor = False
            if ctor:
                if not onlyOneArg:
                    # Can't client-cast multiple arguments.
                    shouldUseConstructor = True
                elif firstArgIsPrimitive:
                    # Can't cast a primitive.
                    shouldUseConstructor = True
                elif args[0].func != ctor:
                    # We haven't already called the constructor on this object.
                    shouldUseConstructor = True
            # Apply our decision.
            if shouldUseConstructor:
                # Call ctor manually to avoid having promote() called on the output.
                ComputedObject.__init__(
                    self, ctor, ctor.promoteArgs(ctor.nameArgs(args)))
            else:
                # Just cast and hope for the best.
                if not onlyOneArg:
                    # We don't know what to do with multiple args.
                    raise EEException(
                        'Too many arguments for ee.%s(): %s' % (name, args))
                elif firstArgIsPrimitive:
                    # Can't cast a primitive.
                    raise EEException(
                        'Invalid argument for ee.%s(): %s. Must be a ComputedObject.' %
                        (name, args))
                else:
                    result = args[0]
                ComputedObject.__init__(self, result.func, result.args, result.varName)

    # The generated class derives from ComputedObject and exposes the init
    # above plus a name() accessor used elsewhere in the library.
    properties = {'__init__': init, 'name': lambda self: name}
    new_class = type(str(name), (ComputedObject,), properties)
    ApiFunction.importApi(new_class, name, name)
    return new_class
# Set up type promotion rules as soon as the package is loaded.
Function._registerPromoter(_Promote)  # pylint: disable=protected-access
|
from LedAnimation import Animation, KeyFrame
from neopixel import *
import time
from random import randint
class RandBuildAnimation(Animation):
    """Animation that lights random pixels one by one until the strip is full."""

    def BuildDef(self, strip, kwargs):
        """Keyframe worker: fill every pixel in random order.

        kwargs:
            colours: sequence of candidate colours; one is chosen at random
                per pixel (must support len() and indexing).
            delay: per-pixel delay in milliseconds.
        """
        active = [0] * strip.numPixels()
        colours = kwargs['colours']
        delay = kwargs['delay']
        while 0 in active:
            target = randint(0, strip.numPixels() - 1)
            if (active[target] == 0):
                active[target] = 1
                strip.setPixelColor(target, colours[randint(0, len(colours) - 1)])
                time.sleep(delay/1000.0)
                strip.show()

    def GenerateRandomColours(self, numColours=4):
        """Return a list of `numColours` random Colors."""
        colours = []
        for j in range(numColours):
            colours.append(Color(randint(0, 255), randint(0, 255), randint(0, 255)))
        return colours

    def BuildIn(self, strip, color=Color(30,30,90)):
        """Queue the build-in keyframe; returns self for chaining."""
        #colours = self.GenerateRandomColours()
        # BuildDef indexes and len()s `colours`, but the original passed the
        # bare colour value here, which would crash inside BuildDef
        # (len() of a single Color). Wrap single colours in a list.
        colours = color if isinstance(color, (list, tuple)) else [color]
        frame1 = KeyFrame(self.BuildDef, strip, colours=colours, delay=30)
        self.AddFrame(frame1)
        return self

    def __init__(self, max_brightness=30, base_color=Color(30,30,90)):
        self.max_brightness = max_brightness
        self.base_color = base_color
        Animation.__init__(self)
from lxml import etree
if __name__ == "__main__":
    # Build a tiny tree: <h1 class="h">s<h2>ss</h2></h1>.
    heading = etree.Element("h1")
    sub_heading = etree.Element("h2")
    heading.text = "s"
    sub_heading.text = "ss"
    heading.set("class", "h")
    heading.append(sub_heading)
    # print(dir(h1))
    # Serialize twice: with an explicit doctype, then without one.
    print(etree.tostring(heading, doctype="<!DOCTYPE html>"))
    print(etree.tostring(heading, doctype=None))
# -*- mode: python; coding: utf-8 -*-
# Copyright 2018 Peter Williams and collaborators.
# Licensed under the MIT License.
"""Solving radiation belt populations using the stochastic differential
equation approach.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from pwkit import cgs
from scipy import interpolate, special
import time
from .config import Configuration
class BoundaryConfiguration(Configuration):
    # Section name under which this block appears in the config file.
    __section__ = 'boundary-condition'

    temperature = 1.2e6
    "The temperature of the Maxwell-Boltzmann momentum distribution, in Kelvin."

    def to_boundary(self):
        """Build the IsotropicMaxwellianBoundary described by this config."""
        return IsotropicMaxwellianBoundary(self.temperature)
class IsotropicMaxwellianBoundary(object):
    """A boundary condition for high L in which the particles are isotropic and
    have a (non-relativistic) Maxwell-Boltzman momentum distribution. The
    integral of the distribution function over all momenta and pitch angles is
    1. (I.e., if the lowest possible pitch angle in the SDE evaluation is 5°,
    the integral of this function does not account for that. Scaling to actual
    particle densities should be applied after evaluating the SDE.)

    In the Maxwellian distribution, 99.9% of the particles have momenta in the
    range [0.1236, 4.2107] * sqrt(m k T). This is therefore the range that
    must be sampled well among exiting particles in order to get a good
    measurement using the SDE approach.

    Divine & Garrett (1983) says the "cold" Jovian plasma has a characteristic
    temperature of around 100 eV, which is around 1.2 MK. The 99.9% momentum
    range is therefore [4.80e-20, 1.64e-18] g*cm/s = [0.00176, 0.0600] m_e c.

    In the isotropic pitch angle distribution, 99.9% of the particles have
    pitch angles greater than 0.04483 radians = 2.56 degrees. Note that this
    value is smaller than our typical loss cone sizes.
    """

    def __init__(self, T):
        "T is the particle temperature in Kelvin; it should be non-relativistic."
        self.T = T
        mkT = cgs.me * cgs.k * T
        # Precomputed constants for the Maxwellian pdf and its integrals.
        self.k1 = np.sqrt(2 / (np.pi * mkT**3))
        self.k2 = -0.5 / mkT
        self.k3 = 1. / np.sqrt(2 * mkT)
        self.k4 = np.sqrt(2 / (np.pi * mkT))

    def at_position(self, g, alpha, L):
        """Evaluate the distribution at log-momentum *g* and pitch angle *alpha*.

        NOTE(review): p2 = me*c*exp(2g) carries only one factor of (me*c);
        if p = me*c*exp(g), the squared momentum would be (me*c)**2*exp(2g).
        Confirm the intended normalization/Jacobian convention.
        """
        p2 = cgs.me * cgs.c * np.exp(2 * g)
        return self.k1 * p2 * np.exp(self.k2 * p2) * np.sin(alpha)

    def in_L_cell(self, g0, g1, alpha0, alpha1):
        """Integrate the distribution over one (g, alpha) cell at the boundary."""
        p0 = cgs.me * cgs.c * np.exp(g0)
        p1 = cgs.me * cgs.c * np.exp(g1)
        # Closed-form integral of the Maxwellian over [p0, p1] via erf terms.
        p_contrib = ((special.erf(self.k3 * p1) - special.erf(self.k3 * p0)) +
                     self.k4 * (p0 * np.exp(self.k2 * p0**2) - p1 * np.exp(self.k2 * p1**2)))
        # Integral of sin(alpha) over the pitch-angle interval.
        a_contrib = np.cos(alpha0) - np.cos(alpha1)
        return p_contrib * a_contrib

    def sample(self, rbi, n):
        """Generate a random sampling of particle momenta and pitch angles consistent
        with this boundary condition.

        Returns `(g, alpha)`, where both tuple items are 1D n-sized vectors of
        values.

        We take the *rbi* argument to ensure that our generated values lie
        within the parameter range it allows. We do a hack of re-drawing
        values for out-of-bounds parameters; of course this technically
        monkeys with the final distributions marginally, but it's still the
        least-bad approach.
        """
        sigma = np.sqrt(cgs.me * cgs.k * self.T)
        momenta = np.random.normal(scale=sigma, size=(n, 3))
        g = np.log(np.sqrt((momenta**2).sum(axis=1)) / (cgs.me * cgs.c))  # assuming non-relativistic

        # Redraw any particles that fell below the grid's lowest g edge.
        while True:
            bad = (g < rbi.g_edges[0])
            n_bad = bad.sum()
            if not n_bad:
                break
            momenta = np.random.normal(scale=sigma, size=(n_bad, 3))
            g[bad] = np.log(np.sqrt((momenta**2).sum(axis=1)) / (cgs.me * cgs.c))

        # Isotropic pitch angles; redraw any inside the loss cone edge.
        alpha = np.arccos(np.random.uniform(0, 1, size=n))
        while True:
            bad = (alpha < rbi.alpha_edges[0])
            n_bad = bad.sum()
            if not n_bad:
                break
            alpha[bad] = np.arccos(np.random.uniform(0, 1, size=n_bad))

        return g, alpha
class RadBeltIntegrator(object):
def __init__(self, path):
    """Load precomputed SDE coefficient grids from the file at *path*.

    The file is a sequence of np.save'd arrays read back in a fixed order:
    grid edges (g, alpha, L), drift coefficients a[0..2], lower-triangular
    diffusion coefficients b[i][j], the loss grid, two loss-cone
    descriptors, and the log-timestep grid.
    """
    self.a = [None, None, None]
    self.b = [[None], [None, None], [None, None, None]]

    with open(path, 'rb') as f:
        self.g_edges = np.load(f)
        self.alpha_edges = np.load(f)
        self.L_edges = np.load(f)

        for i in range(3):
            self.a[i] = np.load(f)

        for i in range(3):
            for j in range(i + 1):
                self.b[i][j] = np.load(f)

        self.loss = np.load(f)
        self.loss_L_min = np.load(f)  # scalar
        self.alpha_min = np.load(f)  # same shape as L_edges
        self.lndt = np.load(f)

    # Cell centers, used for binning in jokipii_many().
    self.g_centers = 0.5 * (self.g_edges[:-1] + self.g_edges[1:])
    self.alpha_centers = 0.5 * (self.alpha_edges[:-1] + self.alpha_edges[1:])
    self.L_centers = 0.5 * (self.L_edges[:-1] + self.L_edges[1:])

    # XXX TRANSPOSE IS DUMB
    self.i_a = [None, None, None]
    self.i_b = [[None, None, None], [None, None, None], [None, None, None]]
    points = [self.g_edges, self.alpha_edges, self.L_edges]

    for i in range(3):
        self.i_a[i] = interpolate.RegularGridInterpolator(points, self.a[i].T)

        for j in range(i + 1):
            self.i_b[i][j] = interpolate.RegularGridInterpolator(points, self.b[i][j].T)
            # The diffusion matrix is symmetric, so mirror the interpolator.
            self.i_b[j][i] = self.i_b[i][j]

    self.i_loss = interpolate.RegularGridInterpolator(points, self.loss.T)

    # In the Jokipii method function, we use this interpolator with
    # particles that might be out of bounds, which would lead to an
    # exception if we used the default `bounds_error = True`. Fortunately,
    # precisely because such particles are out of bounds, we can ignore
    # the error and return a nonsense value.
    self.i_alpha_min = interpolate.RegularGridInterpolator([self.L_edges], self.alpha_min,
                                                           bounds_error=False, fill_value=100.)
    self.i_lndt = interpolate.RegularGridInterpolator(points, self.lndt.T)
def trace_one(self, g0, alpha0, L0, n_steps):
    """Integrate one particle trajectory through the SDE.

    Args:
        g0, alpha0, L0: starting log-momentum, pitch angle, and L shell.
        n_steps: maximum number of steps to take.

    Returns:
        A (5, n_recorded) array whose rows are (s, g, alpha, L, log-weight),
        with s the accumulated pseudo-time.
    """
    # NOTE(review): the returned slice [:, :i_step] drops the column recorded
    # on the final iteration when the loop runs to completion (and when a
    # break fires at i_step the boundary-hitting step was never recorded) —
    # confirm this truncation is intended.
    history = np.empty((5, n_steps))
    s = 0.
    pos = np.array([g0, alpha0, L0, 0.])

    for i_step in range(n_steps):
        # Termination conditions against the grid / loss-cone boundaries.
        if pos[0] <= self.g_edges[0]:
            break  # too cold to care anymore
        if pos[0] >= self.g_edges[-1]:
            print('warning: particle energy got too high')
            break
        if pos[1] <= self.i_alpha_min([pos[2]]).item():
            break  # loss cone
        if pos[1] > np.pi / 2:
            pos[1] = np.pi - pos[1]  # mirror at pi/2 pitch angle
        if pos[2] <= self.loss_L_min:
            break  # (drift-averaged) surface impact
        if pos[2] >= self.L_edges[-1]:
            break  # hit source boundary condition

        history[0, i_step] = s
        history[1:, i_step] = pos

        # Euler-Maruyama step: drift a*dt plus diffusion b*sqrt(dt)*lambda.
        lam = np.random.normal(size=3)
        delta_t = np.exp(self.i_lndt(pos[:3]))
        ##print('dt:', delta_t)
        sqrt_delta_t = np.sqrt(delta_t)
        delta_pos = np.zeros(3)

        for i in range(3):
            delta_pos[i] += self.i_a[i](pos[:3]) * delta_t
            ##print('a', i, self.i_a[i](pos), self.i_a[i](pos) * delta_t)

            for j in range(3):
                delta_pos[i] += self.i_b[i][j](pos[:3]) * sqrt_delta_t * lam[j]
                ##print('b', i, j, self.i_b[i][j](pos) * sqrt_delta_t)

        # Accumulate the loss term in the log-weight channel.
        pos[3] -= delta_t * self.i_loss(pos[:3])
        pos[:3] += delta_pos
        s += delta_t  # sigh, terminology all over the place

    return history[:, :i_step]
def jokipii_many(self, bdy, n_particles, n_steps, print_nth=1000):
    """Jokipii & Levy technique: advance many SDE particles simultaneously.

    Args:
        bdy: boundary-condition object providing ``sample(rbi, n)``.
        n_particles: number of particles advanced in lockstep.
        n_steps: number of SDE steps to take.
        print_nth: print a progress line every `print_nth` steps.

    Returns:
        Array of shape (n_L, n_alpha, n_g) of accumulated weighted
        residence times.
    """
    # State rows: (g, alpha, L, log-weight, residence time in steps).
    state = np.empty((5, n_particles))
    state[0], state[1] = bdy.sample(self, n_particles)
    state[2] = self.L_edges[-1]
    state[3] = 0.
    state[4] = 0.

    # The answer
    grid = np.zeros((self.L_centers.size, self.alpha_centers.size, self.g_centers.size))
    sum_residence_times = 0  # measured in steps
    max_residence_time = 0
    n_exited = 0

    # Scalings mapping coordinates to grid-cell indices; the 0.999999 factor
    # keeps a value sitting exactly on the upper edge inside bounds.
    t0 = time.time()
    g0 = self.g_edges[0]
    gscale = 0.999999 * self.g_centers.size / (self.g_edges[-1] - g0)
    alpha0 = self.alpha_edges[0]
    alphascale = 0.999999 * self.alpha_centers.size / (self.alpha_edges[-1] - alpha0)
    L0 = self.L_edges[0]
    Lscale = 0.999999 * self.L_centers.size / (self.L_edges[-1] - L0)

    for step_num in range(n_steps):
        if step_num % print_nth == 0:
            print(' Step {}'.format(step_num))

        # delta-t for these samples
        posT = state[:3].T
        delta_t = np.exp(self.i_lndt(posT))

        # Record each position. ndarray.astype(int) truncates toward 0:
        # 0.9 => 0, 1.1 => 1, -0.9 => 0, -1.1 => -1. (The np.int alias was
        # removed in NumPy 1.24; the builtin int it aliased is identical.)
        g_indices = (gscale * (state[0] - g0)).astype(int)
        alpha_indices = (alphascale * (state[1] - alpha0)).astype(int)
        L_indices = (Lscale * (state[2] - L0)).astype(int)

        for i in range(n_particles):
            grid[L_indices[i], alpha_indices[i], g_indices[i]] += delta_t[i] * np.exp(state[3, i])

        # Advance: Euler-Maruyama with per-particle random deviates.
        lam = np.random.normal(size=(3, n_particles))
        sqrt_delta_t = np.sqrt(delta_t)
        delta_state = np.zeros(state.shape)

        for i in range(3):
            delta_state[i] += self.i_a[i](posT) * delta_t

            for j in range(3):
                delta_state[i] += self.i_b[i][j](posT) * sqrt_delta_t * lam[j]

        delta_state[3] = -self.i_loss(posT) * delta_t
        delta_state[4] = 1.
        state += delta_state

        # Deal with particles exiting out of bounds
        oob = (
            (state[0] < self.g_edges[0]) |
            (state[0] > self.g_edges[-1]) |
            (state[1] < self.i_alpha_min(state[2])) |
            (state[2] < self.loss_L_min) |
            (state[2] > self.L_edges[-1])
        )
        n_exiting = oob.sum()
        # Guard: the original called state[4, oob].max() unconditionally,
        # which raises on an empty selection when no particle exits this step.
        if n_exiting:
            n_exited += n_exiting
            max_residence_time = max(max_residence_time, state[4, oob].max())
            sum_residence_times += state[4, oob].sum()
            state[0, oob], state[1, oob] = bdy.sample(self, n_exiting)
            state[2, oob] = self.L_edges[-1]
            state[3, oob] = 0.
            state[4, oob] = 0.

        # Mirror at pi/2 pitch angle
        pa_mirror = (state[1] > 0.5 * np.pi)
        state[1, pa_mirror] = np.pi - state[1, pa_mirror]

    elapsed = time.time() - t0
    print('elapsed: %.0f seconds' % elapsed)
    print('total particle-steps:', n_particles * n_steps)
    print('particle-steps per ms: %.0f' % (0.001 * n_particles * n_steps / elapsed))
    # Guard against division by zero when no particle ever exited.
    mean_residence = sum_residence_times / n_exited if n_exited else float('nan')
    print('mean residence time:', mean_residence)
    print('max residence time:', max_residence_time)
    return grid
def plot_cube(self, c):
    """Quick-look plot of a results cube using the `omega` plotting package.

    Plots the median of *c* collapsed onto each of its three axes
    (g, alpha, L) side by side, with a shared y range.
    """
    import omega as om

    # Median over the other two axes gives one profile per coordinate.
    med_g = np.median(c, axis=(1, 2))
    med_a = np.median(c, axis=(0, 2))
    med_l = np.median(c, axis=(0, 1))

    # Shared y limits with a 2% margin so the three panels are comparable.
    mn = min(med_g.min(), med_a.min(), med_l.min())
    mx = max(med_g.max(), med_a.max(), med_l.max())
    delta = abs(mx - mn) * 0.02
    mx += delta
    mn -= delta

    hb = om.layout.HBox(3)
    hb[0] = om.quickXY(self.g_edges, med_g, u'g', ymin=mn, ymax=mx)
    hb[1] = om.quickXY(self.alpha_edges, med_a, u'α', ymin=mn, ymax=mx)
    hb[1].lpainter.paintLabels = False
    hb[2] = om.quickXY(self.L_edges, med_l, u'L', ymin=mn, ymax=mx)
    hb[2].lpainter.paintLabels = False
    hb.setWeight(0, 1.1)  # extra room for labels
    return hb
def cube_to_particles(self, f_cube):
    """Given a cube of particle densities of shape (n_L, n_alpha, n_g), map it to
    the "ParticleDistribution" type that we use for the radiative transfer
    integration. The units of *f_cube* are not assumed to be anything in
    particular.

    Adapted from vernon.dolfin.ThreeDCoordinates.to_particles.

    Fix: ``.astype(np.int)`` replaced with ``.astype(int)`` — the ``np.int``
    alias was deprecated in NumPy 1.20 and removed in 1.24, so the original
    raises AttributeError on modern NumPy. The builtin is what the alias
    always resolved to.
    """
    from .particles import ParticleDistribution

    nl = self.L_centers.size
    na = self.alpha_centers.size
    ng = self.g_centers.size
    sh = (nl, na, ng)

    # Kinetic energy per log-momentum grid point, in MeV (0.511 = electron
    # rest mass energy).
    Ek_cube = 0.511 * (np.sqrt(np.exp(2 * self.g_centers) + 1) - 1) # measured in MeV
    Ek_cube = np.broadcast_to(Ek_cube, sh)
    y_cube = np.broadcast_to(np.sin(self.alpha_centers), sh)

    # Latitudes traversed by the particles. Because we have a loss cone,
    # large latitudes are not attainable. The limit scales pretty
    # strongly: for a conservative loss cone that only includes alpha < 1
    # degree, the maximum attainable latitude is < 74 degrees. (See
    # Shprits [2006], equation 10.).
    nlat = na // 2
    lats = np.linspace(0, 75 * np.pi / 180, nlat)

    # Similar to above for `y = sin(alpha)` traversed by particles. The
    # loss cone once again makes it so that y <~ 0.02 is not attainable,
    # but the numerics don't blow up here the way they do for `r`.
    ny = na // 4
    ys = np.linspace(0., 1., ny)

    # Energies. The highest energies on the grid are often unpopulated so
    # we set the bounds here dynamically.
    E_any = Ek_cube[f_cube > 0]
    ne = ng // 2
    emin = E_any.min()
    emax = E_any.max()
    energies = np.linspace(emin, emax, ne)

    # `r**2` is the ratio of the magnetic field strength at latitude `lat`
    # to the strength at `lat = 0`. By conservation of the invariant `mu ~
    # B(lat) / sin^2(alpha(lat))`, we can use `r` to determine how pitch
    # angle varies with magnetic latitude for a given bouncing particle.
    cl = np.cos(lats)
    r = np.sqrt(np.sqrt(4 - 3 * cl**2) / cl**6)

    # Figure out how we'll interpolate each particle onto the energy grid.
    # Energy is conserved over a bounce so we just split the contribution
    # between two adjacent energy cells in standard fashion.
    xe_cube = (Ek_cube - emin) / (emax - emin) * (ne - 1)
    fe_cube = np.floor(xe_cube).astype(int)
    re_cube = xe_cube - fe_cube
    w = (fe_cube == ne - 1) # patch up to interpolate correctly at emax
    fe_cube[w] = ne - 2
    re_cube[w] = 1.

    # The big loop.
    mapped = np.zeros((nl, nlat, ny, ne))

    for i_l in range(nl):
        bigiter = zip(f_cube[i_l].flat, y_cube[i_l].flat, fe_cube[i_l].flat, re_cube[i_l].flat)

        for f, y, fe, re in bigiter:
            if f <= 0:
                continue

            # Equatorial pitch `y` mapped along the bounce; stop where the
            # mirror point (y = 1) is reached.
            bounce_ys = y * r
            max_lat_idx = np.searchsorted(bounce_ys, 1)
            max_lat_idx = np.maximum(max_lat_idx, 1) # hack for `y = 1` precisely
            assert np.all(max_lat_idx < nlat)
            norm = 1. / max_lat_idx

            # Figure out how to interpolate each `y` value onto its grid
            xy_bounce = bounce_ys[:max_lat_idx] * (ny - 1)
            fy_bounce = np.floor(xy_bounce).astype(int)
            ry_bounce = xy_bounce - fy_bounce
            w = (fy_bounce == ny - 1)
            fy_bounce[w] = ny - 2
            ry_bounce[w] = 1.

            # Ready to go: bilinear split across the (y, E) cells.
            mapped[i_l,:max_lat_idx,fy_bounce  ,fe  ] += norm * (1 - ry_bounce) * (1 - re) * f
            mapped[i_l,:max_lat_idx,fy_bounce  ,fe+1] += norm * (1 - ry_bounce) * re * f
            mapped[i_l,:max_lat_idx,fy_bounce+1,fe  ] += norm * ry_bounce * (1 - re) * f
            mapped[i_l,:max_lat_idx,fy_bounce+1,fe+1] += norm * ry_bounce * re * f

    return ParticleDistribution(self.L_centers, lats, ys, energies, mapped)
# Command-line interface
import argparse
from pwkit.cli import die
def average_cli(args):
    """Average together a bunch of cubes.

    Usage: average INPUT [INPUT...] OUTPUT

    Each INPUT is an ``.npy`` file; the element-wise mean of all inputs is
    written to OUTPUT in ``.npy`` format.
    """
    import sys

    # Guard before indexing: with zero or one argument there is no
    # (input, output) pair. The original indexed args[-1] first and
    # crashed with IndexError on an empty argument list instead of
    # printing the error message.
    if len(args) < 2:
        print('error: must specify at least one input cube', file=sys.stderr)
        sys.exit(1)

    inputs = args[:-1]
    output = args[-1]

    with open(inputs[0], 'rb') as f:
        arr = np.load(f)

    for inp in inputs[1:]:
        with open(inp, 'rb') as f:
            arr += np.load(f)

    # True division out-of-place: the original in-place `arr /= n` raises
    # for integer-dtype cubes (cannot cast float result back to int);
    # float cubes keep their dtype exactly as before.
    arr = arr / len(inputs)

    with open(output, 'wb') as f:
        np.save(f, arr)
def cube_to_particles_cli(args):
    """Convert a cube of particle densities in g/alpha/L space into a
    ParticleDistribution save file.
    """
    parser = argparse.ArgumentParser(
        prog = 'vernon sde cube-to-particles',
    )
    parser.add_argument('grid_path', metavar='GRID-PATH',
                        help='The path to the input file of gridded coefficients.')
    parser.add_argument('cube_path', metavar='CUBE-PATH',
                        help='The path to the input file of particle densities.')
    parser.add_argument('output_path', metavar='OUTPUT-PATH',
                        help='The destination path for the ParticleDistribution save file.')
    cfg = parser.parse_args(args=args)

    # The integrator carries the grid geometry needed for the mapping.
    integrator = RadBeltIntegrator(cfg.grid_path)
    with open(cfg.cube_path, 'rb') as f:
        density_cube = np.load(f)
    integrator.cube_to_particles(density_cube).save(cfg.output_path)
def forward_cli(args):
    """Run a forward SDE integration and save the resulting grid."""
    parser = argparse.ArgumentParser(
        prog = 'vernon sde forward',
    )
    parser.add_argument('-c', dest='config_path', metavar='CONFIG-PATH',
                        help='The path to the configuration file.')
    parser.add_argument('-p', dest='particles', type=int, metavar='PARTICLES', default=8192,
                        help='The number of particles to track at once.')
    parser.add_argument('-s', dest='steps', type=int, metavar='STEPS', default=100000,
                        help='The number of steps to make.')
    parser.add_argument('-N', dest='print_nth', type=int, metavar='STEPS', default=1000,
                        help='Print a brief notice for every STEPS steps that are computed.')
    parser.add_argument('grid_path', metavar='GRID-PATH',
                        help='The path to the input file of gridded coefficients.')
    parser.add_argument('output_path', metavar='OUTPUT-PATH',
                        help='The destination path for the NPY file of particle positions.')
    cfg = parser.parse_args(args=args)

    # Boundary condition comes from the TOML config; the grid geometry and
    # transport coefficients come from the grid file.
    boundary = BoundaryConfiguration.from_toml(cfg.config_path).to_boundary()
    integrator = RadBeltIntegrator(cfg.grid_path)
    result_grid = integrator.jokipii_many(boundary, cfg.particles, cfg.steps,
                                          print_nth=cfg.print_nth)

    with open(cfg.output_path, 'wb') as f:
        np.save(f, result_grid)
def entrypoint(argv):
    """Top-level dispatcher for the ``vernon sde`` sub-commands."""
    if len(argv) == 1:
        die('must supply a subcommand: "average", "cube-to-particles", "forward", "gen-gg-config", "gen-grid"')

    subcommand, rest = argv[1], argv[2:]

    if subcommand == 'average':
        average_cli(rest)
    elif subcommand == 'cube-to-particles':
        cube_to_particles_cli(rest)
    elif subcommand == 'forward':
        forward_cli(rest)
    elif subcommand == 'gen-gg-config':
        # imported lazily so the common subcommands start faster
        from .grid import GenGridTask
        GenGridTask.generate_config_cli('sde gen-gg-config', rest)
    elif subcommand == 'gen-grid':
        from .grid import gen_grid_cli
        gen_grid_cli(rest)
    else:
        die('unrecognized subcommand %r', subcommand)
|
from ..mapper import PropertyMapper, ApiInterfaceBase
from ..mapper.types import Timestamp, AnyType
__all__ = ['StoryAppAttribution', 'StoryAppAttributionInterface']
class StoryAppAttributionInterface(ApiInterfaceBase):
    """Typed attribute declarations for a story's app attribution record.

    Field semantics follow the upstream API; the names suggest app
    metadata (icon, link, display name) plus a numeric id — confirm
    against the API documentation.
    """
    app_action_text: str
    app_icon_url: str
    content_url: str
    id: int
    link: str
    name: str
class StoryAppAttribution(PropertyMapper, StoryAppAttributionInterface):
    """Concrete mapper: PropertyMapper supplies the behavior, the
    interface class supplies the field declarations."""
    pass
|
# Copyright 2020 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Machine for auto installation of debian based operating systems.
"""
#
# IMPORTS
#
from enum import Enum
from secrets import token_urlsafe
from tessia.server.state_machines.autoinstall.plat_base import PlatBase
from tessia.server.state_machines.autoinstall.model import \
AutoinstallMachineModel
from tessia.server.config import Config
from tessia.server.state_machines.autoinstall.sm_base import SmBase
from tessia.server.state_machines.autoinstall.sm_base import TEMPLATES_DIR
from time import monotonic, sleep
from urllib.parse import urlparse
import jinja2
import json
import logging
import os
import requests
#
# CONSTANTS AND DEFINITIONS
#
EventMarker = Enum('EventMarker', 'NONE SUCCESS FAIL')
#
# CODE
#
class LogEvent:
    """
    A single installer event parsed from the webhook event stream.

    The raw event string is interpreted once at construction time and a
    human-readable rendering is precomputed, available through ``str()``.
    """

    def __init__(self, event_string):
        """
        Constructor

        Arguments:
            event_string (str): string describing event
        """
        self._name = ''
        self._result = ''
        self._origin = ''
        self._type = ''
        self._description = ''

        # binary/raw events: only the metadata after the prefix is logged
        if event_string.startswith('binary:'):
            self._name = 'binary'
            self._str = event_string[7:]
            return

        try:
            payload = json.loads(event_string)
        except json.JSONDecodeError:
            self._str = 'Undecodeable event: {}'.format(event_string)
            return

        self._name = payload.get("name", "?name?")
        self._origin = payload.get("origin", "?origin?")
        self._result = payload.get("result", "START")

        if self._origin == 'watchdog':
            # watchdog events report on file retrieval attempts
            outcome = "retrieved" if self._result == 'SUCCESS' else "not present"
            self._str = "File {} {}".format(self._name, outcome)
            return

        self._type = payload.get("event_type", "")
        self._description = payload.get("description", "")
        if self._description:
            self._description = ": " + self._description
        self._str = "{} {} {}".format(
            self._result, self._name, self._description)
    # __init__()

    def __str__(self):
        """
        Return string definition of an event
        """
        return self._str
    # __str__()
# LogEvent()
class LogWatcher:
    """
    Watches the installer event stream and tracks whether the
    installation has succeeded, failed, or should keep being watched.
    """

    def __init__(self, end_time):
        """
        Constructor

        Arguments:
            end_time (number): when to stop watching
        """
        self._end_watch = end_time
        self._success = False
        self._failure = False
    # __init__()

    @staticmethod
    def _is_success_trigger(event):
        """
        Detect success from event. May be overridden
        """
        if event._type != "finish" or event._result != "SUCCESS":
            return False
        return event._name in ("subiquity/Reboot", "subiquity/Reboot/reboot")
    # _is_success_trigger()

    @staticmethod
    def _is_failure_trigger(event):
        """
        Detect failure from event. May be overridden
        """
        if not event._name.startswith("subiquity/Error"):
            return False
        if "server_request_fail" in event._description:
            return False
        return "/var/crash" in event._description

    def process(self, event, current_time):
        """
        Find out if it is failure or success

        Arguments:
            event (LogEvent): a set of values from log
            current_time (number): current time

        Returns:
            EventMarker: what does the event signify
        """
        if self._is_success_trigger(event):
            self._success = True
            return EventMarker.SUCCESS
        if self._is_failure_trigger(event):
            self._failure = True
            # stop soon, but leave a short grace period so trailing
            # watchdog messages can still arrive
            self._end_watch = current_time + 10
            return EventMarker.FAIL
        return EventMarker.NONE

    @property
    def success(self):
        """
        Has detector found successful state
        """
        return self._success

    @property
    def failure(self):
        """
        Has detector found failure state
        """
        return self._failure

    def should_stop(self, current_time):
        """
        Should detector stop running
        """
        return self._success or (current_time > self._end_watch)
class LogWatcherUbuntu2010(LogWatcher):
    """
    Installation state detector for Ubuntu 20.10

    20.10's subiquity emits a different final event, so only the success
    trigger differs from the base class.
    """

    @staticmethod
    def _is_success_trigger(event):
        """
        Detect success from event.

        Declared as a staticmethod for consistency with the base class,
        which defines this hook static as well; the base's
        ``self._is_success_trigger(event)`` call works for both forms.
        """
        return (event._name == "subiquity/Reboot/apply_autoinstall_config"
                and event._result == "SUCCESS"
                and event._type == "finish")
    # _is_success_trigger()
class SmSubiquityInstaller(SmBase):
"""
State machine for SubiquityInstaller installer
"""
# the type of linux distribution supported
DISTRO_TYPE = 'subiquity'
def __init__(self, model: AutoinstallMachineModel,
             platform: PlatBase, *args, **kwargs):
    """
    Constructor: set up logging, the webhook endpoints and the
    per-install session identity.

    Args:
        model (AutoinstallMachineModel): install model (OS, profile, ...)
        platform (PlatBase): platform used to control the target machine

    Raises:
        RuntimeError: if the 'auto_install' or 'installer-webhook'
            sections are missing from the server configuration
    """
    super().__init__(model, platform, *args, **kwargs)
    self._logger = logging.getLogger(__name__)
    # get communication settings from config
    autoinstall_config = Config.get_config().get("auto_install")
    if not autoinstall_config:
        raise RuntimeError('No auto_install configuration provided')
    webhook_config = Config.get_config().get("installer-webhook")
    if not webhook_config:
        raise RuntimeError('No installer-webhook configuration provided')
    # expect webhook control in the same container
    self._webhook_control = "http://localhost:{}".format(
        webhook_config['control_port'])
    # webhook log address should be accessible to target system, hence it
    # is derived from the externally visible auto_install URL's hostname
    hostname = urlparse(autoinstall_config["url"]).hostname
    self._webhook_logger = "http://{}:{}/log/".format(
        hostname, webhook_config['webhook_port'])
    # random per-install secret; the webhook uses it to verify tokens
    self._session_secret = token_urlsafe()
    self._session_id = "{}-{}".format(
        self._profile.system_name, self._profile.profile_name)
    # use a common requests session during the whole install process
    self._session = requests.Session()
# __init__()
@staticmethod
def _add_systemd_osname(iface):
    """
    Determine and add a key to the iface dict representing the kernel
    device name used by the installer for the given network interface

    Args:
        iface (dict): network interface information dict; must contain
            attributes['ccwgroup'], a comma-separated list of ccw device
            ids whose first entry is the control read device
    """
    ccwgroup = iface['attributes']["ccwgroup"].split(",")
    # The control read device number is used to create a predictable
    # device name for OSA network interfaces (for details see
    # https://www.freedesktop.org/wiki/Software/systemd/
    # PredictableNetworkInterfaceNames/)
    # NOTE(review): lstrip('.0') strips *all* leading '.' and '0'
    # characters (e.g. '0.0.0800' -> '800'), not just a '0.0.' prefix —
    # verify this matches the names systemd assigns for devices with a
    # non-zero subchannel set id.
    iface["systemd_osname"] = (
        "enc{}".format(ccwgroup[0].lstrip('.0'))
    )
# _add_systemd_osname()
def _create_webhook_session(self):
    """
    Register a new session on the webhook

    Webhook will verify token on incoming requests using our secret

    Raises:
        RuntimeError: if the webhook refuses to create the session
    """
    self._logger.info("Creating webhook session %s", self._session_id)
    data = {
        "id": self._session_id,
        "log_path": os.getcwd(),
        # Timeout after last message received by webhook, after which
        # session is considered "hanging" and therefore removed.
        # There is an installation step, which downloads security updates,
        # during which the system reports nothing, so this timeout
        # is somewhat large. At the same time, installer keeps an eye on
        # crashes, so larger values should not present an issue
        # of a failed installation hanging for too long.
        "timeout": 1200,
        "secret": self._session_secret
    }
    http_result = self._session.post(self._webhook_control + "/session",
                                     json=data)
    if http_result.status_code != 201:
        # bug fix: the exception object was previously created but never
        # raised, silently ignoring a failed session registration
        raise RuntimeError('Installation could not be started:' +
                           ' failed to open webhook session')
def _read_events(self):
    """
    Poll the webhook's event stream until the watcher reports success,
    failure, or the overall installation timeout expires.

    Raises:
        RuntimeError: if the logs cannot be fetched, or a failure
            trigger was seen in the event stream
        TimeoutError: if neither success nor failure was detected before
            the timeout
    """
    max_wait_install = 3600
    timeout_installation = monotonic() + max_wait_install
    if self._model.operating_system.major == 2010:
        # Use different marker for Ubuntu 20.10
        watcher = LogWatcherUbuntu2010(timeout_installation)
    else:
        watcher = LogWatcher(timeout_installation)
    # polling interval, in seconds
    frequency_check = 2.5
    # index of the next event to request; advanced by the number of
    # events received on each poll
    last_event = 0
    # watchdog file-retrieval events are buffered and only dumped (at
    # debug level) if the installation ends up failing
    watchdog_events = []
    while not watcher.should_stop(monotonic()):
        session_logs = self._session.get(
            "{}/session/{}/logs".format(self._webhook_control,
                                        self._session_id),
            params={"start": last_event, "end": 0})
        if session_logs.status_code != 200:
            raise RuntimeError("Could not read installation logs")
        # We receive a number of log events that were captured by webhook.
        # These include text messages from subiquity, and file metadata
        # from installer watchdog
        events = session_logs.json()
        last_event += len(events)
        for event_string in events:
            log_event = LogEvent(event_string)
            if log_event._origin == 'watchdog':
                watchdog_events.append(log_event)
            else:
                self._logger.info("%s", str(log_event))
            marker = watcher.process(log_event, monotonic())
            if marker == EventMarker.FAIL:
                self._logger.fatal("Detected installation failure")
        sleep(frequency_check)
    if not watcher.success:
        if watcher.failure:
            # dump what we know
            for event in watchdog_events:
                self._logger.debug("%s", str(event))
            raise RuntimeError('Installation could not be completed')
        # no failure detected, but no success either
        raise TimeoutError('Installation Timeout: The installation'
                           ' process is taking too long')
# _read_events()
def _render_installer_cmdline(self):
    """
    Render the installer kernel command line from its jinja template.

    Returns:
        str: the rendered command line, stripped of surrounding whitespace
    """
    # a single cmdline template serves all subiquity-based OS versions
    with open(TEMPLATES_DIR + 'subiquity.cmdline.jinja', "r") as tpl_file:
        tpl_content = tpl_file.read()
    self._logger.debug(
        "Using subiquity installer cmdline template for "
        "OS %s of type '%s'", self._os.name, self._os.type)
    return jinja2.Template(tpl_content).render(config=self._info).strip()
@staticmethod
def _convert_fs(fs_name):
    """
    Convert the filesystem name to a name valid for parted.

    Args:
        fs_name (str): filesystem name

    Returns:
        str: the filesystem name adapted for parted
    """
    # parted labels every ext-family filesystem 'ext2' and swap
    # 'linux-swap'; anything else passes through unchanged
    parted_names = {
        'ext2': 'ext2',
        'ext3': 'ext2',
        'ext4': 'ext2',
        'swap': 'linux-swap',
    }
    return parted_names.get(fs_name, fs_name)
# _convert_fs()
def cleanup(self):
    """
    Called upon job cancellation or end. Removes the webhook session and
    delegates further cleanup (presumably including autofile removal —
    see SmBase) to the base class.

    Do not call this method directly but indirectly from machine.py to make
    sure that the cleaning_up variable is set.
    """
    http_result = self._session.delete("{}/session/{}".format(
        self._webhook_control, self._session_id))
    # session removal is best-effort: a failure is only logged, never raised
    if http_result.status_code != 200:
        self._logger.debug("Webhook session %s not removed: %s",
                           self._session_id, http_result.text)
    else:
        self._logger.debug("Removed webhook session %s: %s",
                           self._session_id, http_result.text)
    self._session.close()
    super().cleanup()
# cleanup()
def fill_template_vars(self):
    """
    See SmBase for docstring.

    On top of the base implementation this computes, for each storage
    volume, per-partition offsets and device paths; adds systemd
    interface names for OSA network interfaces; derives the installer
    ISO path / apt source line for each repository; and injects the
    webhook connection details for the target to report back to.
    """
    # collect repos, volumes, ifaces
    super().fill_template_vars()
    # Gather the device numbers of the disks and the paths
    # (denov, wwpn, lun).
    for svol in self._info["svols"]:
        # skip volumes with no usable partition table definition
        try:
            svol["part_table"]["type"]
        except (TypeError, KeyError):
            continue
        part_table = svol['part_table']
        if part_table["type"] == "msdos":
            # primary partitions must come first on an msdos table
            part_table["table"].sort(
                key=lambda x: 0 if x['type'] == 'primary' else 1
            )
            # This will accumulate the size until now
            size = 0
            # insert an 'extended' container partition right before the
            # first logical one, spanning the remaining disk space
            for i in range(len(part_table["table"])):
                part = part_table["table"][i]
                if part['type'] == 'logical':
                    part_table["table"].insert(
                        i, {
                            'type': 'extended',
                            "size": (svol['size'] - size),
                            'fs': "",
                            'mo': None,
                            'mp': None
                        })
                    break
                size += part['size']
        ref_size = 1
        part_index = 1
        for part in part_table["table"]:
            part['start'] = ref_size
            # In case the partition table is not msdos
            part.setdefault('type', '')
            # There is only primary/extended/logical partitions for msdos
            # msdos part table.
            if part_table['type'] != 'msdos':
                part['type'] = ''
            part['end'] = ref_size + part['size']
            part['parted_fs'] = self._convert_fs(part['fs'])
            part['device'] = (svol['system_attributes']['device']
                              + '-part{}'.format(part_index))
            # multipath partitions follow a different rule to name the
            # devices
            if (svol['type'] == 'FCP' and svol['specs']['multipath']
                    and self._info['system_type'] != 'KVM'):
                part['device'] = (
                    "/dev/disk/by-id/dm-uuid-part{}-mpath-{}".format(
                        part_index, svol['specs']['wwid']))
            # logical partitions inside the extended container are
            # numbered from 5, and the container itself only consumes
            # one unit of offset
            if part['type'] == 'extended':
                ref_size += 1
                part_index = 5
            else:
                ref_size += part['size']
                part_index += 1
        if svol['is_root']:
            self._info['root_disk'] = svol
    # Gather the device numbers of the OSA interfaces.
    for iface in self._info["ifaces"] + [self._info['gw_iface']]:
        if iface["type"] == "OSA":
            self._add_systemd_osname(iface)
    # It is easier to get the following information here than in the
    # template.
    for repo in self._info['repos']:
        # install repository url: no parsing needed
        if repo['os'] and repo['install_image']:
            iso_url = urlparse(repo['install_image'])
            # handle both complete URLs and relative ones
            if iso_url.scheme:
                repo['iso_path'] = repo['install_image']
            else:
                repo['iso_path'] = "{repo}/{iso}".format(
                    repo=repo['url'], iso=repo['install_image'])
            continue
        if repo['os']:
            # repository contains OS, but no install_image
            # required for Subiquity
            raise ValueError(
                "Subiquity installer requires 'install_image' set "
                "in repository {}".format(repo['name']))
        # otherwise ubuntu has everything in /dists/
        try:
            root_path, comps = repo['url'].split('/dists/', 1)
        except ValueError:
            raise ValueError(
                "Repository URL <{}> is in invalid format, no '/dists/' "
                "component found".format(repo['url'])) from None
        # apt source line: "<root> <suite> <components...>"
        repo['apt_url'] = '{} {}'.format(
            root_path, comps.replace('/', ' ')).rstrip()
    # Add webhook information
    # TODO: encode token with a secret instead of just passing secret
    self._info['webhook'] = {
        "endpoint": self._webhook_logger,
        "key": self._session_id,
        "token": self._session_secret
    }
# fill_template_vars()
def create_autofile(self):
    """
    Fill the template and create the autofile in the target location.

    Subiquity consumes a *directory* containing 'user-data' (the
    rendered template) and an empty 'meta-data' file; a copy of the
    rendered content is also kept in the job's working directory.
    """
    self._logger.info("generating autofile")
    self._remove_autofile()
    template = jinja2.Template(self._template.content)
    self._logger.info(
        "autotemplate will be used: '%s'", self._template.name)
    autofile_content = template.render(config=self._info)
    # Subiquity requires a directory to be present, we pass that
    # as the autofile location. In the directory a file named
    # 'user-data' must be located.
    try:
        os.mkdir(self._autofile_path)
    except FileExistsError:
        # that's fine, it's a directory, even though it should
        # not exist after _remove_autofile
        pass
    # Write the autofile for usage during installation
    # by the distro installer.
    with open(self._autofile_path + "/user-data", "w") as autofile:
        autofile.write(autofile_content)
    # an (empty) meta-data file is expected next to user-data
    with open(self._autofile_path + "/meta-data", "w") as autofile:
        autofile.write("")
    # Write the autofile in the directory that the state machine
    # is executed.
    autofile_in_jobdir = os.path.join(
        self._work_dir, os.path.basename(self._autofile_path))
    try:
        with open(autofile_in_jobdir, "w") as autofile:
            autofile.write(autofile_content)
    except IsADirectoryError:
        # if for some reason we store template files as is, in a directory,
        # use 'user-data' name in that directory
        with open(os.path.join(autofile_in_jobdir, 'user-data'),
                  "w") as autofile:
            autofile.write(autofile_content)
# create_autofile()
def target_reboot(self):
    """
    No explicit reboot is issued here — subiquity reboots the target by
    itself — so only the boot device needs to be set up.
    """
    self._logger.info("waiting for system to reboot")
    boot_device = self._profile.get_boot_device()
    self._platform.set_boot_device(boot_device)
# target_reboot()
def wait_install(self):
    """
    Wait for the installation to finish.

    Opens a session on the webhook, then consumes its log/event stream
    until success, failure or timeout.
    """
    self._create_webhook_session()
    self._read_events()
# wait_install()
# SmSubiquityInstaller
|
import unittest
import pandas as pd
from unittest.mock import patch
from sfdxmagic.functions import parse_magic_invocation, execute_query, execute_apex
class TestInvocation(unittest.TestCase):
    """Tests for parse_magic_invocation's argument splitting."""

    def test_supports_out_var(self):
        # a leading identifier is treated as the output variable name
        args = parse_magic_invocation("var -a -b")
        assert args["variable"] == "var", "should recognize the variable"
        assert args["sfdx_args"] == "-a -b", "should recognize the rest"

    def test_only_sfdx_params(self):
        # with no leading identifier there is no output variable;
        # `is None` replaces the non-idiomatic `== None` (PEP 8)
        args = parse_magic_invocation("-a -b")
        assert args["variable"] is None, "should recognize no variable"
        assert args["sfdx_args"] == "-a -b", "should recognize the rest"
@patch("sfdxmagic.functions.execute_sfdx")
class TestQuery(unittest.TestCase):
    """Tests for execute_query; the sfdx CLI call is mocked out."""

    def test_base(self, execute_sfdx):
        execute_sfdx.return_value = {
            "status": 0,
            "result": {"records": [{"Id": "12345", "attributes": {}}]},
        }
        result = execute_query("-u user", "SELECT Id FROM Account LIMIT 1")
        execute_sfdx.assert_called_with(
            'force:data:soql:query -q "SELECT Id FROM Account LIMIT 1" -u user'
        )
        # isinstance is the idiomatic type check (tolerates subclasses)
        assert isinstance(result, pd.DataFrame)
        assert not result.empty

    def test_no_rows(self, execute_sfdx):
        execute_sfdx.return_value = {"status": 0, "result": {"records": []}}
        result = execute_query("-u user", "SELECT Id FROM Account LIMIT 1")
        execute_sfdx.assert_called_with(
            'force:data:soql:query -q "SELECT Id FROM Account LIMIT 1" -u user'
        )
        assert isinstance(result, pd.DataFrame)
        assert result.empty

    @patch("sfdxmagic.functions.get_ipython")
    def test_assign_to_scope(self, get_ipython, execute_sfdx):
        execute_sfdx.return_value = {
            "status": 0,
            "result": {"records": [{"Id": "12345", "attributes": {}}]},
        }
        # return value intentionally ignored; the assertion target is the
        # variable pushed into the IPython scope
        execute_query("var -u user", "SELECT Id FROM Account LIMIT 1")
        get_ipython().push.assert_called()
        push_args = get_ipython().push.call_args[0][0]
        assert "var" in push_args, "should assign var to the scope"
        assert isinstance(push_args["var"], pd.DataFrame), "should be a DataFrame"
@patch("sfdxmagic.functions.execute_sfdx")
class TestAnonymousApex(unittest.TestCase):
    """Tests for execute_apex; the sfdx CLI call is mocked out."""

    def test_base(self, execute_sfdx):
        execute_sfdx.return_value = {"status": 0, "result": {"logs": "line1\nline2"}}
        result = execute_apex("-u user", "System.debug('test');")
        execute_sfdx.assert_called()
        sfdx_args = execute_sfdx.call_args[0][0]
        assert sfdx_args.startswith("force:apex:execute"), "should call apex execution"
        # typo fixed in the assertion message ("should executed" -> "should execute")
        assert sfdx_args.endswith("-u user"), "should execute against a specific org"
        # isinstance is the idiomatic type check
        assert isinstance(result, list)

    @patch("sfdxmagic.functions.get_ipython")
    def test_assign_to_scope(self, get_ipython, execute_sfdx):
        execute_sfdx.return_value = {"status": 0, "result": {"logs": "line1\nline2"}}
        # return value intentionally ignored; the assertion target is the
        # variable pushed into the IPython scope
        execute_apex("var -u user", "System.debug('test');")
        get_ipython().push.assert_called()
        push_args = get_ipython().push.call_args[0][0]
        assert "var" in push_args, "should assign var to the scope"
        assert push_args["var"] == [
            "line1",
            "line2",
        ], "should set the var to the log lines"
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
|
import math
from arches.app.utils.date_utils import ExtendedDateFormat
from arches.app.utils.permission_backend import get_nodegroups_by_perm
from arches.app.search.elasticsearch_dsl_builder import (
Bool,
Match,
Query,
Nested,
Term,
Terms,
GeoShape,
Range,
MinAgg,
MaxAgg,
RangeAgg,
Aggregation,
GeoHashGridAgg,
GeoBoundsAgg,
FiltersAgg,
NestedAgg,
)
from arches.app.search.search_engine_factory import SearchEngineFactory
from arches.app.search.mappings import RESOURCES_INDEX
from arches.app.models.system_settings import settings
from django.core.cache import cache
class TimeWheel(object):
def time_wheel_config(self, user):
    """
    Build the time-wheel hierarchy (a d3Item tree) for the given user.

    Queries Elasticsearch for the overall min/max resource dates, slices
    that span into nested period tiers (millennium/century/decade by
    default, overridable via settings.TIMEWHEEL_DATE_TIERS) and counts,
    per period, the resources whose dates intersect it. Only nodegroups
    the user is permitted to read are counted. The result is cached per
    user when listed in settings.CACHE_BY_USER. Returns None when the
    index holds no dated resources.
    """
    se = SearchEngineFactory().create()
    query = Query(se, limit=0)
    nested_agg = NestedAgg(path="dates", name="min_max_agg")
    nested_agg.add_aggregation(MinAgg(field="dates.date"))
    nested_agg.add_aggregation(MaxAgg(field="dates.date"))
    query.add_aggregation(nested_agg)
    results = query.search(index=RESOURCES_INDEX)
    if (
        results is not None
        and results["aggregations"]["min_max_agg"]["min_dates.date"]["value"] is not None
        and results["aggregations"]["min_max_agg"]["max_dates.date"]["value"] is not None
    ):
        # dates are indexed as scaled integers; dividing by 10000 recovers
        # the year (NOTE(review): confirm against the index mapping)
        min_date = int(results["aggregations"]["min_max_agg"]["min_dates.date"]["value"]) / 10000
        max_date = int(results["aggregations"]["min_max_agg"]["max_dates.date"]["value"]) / 10000
        # round min and max date to the nearest 1000 years
        min_date = math.ceil(math.fabs(min_date) / 1000) * -1000 if min_date < 0 else math.floor(min_date / 1000) * 1000
        max_date = math.floor(math.fabs(max_date) / 1000) * -1000 if max_date < 0 else math.ceil(max_date / 1000) * 1000
        query = Query(se, limit=0)
        # maps period label -> [start, end]; filled by add_date_tier and
        # later re-attached to the aggregation results
        range_lookup = {}

        def gen_range_agg(gte=None, lte=None, permitted_nodegroups=None):
            # Match resources whose point dates OR date ranges intersect
            # [gte, lte]; the nodegroup filter is only passed in at the
            # root tier (child tiers inherit it implicitly).
            date_query = Bool()
            date_query.filter(Range(field="dates.date", gte=gte, lte=lte, relation="intersects"))
            if permitted_nodegroups is not None:
                date_query.filter(Terms(field="dates.nodegroup_id", terms=permitted_nodegroups))
            date_ranges_query = Bool()
            date_ranges_query.filter(Range(field="date_ranges.date_range", gte=gte, lte=lte, relation="intersects"))
            if permitted_nodegroups is not None:
                date_ranges_query.filter(Terms(field="date_ranges.nodegroup_id", terms=permitted_nodegroups))
            wrapper_query = Bool()
            wrapper_query.should(Nested(path="date_ranges", query=date_ranges_query))
            wrapper_query.should(Nested(path="dates", query=date_query))
            return wrapper_query

        date_tiers = {
            "name": "Millennium",
            "interval": 1000,
            "root": True,
            "child": {"name": "Century", "interval": 100, "child": {"name": "Decade", "interval": 10}},
        }
        # for very wide spans, use coarser tiers to bound the number of
        # aggregation buckets
        if abs(int(min_date) - int(max_date)) > 1000:
            date_tiers = {
                "name": "Millennium",
                "interval": 1000,
                "root": True,
                "child": {"name": "Half-millennium", "interval": 500, "child": {"name": "Century", "interval": 100}},
            }
        if settings.TIMEWHEEL_DATE_TIERS is not None:
            date_tiers = settings.TIMEWHEEL_DATE_TIERS

        def add_date_tier(date_tier, low_date, high_date, previous_period_agg=None):
            # Recursively build one filters-aggregation per period and
            # nest the child tier's aggregations beneath it.
            interval = date_tier["interval"]
            name = date_tier["name"]
            within_range = True
            if "root" in date_tier:
                high_date = int(high_date) + interval
            for period in range(int(low_date), int(high_date), interval):
                min_period = period
                max_period = period + interval
                if "range" in date_tier:
                    within_range = min_period >= date_tier["range"]["min"] and max_period <= date_tier["range"]["max"]
                period_name = "{0} ({1} - {2})".format(name, min_period, max_period)
                # permission filtering only needs to happen at the root
                nodegroups = self.get_permitted_nodegroups(user) if "root" in date_tier else None
                period_boolquery = gen_range_agg(
                    gte=ExtendedDateFormat(min_period).lower, lte=ExtendedDateFormat(max_period).lower, permitted_nodegroups=nodegroups
                )
                period_agg = FiltersAgg(name=period_name)
                period_agg.add_filter(period_boolquery)
                if "root" not in date_tier:
                    if within_range is True:
                        previous_period_agg.add_aggregation(period_agg)
                range_lookup[period_name] = [min_period, max_period]
                if "child" in date_tier:
                    add_date_tier(date_tier["child"], min_period, max_period, period_agg)
                if "root" in date_tier:
                    query.add_aggregation(period_agg)

        add_date_tier(date_tiers, min_date, max_date)
        root = d3Item(name="root")
        results = {"buckets": [query.search(index=RESOURCES_INDEX)["aggregations"]]}
        results_with_ranges = self.appendDateRanges(results, range_lookup)
        self.transformESAggToD3Hierarchy(results_with_ranges, root)
        # calculate total number of docs
        for child in root.children:
            root.size = root.size + child.size
        if user.username in settings.CACHE_BY_USER:
            key = "time_wheel_config_{0}".format(user.username)
            cache.set(key, root, settings.CACHE_BY_USER[user.username])
        return root
def transformESAggToD3Hierarchy(self, results, d3ItemInstance):
    """Recursively convert an ES filters-aggregation tree into d3Item nodes.

    Scalar bucket keys ('from'/'to'/'doc_count') populate the current
    node; every other key is a nested period aggregation and becomes a
    child node. Children are kept in chronological order.
    """
    if "buckets" not in results:
        return d3ItemInstance

    bucket = results["buckets"][0]
    for key, value in bucket.items():
        if key == "from":
            d3ItemInstance.start = int(value)
        elif key == "to":
            d3ItemInstance.end = int(value)
        elif key == "doc_count":
            d3ItemInstance.size = value
        elif key == "key":
            pass
        else:
            child = self.transformESAggToD3Hierarchy(value, d3Item(name=key))
            # only append items if they have a document count > 0
            if child.size > 0:
                d3ItemInstance.children.append(child)

    d3ItemInstance.children.sort(key=lambda node: node.start)
    return d3ItemInstance
def appendDateRanges(self, results, range_lookup):
if "buckets" in results:
bucket = results["buckets"][0]
for key, value in bucket.items():
if key in range_lookup:
bucket[key]["buckets"][0]["from"] = range_lookup[key][0]
bucket[key]["buckets"][0]["to"] = range_lookup[key][1]
self.appendDateRanges(value, range_lookup)
return results
def get_permitted_nodegroups(self, user):
    """Return the pks (as strings) of nodegroups the user may read."""
    permitted = []
    for nodegroup in get_nodegroups_by_perm(user, "models.read_nodegroup"):
        permitted.append(str(nodegroup.pk))
    return permitted
class d3Item(object):
    """Node of the d3 hierarchy built from the time-wheel aggregations.

    Attributes:
        name (str): bucket label
        size (int): document count accumulated in this node
        start: period start (int) once known, else None
        end: period end (int) once known, else None
        children (list): nested d3Item nodes
    """

    def __init__(self, **kwargs):
        # Per-instance state only. The previous class-level defaults were
        # always shadowed here, and the mutable class attribute
        # `children = []` was a latent aliasing hazard (one list shared by
        # anything reading the attribute before __init__ assignment).
        self.name = kwargs.pop("name", "")
        self.size = kwargs.pop("size", 0)
        self.start = kwargs.pop("start", None)
        self.end = kwargs.pop("end", None)
        self.children = kwargs.pop("children", [])
|
from QGrain.artificial._sample import *
from QGrain.artificial._setting import *
# Preset component parameters for synthesizing grain-size distributions.
# Each dict describes one component; each value is a pair — presumably
# (mean, spread) for randomization by RandomSetting (confirm there).
LOESS = [dict(shape=(0.0, 0.10), loc=(10.2, 0.1), scale=(1.1, 0.1), weight=(1.0, 0.1)),
         dict(shape=(0.0, 0.10), loc=(7.5, 0.1), scale=(1.2, 0.1), weight=(2.0, 0.1)),
         dict(shape=(0.0, 0.10), loc=(5.0, 0.2), scale=(1.0, 0.1), weight=(4.0, 0.2))]
# Same three components as LOESS plus a fourth one.
LACUSTRINE = [dict(shape=(0.0, 0.10), loc=(10.2, 0.1), scale=(1.1, 0.1), weight=(1.0, 0.1)),
              dict(shape=(0.0, 0.10), loc=(7.5, 0.1), scale=(1.2, 0.1), weight=(2.0, 0.1)),
              dict(shape=(0.0, 0.10), loc=(5.0, 0.2), scale=(1.0, 0.1), weight=(4.0, 0.2)),
              dict(shape=(0.0, 0.10), loc=(2.2, 0.4), scale=(1.0, 0.2), weight=(3.0, 1.0))]
def get_random_dataset(target=LOESS, n_samples=100,
                       min_μm=0.02, max_μm=2000.0, n_classes=101,
                       precision=4, noise=5):
    """Build an ArtificialDataset of *n_samples* randomized samples.

    Component parameters are drawn around the *target* preset; the
    remaining keyword arguments are forwarded to ArtificialDataset.
    """
    setting = RandomSetting(target)
    params = setting.get_random_params(n_samples=n_samples)
    return ArtificialDataset(params,
                             min_μm=min_μm, max_μm=max_μm,
                             n_classes=n_classes,
                             precision=precision, noise=noise)
def get_random_sample(target=LOESS,
                      min_μm=0.02, max_μm=2000.0, n_classes=101,
                      precision=4, noise=5):
    """Generate a single randomized artificial sample.

    Delegates to get_random_dataset with n_samples=1 instead of
    duplicating its body, then extracts and names the sole sample.
    """
    dataset = get_random_dataset(target=target, n_samples=1,
                                 min_μm=min_μm, max_μm=max_μm,
                                 n_classes=n_classes,
                                 precision=precision, noise=noise)
    sample = dataset.get_sample(0)
    sample.name = "Artificial Sample"
    return sample
|
# Generated by Django 3.0.5 on 2020-10-28 19:51
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: attach author/title FKs to Review and author/review FKs to Comment."""

    initial = True

    dependencies = [
        ("api", "0001_initial"),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Review.author -> user model (cascade delete with the user).
        migrations.AddField(
            model_name="review",
            name="author",
            field=models.ForeignKey(
                db_column="author",
                on_delete=django.db.models.deletion.CASCADE,
                related_name="reviews",
                to=settings.AUTH_USER_MODEL,
                verbose_name="Автор отзыва",
            ),
        ),
        # Review.title -> api.Title.
        migrations.AddField(
            model_name="review",
            name="title",
            field=models.ForeignKey(
                db_column="title_id",
                on_delete=django.db.models.deletion.CASCADE,
                related_name="reviews",
                to="api.Title",
                verbose_name="Произведение",
            ),
        ),
        # Comment.author -> user model.
        migrations.AddField(
            model_name="comment",
            name="author",
            field=models.ForeignKey(
                db_column="author",
                on_delete=django.db.models.deletion.CASCADE,
                related_name="comments",
                to=settings.AUTH_USER_MODEL,
                verbose_name="Автор комментария",
            ),
        ),
        # Comment.review -> api.Review.
        migrations.AddField(
            model_name="comment",
            name="review",
            field=models.ForeignKey(
                db_column="review_id",
                on_delete=django.db.models.deletion.CASCADE,
                related_name="comments",
                to="api.Review",
                verbose_name="Отзыв",
            ),
        ),
    ]
|
# (expression, parameter-name tuple) pairs for rate laws.
positive_rate_expr = ('a*exp(b*V)', ('a', 'b'))
negative_rate_expr = ('a*exp(-b*V)', ('a', 'b'))
# Fix: ('k') is just the string 'k', not a tuple — a trailing comma is
# required for a 1-tuple, matching the (expr, params-tuple) shape above.
constant_rate_expr = ('k', ('k',))
|
"""
Schemas for the Need Extension of Open-Needs Server
"""
from pydantic import BaseModel
from typing import Dict, Union, Optional
class NeedBaseSchema(BaseModel):
    """Fields shared by all Need schemas (create/update/filter/return)."""
    key: str
    type: str
    title: str
    description: str | None
    format: str | None
    project_id: int
    # Free-form option values; references map a relation name to target keys.
    options: Dict[str, Union[float, str]] | None
    references: Dict[str, list[str]] | None
class NeedFilterSchema(NeedBaseSchema):
    """Filter payload: narrows the base schema so all filter fields are optional.

    Fix: ``Optional[str] | None`` was a redundant double-optional
    (``Optional[X]`` already means ``X | None``); the simplified annotations
    are type-identical, so validation behaviour is unchanged.
    """
    title: Optional[str]
    description: Optional[str] = None
    project_id: Optional[int]
    options: Optional[Dict[str, Union[float, str]]] = None
    references: Optional[Dict[str, list[str]]] = None
class NeedCreateSchema(NeedBaseSchema):
    """Creation payload: title and project are mandatory, the rest default to empty."""
    title: str
    description: str | None = None
    project_id: int
    # NOTE(review): pydantic copies field defaults per instance, so these dict
    # defaults are not the shared-mutable-default pitfall — confirm for the
    # pydantic version in use.
    options: Dict[str, Union[float, str]] | None = {}
    references: Dict[str, list[str]] | None = {}
class NeedUpdateSchema(NeedBaseSchema):
    """Partial-update payload: every field may be omitted.

    Fix: ``project_id`` was annotated ``Optional[int] | None`` — a redundant
    double-optional; ``Optional[int]`` is the identical type.
    """
    title: Optional[str]
    description: Optional[str]
    project_id: Optional[int]
    options: Optional[Dict[str, Union[float, str]]]
    references: Optional[Dict[str, list[str]]]
class NeedReturnSchema(NeedBaseSchema):
    """Response schema: options/references are always present in replies."""
    options: Dict[str, Union[float, str]]
    references: Dict[str, list[str]]

    class Config:
        # Allow building instances straight from ORM objects (pydantic v1).
        orm_mode = True
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# string_trie_pattern_search_implement.py
# python
#
# 🎂"Here's to the crazy ones. The misfits. The rebels.
# The troublemakers. The round pegs in the square holes.
# The ones who see things differently. They're not fond
# of rules. And they have no respect for the status quo.
# You can quote them, disagree with them, glorify or vilify
# them. About the only thing you can't do is ignore them.
# Because they change things. They push the human race forward.
# And while some may see them as the crazy ones, we see genius.
# Because the people who are crazy enough to think they can change
# the world, are the ones who do."
#
# Created by Chyi Yaqing on 02/27/19 10:33.
# Copyright © 2019. Chyi Yaqing.
# All rights reserved.
#
# Distributed under terms of the
# MIT
"""
In computer science, a trie, also called digital tree, radix tree or prefix
tree, is a kind of search tree - an ordered tree data structure used to store a
dynamic set or associative array where the keys are usually strings.
Trie | (Delete)
During delete operation we delete the key in bottom up manner using recursion.
The following are possible conditions when deleting key from trie.
1) Key may not be there in trie. Delete operation should not modify trie
2) Key present as unique key(no part of key contains another key (prefix),
not the key itself is prefix of another key in trie). Delete all the nodes.
3) Key is prefix key of another long key in trie. Unmark the leaf node.
4) Key present in trie, having at least one other key as prefix key. Delete
nodes from end of key until first leaf node of longest prefix key.
"""
# Python program for insert and search operation in a Trie
class TrieNode:
    """A single trie node: one child slot per lowercase letter plus a word marker."""

    def __init__(self):
        # One slot for each of 'a'..'z'; None means "no child yet".
        self.children = [None for _ in range(26)]
        # True when the path from the root to this node spells a stored word.
        self.isEndOfWord = False
# A trie is a highly specialised, efficient string-matching structure.
class Trie:
    """Prefix tree over lowercase ASCII keys ('a'-'z' only).

    Fix: ``delete`` previously crashed with AttributeError when the key's
    path was absent from the trie (it recursed into a ``None`` child and
    called ``isEmpty(None)``). Deleting a missing key is now a no-op, which
    is what the module docstring promises ("Delete operation should not
    modify trie").
    """

    def __init__(self):
        self.root = self.getNode()

    def getNode(self):
        """Return a fresh, empty trie node."""
        return TrieNode()

    def _charToIndex(self, ch):
        # Map 'a'..'z' to 0..25; other characters are unsupported.
        return ord(ch) - ord('a')

    def insert(self, key):
        """Insert key, creating nodes as needed; marks the final node as a word end."""
        node = self.root
        for ch in key:
            index = self._charToIndex(ch)
            if not node.children[index]:
                node.children[index] = self.getNode()
            node = node.children[index]
        node.isEndOfWord = True

    def isEmpty(self, root):
        """Return True if the node has no children."""
        return all(child is None for child in root.children)

    def delete(self, root, key, depth=0):
        """Recursively delete key from the subtree rooted at ``root``.

        Returns the (possibly pruned) node so the caller can relink its
        child slot; returns None when the node was removed.
        """
        # Path for the key does not exist: nothing to delete (fix, see class docstring).
        if root is None:
            return None
        if depth == len(key):
            # Reached the key's final node: unmark it as a word end.
            if root.isEndOfWord:
                root.isEndOfWord = False
            # Prune it if it no longer serves any longer key.
            return None if self.isEmpty(root) else root
        index = self._charToIndex(key[depth])
        root.children[index] = self.delete(root.children[index], key, depth + 1)
        # Prune inner nodes that became childless and mark no word themselves.
        if self.isEmpty(root) and not root.isEndOfWord:
            return None
        return root

    def search(self, key):
        """Return True if key was inserted (and not since deleted)."""
        node = self.root
        for ch in key:
            index = self._charToIndex(ch)
            if not node.children[index]:
                return False
            node = node.children[index]
        return node is not None and node.isEndOfWord
# driver function
def main():
    """Demo: build a trie, run a few searches, delete a key, search again."""
    # Input keys (use only 'a' through 'z' and lower case)
    keys = ["the", "a", "there", "anaswe", "any", "by", "their"]
    # search() returns a bool, which indexes this list (False -> 0, True -> 1).
    # Fix: the positive message read "Present in tire" (typo for "trie").
    output = ["Not present in trie", "Present in trie"]

    t = Trie()
    for key in keys:
        t.insert(key)

    print("{} ---- {}".format("the", output[t.search("the")]))
    t.delete(t.root, "the")
    print("{} ---- {}".format("the", output[t.search("the")]))
    print("{} ---- {}".format("these", output[t.search("these")]))
    print("{} ---- {}".format("their", output[t.search("their")]))
    print("{} ---- {}".format("thaw", output[t.search("thaw")]))


if __name__ == '__main__':
    main()
|
"""Some small utils."""
import logging
import os
import webbrowser
from distutils import spawn # pylint: disable=no-name-in-module
from typing import List
from PIL import Image
LOG = logging.getLogger(__name__)
def open_browser() -> None:  # pragma: no cover
    """Open app in browser."""
    LOG.info('Open browser...')
    # Hard-coded Flask development-server default address.
    webbrowser.open('http://127.0.0.1:5000')
# pylint: disable=too-few-public-methods
class ChDir(object):
    """Contextmanager to temporarily change the working directory.

    Fix: ``__enter__`` now returns ``self`` (the context-manager convention),
    so ``with ChDir(d) as cd:`` binds the manager instead of ``None``.
    Existing ``with ChDir(d):`` uses are unaffected.
    """

    def __init__(self, new_dir: str) -> None:
        # Remember where we came from so __exit__ can restore it.
        self.old_dir: str = os.getcwd()
        self.new_dir: str = new_dir

    def __enter__(self):
        LOG.debug('enter %s...', self.new_dir)
        os.chdir(self.new_dir)
        return self

    def __exit__(self, *args):
        LOG.debug('enter %s again...', self.old_dir)
        os.chdir(self.old_dir)
def commands_available(commands: List[str]) -> bool:
    """Return True if every command in *commands* resolves to an executable on PATH.

    Fix: ``distutils.spawn.find_executable`` is deprecated and ``distutils``
    was removed in Python 3.12; ``shutil.which`` is the stdlib replacement.
    The redundant intermediate list inside ``all()`` is gone too.
    """
    import shutil  # local import so this fix does not touch the module imports
    return all(shutil.which(command) is not None for command in commands)
def is_blank(image_file: str) -> bool:
    """Checks if image is blank.

    An image is considered blank when fewer than 0.5% of its thresholded
    pixels are black relative to the white ones.

    Fix: a fully black scan made ``white`` zero and raised
    ZeroDivisionError; such a page is clearly not blank, so return False.
    The histogram is also computed once instead of twice.

    Thanks to:
    https://stackoverflow.com/a/18778280
    https://www.splitbrain.org/blog/2014-08/24-paper_backup_2_automation_scripts
    """
    image = Image.open(image_file)
    # Threshold to pure black/white at mid-grey.
    black_white = image.point(lambda x: 0 if x < 128 else 255, '1')
    histogram = black_white.histogram()
    black = histogram[0]
    white = histogram[-1]
    if white == 0:
        # Entirely black page: not blank (and avoids division by zero).
        return False
    return black / white < 0.005
|
# Exercise: create a dictionary object called currentBook with three
# attributes (Title, Author, Price), print it, print just the author,
# add an ISBN key, then print every value with a for loop.
currentBook = {
    "Title":"Harry Potter eats his dinner",
    "Author": "Just Kidding Rowling",
    "Price": 12
}
# print the whole dictionary object
print (currentBook)
# print just the author
print (currentBook["Author"])
# create and set an ISBN attribute (a new key/value pair)
currentBook["ISBN"] = "123455"
# use a for loop to iterate through currentBook's values
# (dicts preserve insertion order in Python 3.7+)
print("the current values has these values:")
for value in currentBook.values():
    print(" => {}".format(value))
|
# Entry point: launch the skimviz application.
from skimviz import start

start()
|
from pypy.conftest import gettestobjspace
class AppTestNumpy:
    """App-level PyPy test: 'numpy' only imports after 'numpypy' has been imported."""

    def setup_class(cls):
        # Build an app-level object space with the micronumpy module enabled.
        cls.space = gettestobjspace(usemodules=['micronumpy'])

    def test_imports(self):
        try:
            import numpy  # fails if 'numpypy' was not imported so far
        except ImportError:
            pass
        import numpypy
        import numpy  # works after 'numpypy' has been imported
|
import os, codecs
import pandas as pd
import numpy as np
%pylab inline
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('svg')
from matplotlib import font_manager as fm, rcParams
import matplotlib.pyplot as plt
!ls ../input/ -l
PATH = '../input/'
def bike_fence_format(s):
    """Parse a '[x1,y1,...]' fence string into a (5, -1) float array."""
    values = s.replace('[', '').replace(']', '').split(',')
    return np.array(values).astype(float).reshape(5, -1)
# Shared-bike parking spots ("electronic fence") data
bike_fence = pd.read_csv(PATH + 'gxdc_tcd.csv')
bike_fence['FENCE_LOC'] = bike_fence['FENCE_LOC'].apply(bike_fence_format)
# Shared-bike order (trip) data
bike_order = pd.read_csv(PATH + 'gxdc_dd.csv')
bike_order = bike_order.sort_values(['BICYCLE_ID', 'UPDATE_TIME'])
import geohash
# Tag each order with a fine (precision-9) geohash of its position.
bike_order['geohash'] = bike_order.apply(lambda x:
    geohash.encode(x['LATITUDE'], x['LONGITUDE'], precision=9), axis=1)
from geopy.distance import geodesic
# Bounding box of each fence; per the indexing below, FENCE_LOC columns are (lon, lat).
bike_fence['MIN_LATITUDE'] = bike_fence['FENCE_LOC'].apply(lambda x: np.min(x[:, 1]))
bike_fence['MAX_LATITUDE'] = bike_fence['FENCE_LOC'].apply(lambda x: np.max(x[:, 1]))
bike_fence['MIN_LONGITUDE'] = bike_fence['FENCE_LOC'].apply(lambda x: np.min(x[:, 0]))
bike_fence['MAX_LONGITUDE'] = bike_fence['FENCE_LOC'].apply(lambda x: np.max(x[:, 0]))
# NOTE(review): despite the name, this is the bounding-box diagonal in meters,
# not an area — confirm downstream use is consistent with that.
bike_fence['FENCE_AREA'] = bike_fence.apply(lambda x: geodesic(
    (x['MIN_LATITUDE'], x['MIN_LONGITUDE']), (x['MAX_LATITUDE'], x['MAX_LONGITUDE'])
).meters, axis=1)
# Fence center as (lat, lon): x[:-1] drops the last vertex (presumably a
# repeated closing point — confirm) and ::-1 flips (lon, lat) to (lat, lon).
bike_fence['FENCE_CENTER'] = bike_fence['FENCE_LOC'].apply(
    lambda x: np.mean(x[:-1, ::-1], 0)
)
import geohash
# Re-encode orders and fence centers at coarser precision 6 for a joinable key.
bike_order['geohash'] = bike_order.apply(
    lambda x: geohash.encode(x['LATITUDE'], x['LONGITUDE'], precision=6),
    axis=1)
bike_fence['geohash'] = bike_fence['FENCE_CENTER'].apply(
    lambda x: geohash.encode(x[0], x[1], precision=6)
)
# bike_order
# Quick geohash check on two nearby points (result shown in the notebook).
geohash.encode(24.521156, 118.140385, precision=6), \
geohash.encode(24.521156, 118.140325, precision=6)
# Derive day/hour string features and a combined, zero-padded DAY_HOUR key.
bike_order['UPDATE_TIME'] = pd.to_datetime(bike_order['UPDATE_TIME'])
bike_order['DAY'] = bike_order['UPDATE_TIME'].dt.day.astype(object)
bike_order['DAY'] = bike_order['DAY'].apply(str)
bike_order['HOUR'] = bike_order['UPDATE_TIME'].dt.hour.astype(object)
bike_order['HOUR'] = bike_order['HOUR'].apply(str)
bike_order['HOUR'] = bike_order['HOUR'].str.pad(width=2,side='left',fillchar='0')
bike_order['DAY_HOUR'] = bike_order['DAY'] + bike_order['HOUR']
# Hourly inflow per geohash cell (LOCK_STATUS == 1 — presumably "locked"/returned; confirm).
bike_inflow = pd.pivot_table(bike_order[bike_order['LOCK_STATUS'] == 1],
                             values='LOCK_STATUS', index=['geohash'],
                             columns=['DAY_HOUR'], aggfunc='count', fill_value=0
)
# Hourly outflow per geohash cell (LOCK_STATUS == 0 — presumably "unlocked"/taken).
bike_outflow = pd.pivot_table(bike_order[bike_order['LOCK_STATUS'] == 0],
                              values='LOCK_STATUS', index=['geohash'],
                              columns=['DAY_HOUR'], aggfunc='count', fill_value=0
)
# Visualise in/out flow for two sample cells.
bike_inflow.loc['wsk52r'].plot()
bike_outflow.loc['wsk52r'].plot()
plt.xticks(list(range(bike_inflow.shape[1])), bike_inflow.columns, rotation=40)
plt.legend(['Inflow', 'OutFlow'])
bike_inflow.loc['wsk596'].plot()
bike_outflow.loc['wsk596'].plot()
plt.xticks(list(range(bike_inflow.shape[1])), bike_inflow.columns, rotation=40)
plt.legend(['Inflow', 'OutFlow'])
# Daily in/out flow per geohash cell.
bike_inflow = pd.pivot_table(bike_order[bike_order['LOCK_STATUS'] == 1],
                             values='LOCK_STATUS', index=['geohash'],
                             columns=['DAY'], aggfunc='count', fill_value=0
)
bike_outflow = pd.pivot_table(bike_order[bike_order['LOCK_STATUS'] == 0],
                              values='LOCK_STATUS', index=['geohash'],
                              columns=['DAY'], aggfunc='count', fill_value=0
)
# Net bikes left behind per cell: negative days clipped to 0, then summed over days.
bike_remain = (bike_inflow - bike_outflow).fillna(0)
bike_remain[bike_remain < 0] = 0
bike_remain = bike_remain.sum(1)
bike_fence['DENSITY'] = bike_fence['geohash'].map(bike_remain).fillna(0)
import hnswlib
import numpy as np
# Approximate nearest-neighbour index over fence centers (L2 metric, 2-D points).
p = hnswlib.Index(space='l2', dim=2)
p.init_index(max_elements=300000, ef_construction=1000, M=32)
p.set_ef(1024)
p.set_num_threads(14)
p.add_items(np.stack(bike_fence['FENCE_CENTER'].values))
# Snap every order to its nearest fence.
index, dist = p.knn_query(bike_order[['LATITUDE','LONGITUDE']].values[:], k=1)
bike_order['fence'] = bike_fence.iloc[index.flatten()]['FENCE_ID'].values
# Recompute daily in/out flow, now keyed by fence instead of geohash cell.
bike_inflow = pd.pivot_table(bike_order[bike_order['LOCK_STATUS'] == 1],
                             values='LOCK_STATUS', index=['fence'],
                             columns=['DAY'], aggfunc='count', fill_value=0
)
bike_outflow = pd.pivot_table(bike_order[bike_order['LOCK_STATUS'] == 0],
                              values='LOCK_STATUS', index=['fence'],
                              columns=['DAY'], aggfunc='count', fill_value=0
)
bike_remain = (bike_inflow - bike_outflow).fillna(0)
bike_remain[bike_remain < 0] = 0
bike_remain = bike_remain.sum(1)
# bike_fence = bike_fence.set_index('FENCE_ID')
# Remaining bikes normalised by the fence extent (FENCE_AREA).
bike_density = bike_remain / bike_fence.set_index('FENCE_ID')['FENCE_AREA']
bike_density = bike_density.sort_values(ascending=False).reset_index()
bike_density = bike_density.fillna(0)
# Mark the 100 densest fences '1', everything else '0'.
bike_density['label'] = '0'
bike_density.iloc[:100, -1] = '1'
bike_density['BELONG_AREA'] ='厦门'
# Drop the raw density column and rename for the submission format.
bike_density = bike_density.drop(0, axis=1)
bike_density.columns = ['FENCE_ID', 'FENCE_TYPE', 'BELONG_AREA']
bike_density.to_csv('result.txt', index=None, sep='|')
|
from tir import Webapp
import unittest
import time
class GTPA004(unittest.TestCase):
    """TIR UI tests for Protheus routine GTPA004: view, edit, delete, insert."""

    @classmethod
    def setUpClass(inst):
        # NOTE(review): conventional name for the classmethod parameter is `cls`.
        inst.oHelper = Webapp()
        inst.oHelper.Setup('SIGAGTP', '15/04/2020', 'T1', 'D MG 01 ', '88')
        inst.oHelper.Program('GTPA004')

    def test_GTPA004_CT001(self):
        """View an existing record and close the dialog."""
        print("test_GTPA004_CT001 - Visualizar")
        self.oHelper.SearchBrowse('D MG 00000000009', key=1, index=True)
        self.oHelper.SetButton('Visualizar')
        self.oHelper.SetButton('Fechar')
        self.oHelper.AssertTrue()

    def test_GTPA004_CT002(self):
        """Edit an existing record (change capacity) and confirm."""
        print("test_GTPA004_CT002 - Alterar")
        self.oHelper.SearchBrowse('D MG 00000000009', key=1, index=True)
        self.oHelper.SetButton('Outras Ações', 'Alterar')
        time.sleep(2)
        self.oHelper.SetButton('Sim')
        time.sleep(2)
        self.oHelper.SetValue('Lotação', '55')
        self.oHelper.SetButton('Confirmar')
        self.oHelper.SetButton('Fechar')
        self.oHelper.AssertTrue()

    def test_GTPA004_CT003(self):
        """Delete an existing record and confirm."""
        print("test_GTPA004_CT003 - Excluir")
        self.oHelper.SearchBrowse('D MG 00000000009', key=1, index=True)
        self.oHelper.SetButton('Outras Ações', 'Excluir')
        self.oHelper.SetButton('Confirmar')
        time.sleep(2)
        self.oHelper.SetButton('Sim')
        time.sleep(2)
        self.oHelper.SetButton('Fechar')
        self.oHelper.AssertTrue()

    def test_GTPA004_CT004(self):
        """Insert a new record: fill the form, pick every weekday, fill the grid."""
        print("test_GTPA004_CT004 - Incluir")
        self.oHelper.SetButton('Incluir')
        self.oHelper.SetBranch('D MG')
        self.oHelper.SetValue('Cód. Linha', '000001')
        self.oHelper.SetValue('Sentido', '1')
        self.oHelper.SetValue('Hora Inicio', '10:00')
        time.sleep(2)
        self.oHelper.SetValue('Vigência de', '01/05/2020')
        self.oHelper.SetValue('Vigência até', '30/05/2020')
        self.oHelper.SetValue('Lotação', '50')
        self.oHelper.ClickCheckBox('Segunda', 1)
        self.oHelper.ClickCheckBox('Terça', 1)
        self.oHelper.ClickCheckBox('Quarta', 1)
        self.oHelper.ClickCheckBox('Quinta', 1)
        self.oHelper.ClickCheckBox('Sexta', 1)
        self.oHelper.ClickCheckBox('Sábado', 1)
        self.oHelper.ClickCheckBox('Domingo', 1)
        self.oHelper.SetButton('Outras Ações', 'Seleção de Localidade')
        self.oHelper.SetButton('Confirmar')
        time.sleep(2)
        self.oHelper.SetButton('Fechar')
        self.oHelper.ClickGridCell("Hora destino", row=1, grid_number=1)
        self.oHelper.SetValue("Hora destino","12:00",grid=True, grid_number=1)
        self.oHelper.ClickGridCell("Tempo Exec.", row=1, grid_number=1)
        self.oHelper.SetValue("Tempo Exec.","01:00",grid=True, grid_number=1)
        self.oHelper.LoadGrid()
        time.sleep(2)
        self.oHelper.SetButton('Confirmar')
        time.sleep(2)
        self.oHelper.AssertTrue()

    @classmethod
    def tearDownClass(inst):
        inst.oHelper.TearDown()
# Run the test case directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
|
import base64
import os
import yaml
from flask import current_app
from github import Github, GithubException
from che_marketplace_backend.plugin import Plugin
METADATA_FILENAME = 'meta.yaml'
# Name of the repository on Github
PLUGIN_REGISTRY = 'eclipse/che-plugin-registry'
class Repository():
    """Reads che-plugin metadata (meta.yaml files) from the Github plugin registry."""

    def __init__(self):
        self.plugins = []
        # This seems to be lazy, so it will pass regardless of the content of the token
        self.g = Github(os.environ['CHE_PLUGIN_DEV_TOKEN'])
        # Populated lazily by _fetch_plugin_dirs().
        self.repo = None

    def _fetch_plugin_dirs(self):
        """Return the directory entries under plugins/ in the registry repo."""
        self.repo = self.g.get_repo(PLUGIN_REGISTRY)
        plugins_unfiltered = self.repo.get_contents("plugins")
        plugins_dirs = filter(lambda x: x.type == "dir", plugins_unfiltered)
        return plugins_dirs

    def _fetch_latest_version(self, dir):
        """Return the first version directory of a plugin (see TODO below)."""
        versions = self.repo.get_contents(dir.path)
        # TODO: sort and get the latest (but there is no plugin with multiple versions, so currently
        # TODO: I don't know how to sort it), also we need some versioning strategy (like semver)
        return versions[0]

    def _fetch_metadata(self, plugin_path):
        """Parse one plugin version's meta.yaml; return a Plugin, or None on decode/parse failure."""
        metadata_file = self.repo.get_contents(plugin_path.path + '/' + METADATA_FILENAME)
        try:
            text = base64.b64decode(metadata_file.content).decode('utf-8')
        except base64.binascii.Error:
            # The input string contains letters that does not belong to the b64 alphabet
            current_app.logger.error("could not decode b64 encoded file at path " + str(metadata_file))
            return None
        except UnicodeError:
            # Error while decoding the byte stream, not valid UTF-8
            current_app.logger.error("could not decode UTF-8 encoded content of file at path "
                                     + str(metadata_file))
            return None
        # NOTE(review): `dict` shadows the builtin of the same name.
        dict = yaml.safe_load(text)
        return Plugin.from_dict(dict) if dict is not None else None

    def fetch_plugins_from_github(self):
        """Fetch every plugin's latest metadata; returns [] on any Github API error."""
        current_app.logger.info("Running fetch plugins from Github")
        try:
            plugins_dirs = self._fetch_plugin_dirs()
            latest_versions = map(lambda x: self._fetch_latest_version(x), plugins_dirs)
            # Drop versions whose metadata could not be decoded/parsed.
            ret = list(filter(lambda x: x is not None,
                              map(lambda x: self._fetch_metadata(x), latest_versions)))
        except GithubException:
            current_app.logger.exception("Unhandled exception in the code dealing with Github API:")
            return []
        return ret
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-17 10:21
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the SASS5Record model: per-biotope counts linked to a taxon family and a SASS sheet."""

    dependencies = [
        ('bims', '0107_biologicalcollectionrecord_source_collection'),
        ('sass', '0025_auto_20190117_1007'),
    ]

    operations = [
        migrations.CreateModel(
            name='SASS5Record',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('count', models.IntegerField(blank=True, help_text=b'Count', null=True)),
                ('stone_and_rock', models.IntegerField(blank=True, help_text=b'Stone and Rock (S)', null=True, verbose_name=b'Stone and Rock (S)')),
                ('vegetation', models.IntegerField(blank=True, help_text=b'Vegetation (Veg)', null=True, verbose_name=b'Vegetation (Veg)')),
                ('gravel_sand_mud', models.IntegerField(blank=True, help_text=b'Gravel Sand Mud (GSM)', null=True, verbose_name=b'Gravel Sand Mud (GSM)')),
                ('family', models.ForeignKey(blank=True, help_text=b'Taxonomy Family', null=True, on_delete=django.db.models.deletion.SET_NULL, to='bims.Taxonomy', verbose_name=b'Family')),
                ('sass_sheet', models.ForeignKey(help_text=b'SASS Sheet', on_delete=django.db.models.deletion.CASCADE, to='sass.SASS5Sheet', verbose_name=b'SASS Sheet')),
            ],
            options={
                'verbose_name': 'SASS 5 Record',
                'verbose_name_plural': 'SASS 5 Records',
            },
        ),
    ]
|
__copyright__ = "Copyright 2016-2018, Netflix, Inc."
__license__ = "Apache, Version 2.0"
import numpy as np
class ListStats(object):
    """
    >>> test_list = [1, 2, 3, 4, 5, 11, 12, 13, 14, 15]
    >>> ListStats.total_variation(test_list)
    1.5555555555555556
    >>> np.mean(test_list)
    8.0
    >>> np.median(test_list)
    8.0
    >>> ListStats.lp_norm(test_list, 1.0)
    8.0
    >>> ListStats.lp_norm(test_list, 3.0)
    10.507175744985801
    >>> ListStats.perc5(test_list)
    1.4500000000000002
    >>> ListStats.perc20(test_list)
    2.8000000000000003
    >>> ListStats.nonemean([None, None, 1, 2])
    1.5
    >>> ListStats.nonemean([3, 4, 1, 2])
    2.5
    >>> ListStats.nonemean([None, None, None])
    nan
    """

    # Second, non-doc string: doctest only collects __doc__, and these examples
    # use '>>' (not '>>>') so they are not executable anyway.
    """
    The following tests don't render numbers with same precision in py2 vs py3:
    >> ListStats.print_stats(test_list)
    Min: 1, Max: 15, Median: 8.0, Mean: 8.0, Variance: 27.0, Total_variation: 1.55555555556
    >> ListStats.print_moving_average_stats(test_list, 3)
    Min: 2.67984333217, Max: 13.6798433322, Median: 4.64565264023, Mean: 6.61976499826, Variance: 18.625918874, Total_variation: 1.22222222222
    The following tests need review
    >> ListStats.moving_average(test_list, 2)
    array([ 2.26894142, 2.26894142, 2.26894142, 3.26894142,
    4.26894142, 6.61364853, 11.26894142, 12.26894142,
    13.26894142, 14.26894142])
    >> ListStats.moving_average(test_list, 5)
    array([ 4.08330969, 4.08330969, 4.08330969, 4.08330969,
    4.08330969, 4.08330969, 5.81552983, 7.7557191 ,
    9.96294602, 12.51305607])
    >> ListStats.harmonic_mean(test_list)
    4.5222635212015483
    >> ListStats.lp_norm(test_list, 2.0)
    9.5393920141694561
    >> ListStats.perc1(test_list)
    1.0900000000000001
    >> ListStats.perc10(test_list)
    1.8999999999999999
    """

    @staticmethod
    def total_variation(my_list):
        # Mean absolute difference between consecutive elements.
        abs_diff_scores = np.absolute(np.diff(my_list))
        return np.mean(abs_diff_scores)

    @staticmethod
    def moving_average(my_list, n, type='exponential', decay=-1):
        """
        compute an n period moving average.
        :param my_list:
        :param n:
        :param type: 'simple' | 'exponential'
        :param decay:
        :return:
        """
        x = np.asarray(my_list)
        if type == 'simple':
            weights = np.ones(n)
        elif type == 'exponential':
            weights = np.exp(np.linspace(decay, 0., n))
        else:
            assert False, "Unknown type: {}.".format(type)
        weights /= weights.sum()
        a = np.convolve(x, weights, mode='full')[:len(x)]
        # Backfill the warm-up region with the first fully-weighted value.
        # NOTE(review): a[n] raises IndexError when n >= len(my_list) —
        # callers must pass n < len(my_list).
        a[:n] = a[n]
        return a

    @staticmethod
    def harmonic_mean(my_list):
        # NOTE(review): the +1 shift presumably guards against zero entries —
        # confirm before relying on this for lists containing 0.
        return 1.0 / np.mean(1.0 / (np.array(my_list) + 1.0)) - 1.0

    @staticmethod
    def lp_norm(my_list, p):
        # Mean-normalised L^p norm: (mean(|x|^p))^(1/p).
        return np.power(np.mean(np.power(np.array(my_list), p)), 1.0 / p)

    @staticmethod
    def perc1(my_list):
        return np.percentile(my_list, 1)

    @staticmethod
    def perc5(my_list):
        return np.percentile(my_list, 5)

    @staticmethod
    def perc10(my_list):
        return np.percentile(my_list, 10)

    @staticmethod
    def perc20(my_list):
        return np.percentile(my_list, 20)

    @staticmethod
    def print_stats(my_list):
        # The backslash continues the statement; the two adjacent string
        # literals are concatenated before .format is applied.
        print("Min: {min}, Max: {max}, Median: {median}, Mean: {mean}," \
              " Variance: {var}, Total_variation: {total_var}".format(
            min=np.min(my_list), max=np.max(my_list),
            median=np.median(my_list), mean=np.mean(my_list),
            var=np.var(my_list),
            total_var=ListStats.total_variation(my_list)))

    @staticmethod
    def print_moving_average_stats(my_list, n, type='exponential', decay=-1):
        moving_avg_list = ListStats.moving_average(my_list, n, type, decay)
        ListStats.print_stats(moving_avg_list)

    @staticmethod
    def nonemean(my_list):
        # Mean over the non-None entries; all-None input yields numpy's nan.
        return np.mean(list(filter(lambda x: x is not None, my_list)))
# Run the docstring examples above as tests when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
|
import logging
import os
import string
import struct
class SimResult:
    """Reader for a simulator raw-output file (ASCII header + binary samples).

    NOTE(review): the layout (header up to a "Binary" marker, then per point
    one 8-byte double for time followed by (No. Variables - 1) 4-byte floats,
    per the seek arithmetic below) looks like an LTspice-style .raw file —
    confirm against the producing tool.
    """

    def __init__(self, path):
        self._logger = logging.getLogger(__name__)
        if os.path.isfile(path):
            self._Path = path
            self._vol = dict()  # voltage variable name -> column index
            self._cur = dict()  # device-current variable name -> column index
            self._logger.debug('SimResult Object (%s) created', path)
        else:
            self._logger.error('File %s not found', path)
            raise FileNotFoundError("Couldn't find file: " + path)

    def readMetadata(self):
        """Parse the ASCII header: metadata key/values, then the variable table."""
        self._logger.debug('readMetadata called')
        self._logger.debug('\t reading from: %s', self._Path)
        self._metadata = dict()
        with open(self._Path, "rb") as rawFile:
            # Header section: "Key: Value" lines until "Binary"/"Variables".
            for line in rawFile:
                lineDec = line.decode('utf-8')
                spLine = lineDec.split(":", 1)
                if (len(spLine) != 2):
                    # NOTE(review): '' does not escape an apostrophe in Python;
                    # this message actually reads "Couldnt".
                    raise ValueError('Couldn''t parse line: ' + lineDec.strip() + ' of file: ' + self._Path)
                if spLine[0] == "Binary":
                    break
                elif spLine[0] == "Variables":
                    break
                else:
                    self._metadata[spLine[0].strip()] = spLine[1].strip()
            # Variable table: "<index> <name> <kind>" rows, one per variable.
            i = 1
            for line in rawFile:
                lineDec = line.decode('utf-8')
                spLine = lineDec.split(None,2)
                if spLine[2].strip() == 'time':
                    self._timeIndex = int(spLine[0])
                elif spLine[2].strip() == 'voltage':
                    self._vol[spLine[1].strip()] = int(spLine[0])
                elif spLine[2].strip() == 'device_current':
                    self._cur[spLine[1].strip()] = int(spLine[0])
                else:
                    raise ValueError('Couldn''t parse variable: '+spLine[2].strip())
                if i >= int(self._metadata["No. Variables"]):
                    break
                else:
                    i = i + 1

    def getPath(self):
        """Return the path this result was loaded from."""
        self._logger.debug('getPath called')
        return self._Path

    def printMetaData(self):
        """Print every parsed header key/value pair."""
        self._logger.debug('printMetaData called')
        for key in self._metadata:
            print(key, ': ', self._metadata[key])

    def getSimDate(self):
        """Return the 'Date' header field (requires readMetadata first)."""
        self._logger.debug('getSimDate called')
        return self._metadata['Date']

    def getNoVars(self):
        """Return the number of variables recorded per data point."""
        self._logger.debug('getNoVars called')
        return int(self._metadata["No. Variables"])

    def getNoPoints(self):
        """Return the number of data points in the binary section."""
        self._logger.debug('getNoPoints called')
        return int(self._metadata["No. Points"])

    def getTime(self):
        """Return the time axis as a list of floats."""
        with open(self._Path, "rb") as rawFile:
            # Skip the ASCII header up to the "Binary" marker.
            for line in rawFile:
                lineDec = line.decode('utf-8')
                spLine = lineDec.split(":", 1)
                if spLine[0] == "Binary":
                    break
            nPoint = int(self._metadata["No. Points"])
            nVars = int(self._metadata["No. Variables"])
            t = []
            for i in range(0,nPoint):
                # abs(): timestamps may be stored negated — presumably a
                # marker used by the writer; confirm against the file format.
                t.append(abs(struct.unpack('d', rawFile.read(8))[0]))
                # Skip the remaining (nVars-1) 4-byte float values of this point.
                rawFile.seek(4*(nVars-1), os.SEEK_CUR)
        return t

    def getVoltage(self,node):
        """Return the sample list for voltage variable V(<node>)."""
        varName = 'V('+node+')'
        varIndex = int(self._vol[varName])
        with open(self._Path, "rb") as rawFile:
            # Skip the ASCII header up to the "Binary" marker.
            for line in rawFile:
                lineDec = line.decode('utf-8')
                spLine = lineDec.split(":", 1)
                if spLine[0] == "Binary":
                    break
            nPoint = int(self._metadata["No. Points"])
            nVars = int(self._metadata["No. Variables"])
            vol = []
            for i in range(0,nPoint):
                # Skip the 8-byte time plus the preceding float columns.
                rawFile.seek(8+4*(varIndex - 1), os.SEEK_CUR)
                vol.append(struct.unpack('f', rawFile.read(4))[0])
                # Skip the trailing float columns of this point.
                rawFile.seek(4*(nVars-varIndex-1), os.SEEK_CUR)
        return vol

    def getCurrent(self,node):
        """Return the sample list for device-current variable I(<node>)."""
        varName = 'I('+node+')'
        varIndex = int(self._cur[varName])
        with open(self._Path, "rb") as rawFile:
            # Skip the ASCII header up to the "Binary" marker.
            for line in rawFile:
                lineDec = line.decode('utf-8')
                spLine = lineDec.split(":", 1)
                if spLine[0] == "Binary":
                    break
            nPoint = int(self._metadata["No. Points"])
            nVars = int(self._metadata["No. Variables"])
            cur = []
            for i in range(0,nPoint):
                rawFile.seek(8+4*(varIndex - 1), os.SEEK_CUR)
                cur.append(struct.unpack('f', rawFile.read(4))[0])
                rawFile.seek(4*(nVars-varIndex-1), os.SEEK_CUR)
        return cur
|
import numpy as np
from matplotlib import pyplot as plt
from amical.analysis import pymask
def pymask_grid(input_data, ngrid=40, pa_prior=(0, 360), sep_prior=(0, 100), cr_prior=(1, 150),
                err_scale=1., extra_error_cp=0., ncore=1, verbose=False):
    """Coarse grid search of the binary-model likelihood with pymask.

    Fix: the prior bounds used mutable list defaults; they are only indexed
    here (never passed on whole or mutated), so immutable tuples are a safe,
    call-compatible replacement for the shared-mutable-default pitfall.
    """
    cpo = pymask.cpo(input_data)
    like_grid = pymask.coarse_grid(cpo, nsep=ngrid, nth=ngrid, ncon=ngrid,
                                   thmin=pa_prior[0], thmax=pa_prior[1],
                                   smin=sep_prior[0], smax=sep_prior[1],
                                   cmin=cr_prior[0], cmax=cr_prior[1],
                                   threads=ncore, err_scale=err_scale,
                                   extra_error=extra_error_cp, verbose=verbose)
    return like_grid
def pymask_mcmc(input_data, initial_guess, niters=1000, pa_prior=[0, 360], sep_prior=None, cr_prior=None,
                err_scale=1, extra_error_cp=0, ncore=1, burn_in=500, walkers=100, display=True,
                verbose=True):
    """Fit a binary-companion model to closure phases with the pymask MCMC hammer.

    Returns a dict with the best-fit binary parameters ('best') and their
    asymmetric uncertainties ('uncer').

    NOTE(review): pa_prior is a mutable default argument; it is passed whole
    into pymask.hammer, so confirm pymask never mutates it before changing
    the default to a tuple.
    """
    cpo = pymask.cpo(input_data)
    hammer_data = pymask.hammer(cpo, ivar=initial_guess, niters=niters, model='constant', nwalcps=walkers,
                                sep_prior=sep_prior, pa_prior=pa_prior, crat_prior=cr_prior,
                                err_scale=err_scale, extra_error=extra_error_cp, plot=display,
                                burn_in=burn_in, threads=ncore)
    res_corner = hammer_data[1]
    chain = hammer_data[0]['chain']
    # Convert the contrast ratio (and its asymmetric errors) to a magnitude difference.
    dm = 2.5*np.log10(res_corner['cr'])
    dmm = 2.5*np.log10(res_corner['cr']-res_corner['delcrm'])
    dmp = 2.5*np.log10(res_corner['cr']+res_corner['delcrp'])
    e_dmm = abs(dm - dmm)
    e_dmp = abs(dm - dmp)
    if verbose:
        print('MCMC estimation')
        print('---------------')
        print('Separation = %2.1f +%2.1f/-%2.1f mas' %
              (res_corner['sep'], res_corner['delsepp'], res_corner['delsepm']))
        print('PA = %2.1f +%2.1f/-%2.1f deg' %
              (res_corner['pa'], res_corner['delpap'], res_corner['delpam']))
        print('Contrast Ratio = %2.1f +%2.1f/-%2.1f' %
              (res_corner['cr'], res_corner['delcrp'], res_corner['delcrm']))
        print('dm = %2.2f +%2.2f/-%2.2f mag' % (dm, e_dmp, e_dmm))
    # Walker chains per parameter: separation, position angle, contrast ratio.
    chain_sep = chain[:, :, 0].T
    chain_th = chain[:, :, 1].T
    chain_cr = chain[:, :, 2].T
    if display:
        sep = res_corner['sep']
        pa = res_corner['pa']
        cr = res_corner['cr']
        # Trace plots of every walker, with the best-fit value as a star.
        plt.figure(figsize=(5, 7))
        plt.subplot(3, 1, 1)
        plt.plot(chain_sep, color='grey', alpha=.5)
        plt.plot(len(chain_sep), sep, marker='*', color='#0085ca', zorder=1e3)
        plt.ylabel('Separation [mas]')
        plt.subplot(3, 1, 2)
        plt.plot(chain_th, color='grey', alpha=.5)
        plt.plot(len(chain_sep), pa, marker='*', color='#0085ca', zorder=1e3)
        plt.ylabel('PA [deg]')
        plt.subplot(3, 1, 3)
        plt.plot(chain_cr, color='grey', alpha=.2)
        plt.plot(len(chain_sep), cr, marker='*', color='#0085ca', zorder=1e3)
        plt.xlabel('Step')
        plt.ylabel('CR')
        plt.tight_layout()
        plt.show(block=False)
    res = {'best': {'model': 'binary',
                    'dm': dm,
                    'theta': res_corner['pa'],
                    'sep': res_corner['sep'],
                    'x0': 0,
                    'y0': 0},
           'uncer': {'dm_p': e_dmp,
                     'dm_m': e_dmm,
                     'theta_p': res_corner['delpap'],
                     'theta_m': res_corner['delpam'],
                     'sep_p': res_corner['delsepp'],
                     'sep_m': res_corner['delsepm']},
           }
    return res
def pymask_cr_limit(input_data, nsim=100, err_scale=1, extra_error_cp=0, ncore=1, cmax=500,
                    nsep=60, ncrat=60, nth=30, smin=20, smax=250, cmin=1.0001):
    """Monte-Carlo 3-sigma contrast-limit curve versus separation, via pymask.

    Fix: the LaTeX-bearing matplotlib labels are now raw strings — the byte
    values are unchanged ('\\D', '\\m', '\\s' are not escape sequences), but the
    r-prefix silences Python's invalid-escape-sequence warnings.
    """
    cpo = pymask.cpo(input_data)
    lims_data = pymask.detec_limits(cpo, threads=ncore, nsim=nsim,
                                    nsep=nsep, ncon=ncrat, nth=nth,
                                    smax=smax, cmax=cmax, cmin=cmin,
                                    smin=smin,
                                    err_scale=err_scale, extra_error=extra_error_cp)
    limits = lims_data['limits']
    seps = lims_data['seps']
    crats = lims_data['cons']
    crat_limits = 0*seps
    # Loop through seps and find the highest contrast ratio that would be detectable
    for sep_ix in range(len(seps)):
        # 0.9973 is the two-sided 3-sigma detection fraction.
        would_detec = limits[:, sep_ix] >= 0.9973
        if np.sum(would_detec) >= 1:
            threesig_lim = np.max(crats[would_detec])
            crat_limits[sep_ix] = threesig_lim  # fivesig_lim
        else:
            crat_limits[sep_ix] = 1.
    # Convert contrast-ratio limits to magnitudes.
    con_limits = 2.5*np.log10(crat_limits)
    plt.figure()
    plt.plot(seps, con_limits)
    plt.xlabel('Separation [mas]')
    plt.ylabel(r'$\Delta \mathrm{Mag}_{3\sigma}$')
    plt.title(r"PYMASK: flux ratio for 3$\sigma$ detection")
    plt.ylim(plt.ylim()[1], plt.ylim()[0])  # -- reverse the y-axis
    plt.tight_layout()
    res = {'r': seps,
           'cr_limit': con_limits,
           'lims_data': lims_data
           }
    return res
|
import base64
import hashlib
import binascii
from huaweisms.api.common import common_headers, ApiCtx, post_to_url, get_from_url
from .config import API_URL
def b64_sha256(data: str) -> str:
    """Return the URL-safe base64 encoding of the lowercase hex SHA-256 digest of *data*."""
    hex_digest = hashlib.sha256(data.encode('utf-8')).hexdigest()
    return base64.urlsafe_b64encode(hex_digest.encode('ascii')).decode('utf-8', 'ignore')
def login(ctx: ApiCtx, user_name: str, password: str):
    """Log in to the modem API; sets ctx.logged_in on success and returns the parsed reply.

    Fix: the XML declaration read ``<?xml version:"1.0" ...?>`` — a colon
    instead of ``=`` — which is not a well-formed XML declaration.
    """
    headers = common_headers()
    url = "{}/user/login".format(API_URL)

    # original JS code:
    # psd = base64encode(
    #     SHA256(
    #         name +
    #         base64encode(
    #             SHA256($('#password').val())
    #         ) +
    #         g_requestVerificationToken[0]
    #     )
    # );
    password_value = b64_sha256(user_name + b64_sha256(password) + ctx.token)

    # NOTE(review): the declaration is preceded by a newline; strict parsers
    # reject leading whitespace before it — the device apparently tolerates
    # this, so it is left as-is.
    xml_data = """
<?xml version="1.0" encoding="UTF-8"?>
<request>
<Username>{}</Username>
<Password>{}</Password>
<password_type>4</password_type>
</request>
""".format(user_name, password_value)

    # setup headers
    headers['__RequestVerificationToken'] = ctx.token
    headers['X-Requested-With'] = 'XMLHttpRequest'

    r = post_to_url(url, xml_data, ctx, headers)
    if r['type'] == "response" and r['response'] == "OK":
        ctx.logged_in = True
    return r
def state_login(ctx: ApiCtx):
    """Query the current login state of the modem API session."""
    endpoint = "{}/user/state-login".format(API_URL)
    return get_from_url(endpoint, ctx)
|
from src import *
def Main():
    """Periodically mirror files from the FTP server into a fresh local folder.

    Runs forever. On any error it prints a notice and restarts the polling
    loop.  BUGFIX: the original restarted by calling ``Main()`` from its own
    bare ``except:`` block, which (a) grows the call stack on every failure
    and (b) swallows KeyboardInterrupt/SystemExit; a loop with
    ``except Exception`` fixes both.
    """
    while True:
        try:
            while True:
                time.sleep(7200)
                check_if_connection_exist(network_path_server, password_for_network_path_server)
                time.sleep(5)
                path_for_G = create_path_folder(initial_path_for_G)
                os.mkdir(path_for_G)
                time.sleep(5)
                download_files(IP, ftp_login_username, ftp_login_password, ftp_initial_store_file, path_for_G)
                # At 22:00 pause ~9 hours so the next cycle resumes in the morning.
                if time.strftime("%H", time.localtime()) == "22" and time.strftime("%M", time.localtime()) == "00":
                    time.sleep(32400)
                    print("repeat")
                # Extra short pause on the 30th at 17:30.
                if time.strftime("%d", time.localtime()) == "30" and time.strftime("%H", time.localtime()) == "17" and time.strftime("%M", time.localtime()) == "30":
                    time.sleep(63)
        except Exception:
            print("A CAUSA DI UN ERRORE STO ASPETTANDO IL RIAVVIO")


Main()
# Read a count, then that many integers, and print the largest one.
count = int(input())
values = [int(input()) for _ in range(count)]
values.sort()
print(values[-1])
|
from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT
from cassandra.policies import DCAwareRoundRobinPolicy, TokenAwarePolicy, DowngradingConsistencyRetryPolicy, ConsistencyLevel, RoundRobinPolicy
from cassandra.query import tuple_factory
from pyspark.conf import SparkConf
from pyspark.sql import SparkSession, SQLContext
from pyspark import SparkContext
from pyspark.sql import functions as F
import pandas as pd
from pyspark.sql.types import StructType,StructField, StringType, IntegerType, FloatType, DateType, LongType
from numpy.random import rand
from tqdm import tqdm
import random
import os
## Setup Python Driver parameters
# Contact point(s) for the Cassandra/Scylla python driver.
IPS = ['172.19.0.2']

# One shared execution profile: token-aware routing on top of DC-aware
# round-robin, LOCAL_QUORUM consistency, and tuple-shaped result rows.
profile = ExecutionProfile(
    load_balancing_policy=TokenAwarePolicy(DCAwareRoundRobinPolicy("datacenter1")),
    retry_policy=DowngradingConsistencyRetryPolicy(), ##CHECK
    consistency_level=ConsistencyLevel.LOCAL_QUORUM,
    serial_consistency_level=ConsistencyLevel.LOCAL_SERIAL, ##CHECK LWT
    request_timeout=20000,
    row_factory=tuple_factory
)
cluster = Cluster(IPS,execution_profiles={EXEC_PROFILE_DEFAULT: profile})
# BUGFIX: the original called cluster.connect() twice and immediately
# discarded the first session object; a single connect bound to the
# 'tpcds' keyspace is sufficient.
session = cluster.connect('tpcds')
## Starting Spark
# Spark session wired to the same Scylla node through the spark-cassandra
# connector; generous driver/executor memory for the TPC-DS workload.
# NOTE(review): .config("setMaster", ...) sets an arbitrary property named
# "setMaster" -- this was presumably meant to be .master(...); verify.
spark = SparkSession\
    .builder\
    .appName("TPCDS-Scylla")\
    .config("setMaster","172.19.0.2")\
    .config("spark.jars", "target/scala-2.12/spark3-scylla4-example-assembly-0.1.jar")\
    .config("spark.cassandra.connection.host", "172.19.0.2")\
    .config('spark.cassandra.output.consistency.level','LOCAL_QUORUM')\
    .config("spark.driver.memory", "28g")\
    .config("spark.executor.memory", "28g")\
    .getOrCreate()
sc = spark.sparkContext

## Start SQL Context, it will enable you to run SQL Queries
sqlContext = SQLContext(spark)
# Reading and Registering Scylla Tables on Spark
# Each TPC-DS table is loaded through the Spark-Cassandra connector and
# exposed to Spark SQL under its own name so sqlContext.sql() can query it.
# NOTE(review): DataFrame.registerTempTable is deprecated since Spark 2.0 in
# favour of createOrReplaceTempView -- consider migrating.
call_center = spark.read.format("org.apache.spark.sql.cassandra").options(table="call_center", keyspace="tpcds").load()
call_center.registerTempTable("call_center")
catalog_page = spark.read.format("org.apache.spark.sql.cassandra").options(table="catalog_page", keyspace="tpcds").load()
catalog_page.registerTempTable("catalog_page")
catalog_returns = spark.read.format("org.apache.spark.sql.cassandra").options(table="catalog_returns", keyspace="tpcds").load()
catalog_returns.registerTempTable("catalog_returns")
catalog_sales = spark.read.format("org.apache.spark.sql.cassandra").options(table="catalog_sales", keyspace="tpcds").load()
catalog_sales.registerTempTable("catalog_sales")
customer = spark.read.format("org.apache.spark.sql.cassandra").options(table="customer", keyspace="tpcds").load()
customer.registerTempTable("customer")
customer_address = spark.read.format("org.apache.spark.sql.cassandra").options(table="customer_address", keyspace="tpcds").load()
customer_address.registerTempTable("customer_address")
customer_demographics = spark.read.format("org.apache.spark.sql.cassandra").options(table="customer_demographics", keyspace="tpcds").load()
customer_demographics.registerTempTable("customer_demographics")
date_dim = spark.read.format("org.apache.spark.sql.cassandra").options(table="date_dim", keyspace="tpcds").load()
date_dim.registerTempTable("date_dim")
household_demographics = spark.read.format("org.apache.spark.sql.cassandra").options(table="household_demographics", keyspace="tpcds").load()
household_demographics.registerTempTable("household_demographics")
income_band = spark.read.format("org.apache.spark.sql.cassandra").options(table="income_band", keyspace="tpcds").load()
income_band.registerTempTable("income_band")
inventory = spark.read.format("org.apache.spark.sql.cassandra").options(table="inventory", keyspace="tpcds").load()
inventory.registerTempTable("inventory")
item = spark.read.format("org.apache.spark.sql.cassandra").options(table="item", keyspace="tpcds").load()
item.registerTempTable("item")
promotion = spark.read.format("org.apache.spark.sql.cassandra").options(table="promotion", keyspace="tpcds").load()
promotion.registerTempTable("promotion")
reason = spark.read.format("org.apache.spark.sql.cassandra").options(table="reason", keyspace="tpcds").load()
reason.registerTempTable("reason")
ship_mode = spark.read.format("org.apache.spark.sql.cassandra").options(table="ship_mode", keyspace="tpcds").load()
ship_mode.registerTempTable("ship_mode")
store = spark.read.format("org.apache.spark.sql.cassandra").options(table="store", keyspace="tpcds").load()
store.registerTempTable("store")
store_returns = spark.read.format("org.apache.spark.sql.cassandra").options(table="store_returns", keyspace="tpcds").load()
store_returns.registerTempTable("store_returns")
store_sales = spark.read.format("org.apache.spark.sql.cassandra").options(table="store_sales", keyspace="tpcds").load()
store_sales.registerTempTable("store_sales")
time_dim = spark.read.format("org.apache.spark.sql.cassandra").options(table="time_dim", keyspace="tpcds").load()
time_dim.registerTempTable("time_dim")
warehouse = spark.read.format("org.apache.spark.sql.cassandra").options(table="warehouse", keyspace="tpcds").load()
warehouse.registerTempTable("warehouse")
web_page = spark.read.format("org.apache.spark.sql.cassandra").options(table="web_page", keyspace="tpcds").load()
web_page.registerTempTable("web_page")
web_returns = spark.read.format("org.apache.spark.sql.cassandra").options(table="web_returns", keyspace="tpcds").load()
web_returns.registerTempTable("web_returns")
web_sales = spark.read.format("org.apache.spark.sql.cassandra").options(table="web_sales", keyspace="tpcds").load()
web_sales.registerTempTable("web_sales")
web_site = spark.read.format("org.apache.spark.sql.cassandra").options(table="web_site", keyspace="tpcds").load()
web_site.registerTempTable("web_site")
## Generating Real-Time Data for Web Sales
## DDL
# create table web_sales
# (# ws_sold_date_sk int,
# ws_sold_time_sk int,
# ws_ship_date_sk int,
# ws_item_sk int,
# ws_bill_customer_sk int,
# ws_bill_cdemo_sk int,
# ws_bill_hdemo_sk int,
# ws_bill_addr_sk int,
# ws_ship_customer_sk int,
# ws_ship_cdemo_sk int,
# ws_ship_hdemo_sk int,
# ws_ship_addr_sk int,
# ws_web_page_sk int,
# ws_web_site_sk int,
# ws_ship_mode_sk int,
# ws_warehouse_sk int,
# ws_promo_sk int,
# ws_order_number int,
# ws_quantity int,
# ws_wholesale_cost double,
# ws_list_price double,
# ws_sales_price double,
# ws_ext_discount_amt double,
# ws_ext_sales_price double,
# ws_ext_wholesale_cost double,
# ws_ext_list_price double,
# ws_ext_tax double,
# ws_coupon_amt double,
# ws_ext_ship_cost double,
# ws_net_paid double,
# ws_net_paid_inc_tax double,
# ws_net_paid_inc_ship double,
# ws_net_paid_inc_ship_tax double,
# ws_net_profit double
# , junk text, PRIMARY KEY ((ws_item_sk, ws_order_number))
# );
##Creating relationship between foreign keys and tables
# Each entry pairs a table's surrogate-key column with the table it lives in.
array = [["i_item_sk", "item"],["c_customer_sk","customer"],["cd_demo_sk","customer_demographics"],["hd_demo_sk","household_demographics"],["ca_address_sk","customer_address"],["wp_web_page_sk","web_page"],["web_site_sk","web_site"],["sm_ship_mode_sk","ship_mode"],["w_warehouse_sk","warehouse"]]
queries=[]
## Since the size of the environment can change, we need to check what are the Max keys that can be used
## to generate data that it will return data from queries. For this, we will be using Spark to get the MAX of each key.
for column, tables in array:
    query = 'select max({}) as max from {}'.format(column,tables)
    print(query)
    query = sqlContext.sql(query)
    queries.append([query,column])

## Creating Variables for the max of each key
for query,column in queries:
    result = [int(row.max) for row in query.collect()][0]
    #print(result)
    #print("max_{} = random.randrange(int({}))".format(column,result))
    # NOTE(review): exec() injects module-level variables named max_<column>
    # (e.g. max_i_item_sk). A plain dict would be safer, but the generation
    # loop below references these names directly, so this must stay for now.
    exec("max_{} = int({})".format(column,result))

# Column order of the INSERT statement; must match the order of `row` below.
columns = ["ws_item_sk","ws_order_number","ws_sold_date_sk","ws_sold_time_sk","ws_ship_date_sk","ws_bill_customer_sk","ws_bill_cdemo_sk","ws_bill_hdemo_sk","ws_bill_addr_sk","ws_ship_customer_sk","ws_ship_cdemo_sk","ws_ship_hdemo_sk","ws_ship_addr_sk","ws_web_page_sk","ws_web_site_sk","ws_ship_mode_sk","ws_warehouse_sk","ws_coupon_amt","ws_ext_discount_amt","ws_ext_list_price","ws_ext_sales_price","ws_ext_ship_cost","ws_ext_tax","ws_ext_wholesale_cost","ws_list_price","ws_net_paid","ws_net_paid_inc_ship","ws_net_paid_inc_ship_tax","ws_net_paid_inc_tax","ws_net_profit","ws_promo_sk","ws_quantity","ws_sales_price","junk"]
# "?" repeated len(columns) times; ",".join over this string yields "?,?,...".
string = "?" * len(columns)

## Prepare statement
prepared_stmt = "insert into web_sales ("+",".join(columns) +") values ("+ ",".join(string) +") "
prepared_stmt = session.prepare(prepared_stmt)
print(prepared_stmt)
## Number of Transactions that you would like to run
number_of_transactions = 5000000

## Generating Random Data
# Each iteration builds one synthetic web_sales row (ordered to match
# `columns`) and inserts it asynchronously through the prepared statement.
for transaction in tqdm(range(number_of_transactions)):
    ws_item_sk = random.randrange(max_i_item_sk)
    ws_order_number = random.randrange(number_of_transactions)
    ws_sold_date_sk = random.randrange(2459216,2459580) # 1y of data between 2021-01-01 and 2021-12-31
    ws_sold_time_sk = random.randrange(86399) #max(t_time_sk)
    ws_ship_date_sk = random.randrange(ws_sold_date_sk,2459580) ## ship_date should be bigger than sold_date
    ws_bill_customer_sk = random.randrange(max_c_customer_sk)
    ws_bill_cdemo_sk = random.randrange(max_cd_demo_sk)
    ws_bill_hdemo_sk = random.randrange(max_hd_demo_sk)
    ws_bill_addr_sk = random.randrange(max_ca_address_sk)
    # BUGFIX: the ship-to customer key was drawn from max_sm_ship_mode_sk
    # (copy-paste slip); it must range over customer keys like
    # ws_bill_customer_sk above.
    ws_ship_customer_sk = random.randrange(max_c_customer_sk)
    ws_ship_cdemo_sk = random.randrange(max_cd_demo_sk)
    ws_ship_hdemo_sk = random.randrange(max_hd_demo_sk)
    ws_ship_addr_sk = random.randrange(max_ca_address_sk)
    ws_web_page_sk = random.randrange(max_wp_web_page_sk)
    ws_web_site_sk = random.randrange(max_web_site_sk)
    ws_ship_mode_sk = random.randrange(max_sm_ship_mode_sk)
    ws_warehouse_sk = random.randrange(max_w_warehouse_sk)
    ws_coupon_amt=random.randrange(9999)
    ws_ext_discount_amt=random.randrange(9999)
    ws_ext_list_price=random.randrange(9999)
    ws_ext_sales_price=random.randrange(9999)
    ws_ext_ship_cost=random.randrange(9999)
    ws_ext_tax=random.randrange(9999)
    ws_ext_wholesale_cost=random.randrange(9999)
    ws_list_price=random.randrange(9999)
    ws_net_paid=random.randrange(9999)
    ws_net_paid_inc_ship=random.randrange(9999)
    ws_net_paid_inc_ship_tax=random.randrange(9999)
    ws_net_paid_inc_tax=random.randrange(9999)
    ws_net_profit=random.randrange(9999)
    ws_promo_sk=random.randrange(9999)
    ws_quantity=random.randrange(9999)
    ws_sales_price=random.randrange(9999)
    # NOTE(review): ws_wholesale_cost is generated but absent from `columns`
    # and `row`, so it is never inserted (the DDL does have the column).
    ws_wholesale_cost=random.randrange(9999)
    junk = 'None'
    row = [ws_item_sk,ws_order_number,ws_sold_date_sk,ws_sold_time_sk,ws_ship_date_sk,ws_bill_customer_sk,\
           ws_bill_cdemo_sk,ws_bill_hdemo_sk,ws_bill_addr_sk,ws_ship_customer_sk,ws_ship_cdemo_sk,\
           ws_ship_hdemo_sk,ws_ship_addr_sk,ws_web_page_sk,ws_web_site_sk,ws_ship_mode_sk,ws_warehouse_sk,\
           ws_coupon_amt,ws_ext_discount_amt,ws_ext_list_price,ws_ext_sales_price,ws_ext_ship_cost,ws_ext_tax,\
           ws_ext_wholesale_cost,ws_list_price,ws_net_paid,ws_net_paid_inc_ship,ws_net_paid_inc_ship_tax,\
           ws_net_paid_inc_tax,ws_net_profit,ws_promo_sk,ws_quantity,ws_sales_price,junk]
    # Executing queries (async so the loop is not blocked per insert)
    query_data = session.execute_async(prepared_stmt,row)
    # for cell in row:
    #     print(cell)
    #print(str(ws_sold_date_sk) + "|" + str(ws_sold_time_sk) + "|" + str(ws_ship_date_sk) + "|" + str(ws_item_sk))
|
import numpy as np
import xml.etree.ElementTree as ET
class WindTurbines():
    """Set of multiple type wind turbines"""

    def __init__(self, names, diameters, hub_heights, ct_funcs, power_funcs, power_unit):
        """Initialize WindTurbines

        Parameters
        ----------
        names : array_like
            Wind turbine names
        diameters : array_like
            Diameter of wind turbines
        hub_heights : array_like
            Hub height of wind turbines
        ct_funcs : list of functions
            Wind turbine ct functions; func(ws) -> ct
        power_funcs : list of functions
            Wind turbine power functions; func(ws) -> power
        power_unit : {'W', 'kW', 'MW', 'GW'}
            Unit of power_func output (case insensitive)
        """
        self._names = np.array(names)
        self._diameters = np.array(diameters)
        self._hub_heights = np.array(hub_heights)
        self.ct_funcs = ct_funcs
        # Factor converting power_func output to watts.
        self.power_scale = {'w': 1, 'kw': 1e3, 'mw': 1e6, 'gw': 1e9}[power_unit.lower()]
        if self.power_scale != 1:
            # Bind f as a default argument so each lambda captures its own curve.
            self.power_funcs = list([lambda ws, f=f: f(ws) * self.power_scale for f in power_funcs])
        else:
            self.power_funcs = power_funcs

    def _info(self, var, types):
        # Fancy-index the per-type array with the requested type index/indices.
        return var[np.asarray(types, int)]

    def hub_height(self, types=0):
        """Hub height of the specified type(s) of wind turbines
        """
        return self._info(self._hub_heights, types)

    def diameter(self, types=0):
        """Rotor diameter of the specified type(s) of wind turbines
        """
        return self._info(self._diameters, types)

    def name(self, types=0):
        """Name of the specified type(s) of wind turbines
        """
        return self._info(self._names, types)

    def power(self, ws_i, type_i=0):
        """Power in watt

        Parameters
        ----------
        ws_i : array_like, shape (i,...)
            Wind speed
        type_i : int or array_like, shape (i,)
            wind turbine type

        Returns
        -------
        power : array_like
            Power production for the specified wind turbine type(s) and wind speed
        """
        return self._ct_power(ws_i, type_i)[1]

    def ct(self, ws_i, type_i=0):
        """Thrust coefficient

        Parameters
        ----------
        ws_i : array_like, shape (i,...)
            Wind speed
        type_i : int or array_like, shape (i,)
            wind turbine type

        Returns
        -------
        ct : array_like
            Thrust coefficient for the specified wind turbine type(s) and wind speed
        """
        return self._ct_power(ws_i, type_i)[0]

    def get_defaults(self, N, type_i=0, h_i=None, d_i=None):
        """Expand type/hub-height/diameter specs to per-turbine arrays.

        Parameters
        ----------
        N : int
            number of turbines
        type_i : array_like or None, optional
            Turbine type. If None, all turbines is type 0
        h_i : array_like or None, optional
            hub heights. If None: default hub heights (set in WindTurbines)
        d_i : array_lie or None, optional
            Rotor diameter. If None: default diameter (set in WindTurbines)
        """
        type_i = np.zeros(N, dtype=int) + type_i
        if h_i is None:
            h_i = self.hub_height(type_i)
        elif isinstance(h_i, (int, float)):
            h_i = np.zeros(N) + h_i
        if d_i is None:
            d_i = self.diameter(type_i)
        elif isinstance(d_i, (int, float)):
            d_i = np.zeros(N) + d_i
        return np.asarray(type_i), np.asarray(h_i), np.asarray(d_i)

    def _ct_power(self, ws_i, type_i=0):
        # Evaluate (ct, power) curves; dispatches per-type when types are mixed.
        ws_i = np.asarray(ws_i)
        if np.any(type_i != 0):
            # BUGFIX: np.float/np.int were deprecated aliases removed in
            # NumPy >= 1.24; use the builtin float/int instead.
            CT = np.zeros_like(ws_i, dtype=float)
            P = np.zeros_like(ws_i, dtype=float)
            type_i = np.zeros(ws_i.shape[0]) + type_i
            for t in np.unique(type_i).astype(int):
                m = type_i == t
                CT[m] = self.ct_funcs[t](ws_i[m])
                P[m] = self.power_funcs[t](ws_i[m])
            return CT, P
        else:
            return self.ct_funcs[0](ws_i), self.power_funcs[0](ws_i)

    def plot(self, x, y, types=None, wd=None, yaw=0, ax=None):
        """Plot wind farm layout including type name and diameter

        Parameters
        ----------
        x : array_like
            x position of wind turbines
        y : array_like
            y position of wind turbines
        types : int or array_like
            type of the wind turbines
        wd : int, float, array_like or None
            - if int, float or array_like: wd is assumed to be the wind direction(s) and a line\
            indicating the perpendicular rotor is plotted.
            - if None: An circle indicating the rotor diameter is plotted
        ax : pyplot or matplotlib axes object, default None
        """
        import matplotlib.pyplot as plt
        if types is None:
            types = np.zeros_like(x)
        if ax is None:
            ax = plt.gca()
        markers = np.array(list("213v^<>o48spP*hH+xXDd|_"))
        colors = ['gray', 'k', 'r', 'g', 'k'] * 5

        from matplotlib.patches import Circle
        assert len(x) == len(y)
        types = (np.zeros_like(x) + types).astype(int)  # ensure same length as x
        yaw = np.zeros_like(x) + yaw
        for i, (x_, y_, d, t, yaw_) in enumerate(zip(x, y, self.diameter(types), types, yaw)):
            if wd is None or len(np.atleast_1d(wd)) > 3:
                # Many/no wind directions: draw the rotor as a circle.
                circle = Circle((x_, y_), d / 2, ec=colors[t], fc="None")
                ax.add_artist(circle)
                plt.plot(x_, y_, 'None', )
            else:
                # Few wind directions: draw a rotor line perpendicular to each wd.
                for wd_ in np.atleast_1d(wd):
                    c, s = np.cos(np.deg2rad(90 + wd_ - yaw_)), np.sin(np.deg2rad(90 + wd_ - yaw_))
                    ax.plot([x_ - s * d / 2, x_ + s * d / 2], [y_ - c * d / 2, y_ + c * d / 2], lw=1, color=colors[t])
        for t, m, c in zip(np.unique(types), markers, colors):
            # ax.plot(np.asarray(x)[types == t], np.asarray(y)[types == t], '%sk' % m, label=self._names[int(t)])
            ax.plot([], [], '2', color=c, label=self._names[int(t)])

        for i, (x_, y_, d) in enumerate(zip(x, y, self.diameter(types))):
            ax.annotate(i, (x_ + d / 2, y_ + d / 2), fontsize=7)
        ax.legend(loc=1)
        ax.axis('equal')

    @staticmethod
    def from_WindTurbines(wt_lst):
        """Generate a WindTurbines object from a list of (Onetype)WindTurbines

        Parameters
        ----------
        wt_lst : array_like
            list of (OneType)WindTurbines
        """
        def get(att):
            lst = []
            for wt in wt_lst:
                lst.extend(getattr(wt, att))
            return lst
        return WindTurbines(*[get(n) for n in ['_names', '_diameters', '_hub_heights',
                                               'ct_funcs', 'power_funcs']],
                            power_unit='w')

    @staticmethod
    def from_WAsP_wtg(wtg_file, power_unit='W'):
        """ Parse the one/multiple .wtg file(s) (xml) to initilize an
        WindTurbines object.

        Parameters
        ----------
        wtg_file : string or a list of string
            A string denoting the .wtg file, which is exported from WAsP.

        Returns
        -------
        an object of WindTurbines.

        Note: it is assumed that the power_unit inside multiple .wtg files
        is the same, i.e., power_unit.
        """
        if not isinstance(wtg_file, list):
            wtg_file_list = [wtg_file]
        else:
            wtg_file_list = wtg_file

        names = []
        diameters = []
        hub_heights = []
        ct_funcs = []
        power_funcs = []
        for wtg_file in wtg_file_list:
            tree = ET.parse(wtg_file)
            root = tree.getroot()
            # Reading data from wtg_file.
            # BUGFIX: np.float() was a deprecated alias removed in NumPy >= 1.24;
            # plain float() is equivalent. Unused locals (rated power and
            # cut-in/cut-out speeds) were dropped.
            name = root.attrib['Description']
            diameter = float(root.attrib['RotorDiameter'])
            hub_height = float(root.find('SuggestedHeights').find('Height').text)
            i_point = 0
            for DataPoint in root.iter('DataPoint'):
                i_point = i_point + 1
                ws = float(DataPoint.attrib['WindSpeed'])
                Ct = float(DataPoint.attrib['ThrustCoEfficient'])
                power = float(DataPoint.attrib['PowerOutput'])
                if i_point == 1:
                    dt = np.array([[ws, Ct, power]])
                else:
                    dt = np.append(dt, np.array([[ws, Ct, power]]), axis=0)
            ws = dt[:, 0]
            ct = dt[:, 1]
            power = dt[:, 2]

            names.append(name)
            diameters.append(diameter)
            hub_heights.append(hub_height)
            # Bind the curve arrays as defaults so each closure keeps its own data.
            ct_funcs.append(lambda u, ws=ws, ct=ct: np.interp(u, ws, ct, left=0, right=0))
            power_funcs.append(lambda u, ws=ws, power=power: np.interp(u, ws, power, left=0, right=0))

        return WindTurbines(names=names, diameters=diameters,
                            hub_heights=hub_heights, ct_funcs=ct_funcs,
                            power_funcs=power_funcs, power_unit=power_unit)
class OneTypeWindTurbines(WindTurbines):
    """Set of wind turbines (one type, i.e. all wind turbines have same name, diameter, power curve etc"""

    def __init__(self, name, diameter, hub_height, ct_func, power_func, power_unit):
        """Initialize OneTypeWindTurbine

        Parameters
        ----------
        name : str
            Wind turbine name
        diameter : int or float
            Diameter of wind turbine
        hub_height : int or float
            Hub height of wind turbine
        ct_func : function
            Wind turbine ct function; func(ws) -> ct
        power_func : function
            Wind turbine power function; func(ws) -> power
        power_unit : {'W', 'kW', 'MW', 'GW'}
            Unit of power_func output (case insensitive)
        """
        # Delegate to the multi-type base class with single-element lists.
        WindTurbines.__init__(self, [name], [diameter], [hub_height],
                              [ct_func], [power_func], power_unit)

    @staticmethod
    def from_tabular(name, diameter, hub_height, ws, power, ct, power_unit):
        """Build a OneTypeWindTurbines from tabulated ws/ct/power curve points."""
        ct_curve = lambda u, ws=ws, ct=ct: np.interp(u, ws, ct)
        power_curve = lambda u, ws=ws, power=power: np.interp(u, ws, power)
        return OneTypeWindTurbines(name=name, diameter=diameter, hub_height=hub_height,
                                   ct_func=ct_curve, power_func=power_curve,
                                   power_unit=power_unit)
def cube_power(ws_cut_in=3, ws_cut_out=25, ws_rated=12, power_rated=5000):
    """Return a simple power-curve function.

    The returned func(ws) ramps cubically from 0 at *ws_cut_in* to
    *power_rated* at *ws_rated*, stays flat until *ws_cut_out*, and is 0
    outside the operating range.
    """
    def power_func(ws):
        ws = np.asarray(ws)
        # BUGFIX: np.float was removed in NumPy >= 1.24; use builtin float.
        power = np.zeros_like(ws, dtype=float)
        m = (ws >= ws_cut_in) & (ws < ws_rated)
        power[m] = power_rated * ((ws[m] - ws_cut_in) / (ws_rated - ws_cut_in))**3
        power[(ws >= ws_rated) & (ws <= ws_cut_out)] = power_rated
        return power
    return power_func
def dummy_thrust(ws_cut_in=3, ws_cut_out=25, ws_rated=12, ct_rated=8 / 9):
    # temporary thrust curve fix
    """Return a placeholder ct-curve function: flat *ct_rated* below rated,
    a second-order polynomial roll-off between rated and cut-out, 0 elsewhere."""
    def ct_func(ws):
        ws = np.asarray(ws)
        # BUGFIX: np.float was removed in NumPy >= 1.24; use builtin float.
        ct = np.zeros_like(ws, dtype=float)
        if ct_rated > 0:
            # ct = np.ones_like(ct)*ct_rated
            m = (ws >= ws_cut_in) & (ws < ws_rated)
            ct[m] = ct_rated
            idx = (ws >= ws_rated) & (ws <= ws_cut_out)
            # second order polynomial fit for above rated
            ct[idx] = np.polyval(np.polyfit([ws_rated, (ws_rated + ws_cut_out) / 2,
                                             ws_cut_out], [ct_rated, 0.4, 0.03], 2), ws[idx])
        return ct
    return ct_func
def main():
    """Demo/plotting entry point for the WindTurbines classes."""
    # NOTE: the __main__ guard deliberately lives INSIDE main(): the module
    # calls main() unconditionally at the bottom, and this guard turns that
    # call into a no-op when the module is merely imported.
    if __name__ == '__main__':
        import os.path
        import matplotlib.pyplot as plt
        from py_wake.examples.data import wtg_path

        # Two synthetic turbine types with dummy ct and cubic power curves.
        wts = WindTurbines(names=['tb1', 'tb2'],
                           diameters=[80, 120],
                           hub_heights=[70, 110],
                           ct_funcs=[lambda ws: ws * 0 + 8 / 9,
                                     dummy_thrust()],
                           power_funcs=[cube_power(ws_cut_in=3, ws_cut_out=25, ws_rated=12, power_rated=2000),
                                        cube_power(ws_cut_in=3, ws_cut_out=25, ws_rated=12, power_rated=3000)],
                           power_unit='kW')
        ws = np.arange(25)
        plt.figure()
        plt.plot(ws, wts.power(ws, 0), label=wts.name(0))
        plt.plot(ws, wts.power(ws, 1), label=wts.name(1))
        plt.legend()
        plt.show()
        plt.figure()
        plt.plot(ws, wts.ct(ws, 0), label=wts.name(0))
        plt.plot(ws, wts.ct(ws, 1), label=wts.name(1))
        plt.legend()
        plt.show()
        plt.figure()
        wts.plot([0, 100], [0, 100], [0, 1])
        plt.xlim([-50, 150])
        plt.ylim([-50, 150])
        plt.show()

        # Example using two wtg files to initialize a wind turbine
        # vestas_v80_wtg = './examples/data/Vestas-V80.wtg'
        # NEG_2750_wtg = './examples/data/NEG-Micon-2750.wtg'
        # data_folder = Path('./examples/data/')
        # vestas_v80_wtg = data_folder / 'Vestas-V80.wtg'
        # NEG_2750_wtg = data_folder / 'NEG-Micon-2750.wtg'
        vestas_v80_wtg = os.path.join(wtg_path, 'Vestas-V80.wtg')
        NEG_2750_wtg = os.path.join(wtg_path, 'NEG-Micon-2750.wtg')
        wts_wtg = WindTurbines.from_WAsP_wtg([vestas_v80_wtg, NEG_2750_wtg])
        ws = np.arange(30)
        plt.figure()
        plt.plot(ws, wts_wtg.power(ws, 0), label=wts_wtg.name(0))
        plt.plot(ws, wts_wtg.power(ws, 1), label=wts_wtg.name(1))
        plt.legend()
        plt.show()
        plt.figure()
        plt.plot(ws, wts_wtg.ct(ws, 0), label=wts_wtg.name(0))
        plt.plot(ws, wts_wtg.ct(ws, 1), label=wts_wtg.name(1))
        plt.legend()
        plt.show()


main()
|
from typing import Any, Dict, Optional

from . import caching as __caching, storage
from . import logging as __logging
from .infrastructure import page as __page
def get_page(template_path: str, data: Optional[Dict[str, Any]] = None) -> str:
    """Render the page for *template_path* using *data*.

    Raises InvalidOperationException if the storage engine has not been
    initialized.
    """
    from markdown_subtemplate.exceptions import InvalidOperationException

    # BUGFIX: the default used to be a mutable ``{}`` shared across all
    # calls; use None and create a fresh dict per call instead.
    if data is None:
        data = {}

    log = __logging.get_log()

    if not storage.is_initialized():
        msg = "Storage engine is not initialized."
        log.error("engine.get_page: " + msg)
        raise InvalidOperationException(msg)

    log.verbose(f"engine.get_page: Getting page content for {template_path}")
    return __page.get_page(template_path, data)
def clear_cache():
    """Empty the template cache, logging how many items were reclaimed."""
    log = __logging.get_log()
    cache = __caching.get_cache()
    # Count before clearing so the log line can report what was evicted.
    evicted = cache.count()
    cache.clear()
    log.info(f"engine.clear_cache: Cache cleared, reclaimed {evicted:,} items.")
|
import os
from dataflows import Flow, update_resource, set_primary_key, dump_to_sql
from dgp.core import BaseDataGenusProcessor
from dgp.config.consts import CONFIG_URL, CONFIG_TAXONOMY_ID
class ConfigStorerDGP(BaseDataGenusProcessor):
    """DGP processor that persists the current configuration as a row in SQL."""

    def __init__(self, config, context, lazy_engine):
        super().__init__(config, context)
        self.lazy_engine = lazy_engine
        # Placeholder hook; real publish flow may be injected later.
        self.inner_publish_flow = lambda *_: None

    def collate_values(self, fields):
        """Return a collator mapping a row to {field: value} for *fields*."""
        def func(row):
            return dict((f, row[f]) for f in fields)
        return func

    def flow(self):
        TARGET = 'configurations'
        # Snapshot the config with publishing disabled.
        saved_config = self.config._unflatten()
        saved_config.setdefault('publish', {})['allowed'] = False
        snippet = '{}: {}'.format(
            self.config.get(CONFIG_TAXONOMY_ID),
            os.path.basename(self.config.get(CONFIG_URL))
        )
        record = dict(
            source=self.config.get(CONFIG_URL),
            snippets=[snippet],
            config=saved_config,
        )
        # One-row resource keyed by source URL, upserted into SQL.
        return Flow(
            [record],
            update_resource(-1, name=TARGET),
            set_primary_key(['source']),
            dump_to_sql(
                {TARGET: {'resource-name': TARGET, 'mode': 'update'}},
                engine=self.lazy_engine(),
            ),
        )
|
import itertools as it
import numpy as np
import pandas as pd
from scipy import stats
from statsmodels.stats.multitest import multipletests
def read_all_chimeric_counts(h5_fns, sample_names, normalise=True):
    """Load chimeric and non-chimeric read counts for several samples.

    Parameters
    ----------
    h5_fns : iterable of str
        Per-sample HDF5 files holding the keys 'chimera_counts',
        'non_chimeric_counts' and 'norm_factors'.
    sample_names : iterable of str
        Sample labels, paired positionally with *h5_fns*.
    normalise : bool
        When True, divide both count tables by the stored norm factors.

    Returns
    -------
    counts : pd.DataFrame
        Columns form a (sample, readtype, boot) MultiIndex.
    downstream_genes : dict
        Maps (chimera, strand) -> downstream gene.
    """
    chimeric_counts = {}
    all_gene_non_chimeric_counts = {}
    for sample, h5_fn in zip(sample_names, h5_fns):
        chimeric_counts[sample] = pd.read_hdf(h5_fn, key='chimera_counts')
        all_gene_non_chimeric_counts[sample] = pd.read_hdf(h5_fn, key='non_chimeric_counts')
        norm_factors = pd.read_hdf(h5_fn, key='norm_factors')
        if normalise:
            chimeric_counts[sample] /= norm_factors
            all_gene_non_chimeric_counts[sample] /= norm_factors
    # Stack per-sample tables side by side: columns become (sample, boot).
    chimeric_counts = pd.concat(
        chimeric_counts, axis=1,
        sort=True, names=['sample', 'boot'])
    all_gene_non_chimeric_counts = pd.concat(
        all_gene_non_chimeric_counts, axis=1,
        sort=True, names=['sample', 'boot'])
    # Index levels are (chimera, downstream, strand); remember the downstream
    # gene for each (chimera, strand) before that level is collapsed below.
    downstream_genes = {(chimera, strand): downstream for
                        chimera, downstream, strand
                        in chimeric_counts.index}
    chimeric_counts = chimeric_counts.groupby(level=(0, 2), axis=0).sum()
    # Restrict the genome-wide non-chimeric table to the chimera index.
    non_chimeric_counts = all_gene_non_chimeric_counts.loc[chimeric_counts.index].copy()
    counts = pd.concat(
        {'chimeric': chimeric_counts,
         'nonchimeric': non_chimeric_counts},
        axis=1, sort=True, names=['readtype', 'sample', 'boot'],
    ).reorder_levels(['sample', 'readtype', 'boot'], axis=1).fillna(0)
    return counts, downstream_genes
def get_bootstrap_stats(bootstraps, cond_a, cond_b):
    """Compare chimeric/non-chimeric count ratios between two conditions.

    *bootstraps* must have (sample, readtype, boot) columns as produced by
    read_all_chimeric_counts; *cond_a*/*cond_b* are sample labels.
    Returns a DataFrame with log2-ratio bootstrap summaries and KS tests.
    """
    # +0.5 pseudocount so the ratios below never divide by zero.
    bootstraps = bootstraps.copy() + 0.5
    cond_a_ratio = (
        bootstraps.loc[:, (cond_a, 'chimeric', pd.IndexSlice[:])].values /
        bootstraps.loc[:, (cond_a, 'nonchimeric', pd.IndexSlice[:])].values
    )
    cond_b_ratio = (
        bootstraps.loc[:, (cond_b, 'chimeric', pd.IndexSlice[:])].values /
        bootstraps.loc[:, (cond_b, 'nonchimeric', pd.IndexSlice[:])].values
    )
    # Two-sample KS test per chimera across bootstrap replicates.
    ks_stat = []
    ks_p_val = []
    for i in range(len(bootstraps)):
        ks, p_val = stats.ks_2samp(cond_a_ratio[i], cond_b_ratio[i])
        ks_stat.append(ks)
        ks_p_val.append(p_val)
    ks_stat = np.array(ks_stat)
    ks_p_val = np.array(ks_p_val)
    # All pairwise bootstrap combinations of condition A vs condition B.
    n_boots = len(bootstraps.columns.unique(level=2))
    boot_lr = {}
    for n, (i, j) in enumerate(it.product(range(n_boots), repeat=2)):
        cond_a_data = bootstraps.loc[:, (cond_a, pd.IndexSlice[:], i)].copy()
        cond_a_data.columns = cond_a_data.columns.droplevel(0)
        cond_b_data = bootstraps.loc[:, (cond_b, pd.IndexSlice[:], j)].copy()
        cond_b_data.columns = cond_b_data.columns.droplevel(0)
        r = ((cond_a_data['chimeric'].values / cond_a_data['nonchimeric'].values) /
             (cond_b_data['chimeric'].values / cond_b_data['nonchimeric'].values))
        boot_lr[n] = np.log2(r).ravel()
    boot_lr = pd.DataFrame.from_dict(boot_lr)
    boot_lr.index = bootstraps.index
    # Median and 95% CI of the log2 ratios across bootstrap pairs.
    boot_lr_res = boot_lr.quantile([0.5, 0.025, 0.975], axis=1).T
    boot_lr_res.columns = ['logodds_median', 'logodds_lower_ci95', 'logodds_upper_ci95']
    boot_lr_res['logodds_mean'] = boot_lr.mean(axis=1)
    boot_lr_res['ks_stat'] = ks_stat
    boot_lr_res['ks_p_val'] = ks_p_val
    # NOTE(review): the column is named 'ks_fdr' but the correction applied is
    # Bonferroni (FWER control), not an FDR procedure -- confirm intent.
    _, boot_lr_res['ks_fdr'], *_ = multipletests(boot_lr_res.ks_p_val, method='bonferroni')
    return boot_lr_res
def generate_bootstrapped_logodds(h5_fns, cond_a_sample_name, cond_b_sample_name):
    """Run the full two-condition bootstrap comparison.

    Loads counts for the two samples, computes per-chimera log-odds bootstrap
    statistics, and annotates the result with downstream genes and the
    per-(sample, readtype) median counts.
    """
    counts, downstream_genes = read_all_chimeric_counts(
        h5_fns, [cond_a_sample_name, cond_b_sample_name], normalise=False)
    # Median over bootstrap replicates per (sample, readtype).
    # BUGFIX: this statement was duplicated verbatim in the original;
    # the redundant second computation has been removed.
    median_counts = counts.groupby(level=['sample', 'readtype'], axis=1).median()
    median_counts.columns = (median_counts.columns.get_level_values(0) + '_' +
                             median_counts.columns.get_level_values(1))
    logodds_ratios = get_bootstrap_stats(
        counts, cond_a_sample_name, cond_b_sample_name)
    logodds_ratios['downstream_genes'] = pd.Series(downstream_genes)
    logodds_ratios = logodds_ratios.join(median_counts)
    return logodds_ratios
# generated from genmsg/cmake/pkg-genmsg.context.in
# Build-time context for ROS message/service generation. This file is
# auto-generated; do not edit by hand -- regenerate via catkin/cmake.
messages_str = ""
services_str = "/home/pi/Desktop/dashgo/src/dashgo_bringup/srv/ultrasonic.srv"
pkg_name = "dashgo_bringup"
dependencies_str = "std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
# True only when the template substituted 'TRUE' here (it did not).
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
|
# Generated by Django 2.2.6 on 2019-12-16 16:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Restrict Story.initial_points to the planning-poker point values."""

    dependencies = [
        ('core', '0008_auto_20191125_2123'),
    ]

    operations = [
        migrations.AlterField(
            model_name='story',
            name='initial_points',
            # Fibonacci-style estimation scale, stored as a small positive int.
            field=models.PositiveSmallIntegerField(choices=[(1, 1), (2, 2), (3, 3), (5, 5), (8, 8), (13, 13), (21, 21)], verbose_name='Pontos iniciais da história'),
        ),
    ]
|
from pathlib import Path
import pytest
from macpie.pandas import file_to_dataframe
# Directory holding the xlsx fixtures for these tests.
current_dir = Path("tests/pandas/operators/filter_by_id/").resolve()


def test_filter_by_id():
    """filter_by_id should validate ids and keep all rows matching them."""
    df = file_to_dataframe(current_dir / "basic.xlsx")

    # ids list with invalid integer should raise ValueError
    ids = [1, 2, "hello"]
    with pytest.raises(ValueError):
        df.mac.filter_by_id("pidn", ids)

    # number of rows of filtered result should match number of ids
    ids = [2, 3, "4"]
    result = df.mac.filter_by_id("pidn", ids)
    # result.to_excel(Path("tests/pandas/operators/filter_by_id/result.xlsx"), index=False)
    # NOTE(review): 3 ids but 4 rows expected -- presumably basic.xlsx contains
    # a duplicated pidn; verify against the fixture.
    assert result.mac.row_count() == 4
|
"""
None - A function that reaches the end of execution without a return statement
will always return None.
"""
def do_nothing():
pass
print(do_nothing()) # Out: None
"""
Defining a function with an arbitrary number of arguments
"""
def func(*args):
# args will be a tuple containing all values that are passed in
for i in args:
print(i)
func(1, 2, 3)
# Calling it with 3 arguments
# Out: 1
# 2
# 3
list_of_arg_values = [1, 2, 3]
func(*list_of_arg_values) # Calling it with list of values, * expands the list
# These arguments ( *list_of_arg_values) can be accessed by index, for example
# list_of_arg_values[0] will return the first argument
"""
Defining a function with an arbitrary number of keyword arguments
"""
def func(**kwargs):
# kwargs will be a dictionary containing the names as keys and the values as values.
# kwargs is a plain native python dictionary.
for name, value in kwargs.items():
print(name, value)
func(value1=1, value2=2, value3=3) # Calling it with 3 arguments
# Out: value1 1
# value2 2
# value3 3
my_dict = {'foo': 1, 'bar': 2} # Calling it with a dictionary
func(**my_dict)
# Out: foo 1
# bar 2
"""
The order of positional and keywords arguments matters:
- The positional/keyword arguments come first. (Required arguments).
- Then comes the arbitrary *arg arguments. (Optional).
- Then keyword-only arguments come next. (Required).
- Finally the arbitrary keyword **kwargs come. (Optional).
"""
# |-positional-|-optional-|---keyword-only--|-optional-|
def func(arg1, arg2=10 , *args, kwarg1, kwarg2=2, **kwargs):
pass
"""
In Python 3, you can use * alone to indicate that all subsequent
arguments must be specified as keywords. For instance the math.isclose
function in Python 3.5 and higher is defined using def math.isclose (a, b,
*, rel_tol=1e-09, abs_tol=0.0) , which means the first two arguments can be
supplied positionally but the optional third and fourth parameters can only
be supplied as keyword arguments.
""" |
"""
DATAQ USB Bulk driver level code
author: Valentyn Stadnytskyi
June 2018 - July 2020
"""
from numpy import nan, mean, std, asarray, array, concatenate, delete, round, vstack, hstack, zeros, transpose, split
from time import time, sleep
import sys
import os.path
import struct
from pdb import pm
from time import gmtime, strftime
import logging
from struct import pack, unpack
from timeit import Timer
from logging import info,warn, debug, error
import traceback
class Driver(object):
def __init__(self):
#tested dec 17, 2017
self.available_ports = []
self.dev = None
self.filters = {}
self.filters['Last Point'] = 0
self.filters['Average'] = 1
self.filters['Maximum'] = 2
self.filters['Minimum'] = 3
    def init(self,idProduct = 0x2008, serial_number = ''):
        """
        Initialize the driver: discover the matching USB device and
        configure its endpoints.

        idProduct: USB product id to search for.
        serial_number: serial-number prefix handed to discover().
        NOTE(review): discover() compares the first 8 characters of the
        device serial against this value; the '' default can never match,
        so discover() raises unless a real serial is supplied -- confirm
        the intended default.
        """
        self.dev = self.discover(idProduct,serial_number)
        self.use_port()
        info("initialization of the driver is complete")
def get_information(self):
"""
auxiliary function to retrieve information about the connected USB device (on USB level).
"""
dev_dict = {}
epi_dict = {}
if dev != None:
dev_dict['DEV:address'] = self.dev.address
dev_dict['DEV:bDeviceClass'] = self.dev.bDeviceClass
dev_dict['DEV:bDescriptorType'] = self.dev.bDescriptorType
dev_dict['DEV:bDeviceProtocol'] = self.dev.bDeviceProtocol
dev_dict['DEV:bLength'] = self.dev.bLength
dev_dict['DEV:bMaxPacketSize0'] = self.dev.bMaxPacketSize0
dev_dict['DEV:bNumConfigurations'] = dev.bNumConfigurations
dev_dict['DEV:manufacturer'] = self.dev.manufacturer
dev_dict['DEV:serial_number'] = self.dev.serial_number
dev_dict['DEV:speed'] = self.dev.speed
dev_dict['DEV:product'] = self.dev.product
#endpoint IN description
epi_dict['EPI:bmAttributes'] = self.epi.bmAttributes
epi_dict['EPI:wMaxPacketSize'] = self.epi.wMaxPacketSize
epi_dict['EPI:bSynchAddress'] = self.epi.bSynchAddress
epi_dict['EPI:bInterval'] = self.epi.bInterval
epi_dict['EPI:bEndpointAddress'] = self.epi.bEndpointAddress
epi_dict['EPI:bDescriptorType'] = self.epi.bDescriptorType
epi_dict['EPI:bInterval'] = self.epi.bInterval
epi_dict['EPI:bInterval'] = self.epi.bInterval
return dev_dic,epi_dict
    def get_hardware_information(self):
        """
        Query the instrument for its identity strings and return them as a
        dict (manufacturer, device name, firmware version, serial number,
        sample rate divisor). Keys and values are bytes, matching the raw
        USB wire protocol.
        """
        dic = {}
        # Each reply echoes the command; split on that echo and trim the
        # framing characters surrounding the payload.
        dic[b'Device Manufacturer'] = self.inquire(b'info 0 \r').split(b'info 0 ')[1][1:-2]
        dic[b'Device name'] = self.inquire(b'info 1 \r').split(b'info 1 ')[1][1:-1]
        dic[b'Firmware version'] = self.inquire(b'info 2 \r').split(b'info 2 ')[1][1:-1]
        dic[b'Serial Number'] = self.inquire(b'info 6 \r').split(b'info 6 ')[1][1:-1]
        dic[b'Sample Rate Divisor'] = self.inquire(b'info 9 \r').split(b'info 9 ')[1][1:-1]
        return dic
    def use_port(self):
        """
        Configure the USB backend: reset the device, activate its first
        configuration and cache the first OUT (self.epo) and IN (self.epi)
        endpoints of interface (0, 0).
        """
        import usb.core
        import usb.util
        self.dev.reset()
        # set the active configuration. With no arguments, the first
        # configuration will be the active one
        self.dev.set_configuration()
        # get an endpoint instance
        cfg = self.dev.get_active_configuration()
        intf = cfg[(0,0)]
        self.epo = usb.util.find_descriptor(
            intf,
            # match the first OUT endpoint
            custom_match = \
            lambda e: \
                usb.util.endpoint_direction(e.bEndpointAddress) == \
                usb.util.ENDPOINT_OUT)
        self.epi = usb.util.find_descriptor(
            intf,
            # match the first IN endpoint
            custom_match = \
            lambda e: \
                usb.util.endpoint_direction(e.bEndpointAddress) == \
                usb.util.ENDPOINT_IN)
        assert self.epo is not None
        assert self.epi is not None
        # NOTE(review): the assignments below mutate the local endpoint
        # descriptor objects (packet size, attributes, polling interval),
        # presumably to enlarge the transfer chunk used by read() -- they do
        # not reconfigure the hardware descriptor itself; confirm intent.
        self.epi.wMaxPacketSize = 7200000
        self.epo.wMaxPacketSize = 7200000
        self.epi.bmAttributes = 1
        self.epi.bInterval = 100
        # default read size used by read()/readall(): 1/100 of the above
        self.usb_buff = int(self.epi.wMaxPacketSize/100)
def discover(self, idProduct = 0x2008, serial_number = None):
"""
the function allows to discover DI-2008 device.
returns: flag if a device(devices) are found.
assigns: self.available_ports list entry
[0] - COM port namer
[1] - serial number
"""
import usb.core
flag = False
dev = usb.core.find(idVendor=0x0683, idProduct=idProduct)
if dev is None:
raise ValueError('Device not found')
flag = False
else:
if dev.serial_number[:8] == serial_number:
flag = True
else:
raise ValueError(f'Device with serial number {serial_number} is not found')
dev = None
return dev
#####Basic serial communication functions
def read(self,N = 0, timeout = 1000):
if N == 0:
usb_buff = int(self.usb_buff)
else:
usb_buff = int(N)
from time import sleep
#tested dec 17, 2017
string = b""
try:
data = self.dev.read(self.epi,usb_buff,timeout)
except:
error(traceback.format_exc())
data = ''
if len(data) != 0:
for i in data:
string += bytes([i])
return string
def readall(self):
return self.read(self.usb_buff)
    def write(self,command):
        """
        Send *command* to the OUT endpoint. USB errors are logged and
        swallowed (best-effort, mirroring read()).
        """
        try:
            self.dev.write(self.epo,command)
        except:
            error(traceback.format_exc())
def inquire(self,command):
self.write(command)
res = self.read(self.usb_buff)
return res
def config_digital(self, number = 127, echo = False):
#tested dec 17, 2017
string = 'endo ' + str(number)+'\r'
flag = False
self.write(string)
a = self.readall()
info('%r' % a)
if echo == True:
if a == 'endo ' + str(number)+'\r':
flag = True
else:
flag = False
else:
flag = None
return flag
def set_digital(self, number = 127, echo = False):
#tested dec 17, 2017
string = 'dout ' + str(number)
flag = False
self.write(string)
if echo == True:
a = self.readall()
debug('%r' % a)
if a == 'dout ' + str(number):
flag = True
else:
flag = False
else:
flag = None
return flag
    def set_analog(self, channel_list, echo = False):
        """
        Program the instrument scan list ('slist') from channel_list.

        example:
        channel_list = ['T-thrmc','5','T-thrmc','5','T-thrmc','5','T-thrmc','5','digital']

        Returns True when the concatenated echoed replies match the sent
        commands (compared after Latin-1 encoding), otherwise False.
        echo=True additionally prints both streams for debugging.
        """
        import traceback
        flag = False
        cmd_string = ''
        cmd_resp = b''
        i = 0
        for item in channel_list:
            # translate the range/mode name into the slist config word
            strngs = self.config_analog_channels(item,i)
            command = 'slist '+str(i)+' '+str(strngs)
            cmd_string += command+'\r'
            cmd_resp += self.inquire(command)
            i+=1
        # NOTE(review): the trailing NUL appears to model the instrument's
        # stream terminator -- confirm against the protocol document.
        cmd_string+='\x00'
        if cmd_resp == bytes(cmd_string, encoding = 'Latin-1'):
            flag = True
        if echo:
            print(cmd_resp)
            print(cmd_string)
        return flag
def config_analog_channels(self,setting, channel):
"""
takes a string input that specifies range
"""
_config_dict_gain = {}
_config_dict_gain['0.010'] = '00101'
_config_dict_gain['0.025'] = '00100'
_config_dict_gain['0.05'] = '00011'
_config_dict_gain['0.1'] = '00010'
_config_dict_gain['0.25'] = '00001' #works
_config_dict_gain['0.5'] = '00000'
_config_dict_gain['1'] = '01101'
_config_dict_gain['2.5'] = '01100'
_config_dict_gain['5'] = '01011' #
_config_dict_gain['10'] = '01010'
_config_dict_gain['25'] = '01001'
_config_dict_gain['50'] = '01000'
_config_dict_gain['B-thrmc'] = b'10000'
_config_dict_gain['E-thrmc'] = b'10001'
_config_dict_gain['J-thrmc'] = b'10010'
_config_dict_gain['K-thrmc'] = b'10011'
_config_dict_gain['N-thrmc'] = '10100'
_config_dict_gain['R-thrmc'] = '10101'
_config_dict_gain['S-thrmc'] = '10110'
_config_dict_gain['T-thrmc'] = '10111'
_config_dict_gain['digital'] = '00000'
config_byte = str(int('000' + _config_dict_gain[setting] + '0000' + bin(channel)[2:].zfill(4),2))
return config_byte
def set_filter(self,filter = 'Average'):
"""
self.filters['Last Point'] = 0
self.filters['Average'] = 1
self.filters['Maximum'] = 2
self.filters['Minimum'] = 3
"""
num = self.filters[filter]
self.write('filter * '+str(num)+'\r')
a = self.readall()
b = self.readall()
#print('%r' % (a + b))
if echo == True:
if (a+b) == 'filter * 0'+str(num)+'\r':
flag = True
else:
flag = True
def set_packet_size(self,size = 128):
dic = {}
dic[16] = 'ps 0'
dic[32] = 'ps 1'
dic[64] = 'ps 2'
dic[128] = 'ps 3'
self.inquire(dic[size])
def set_sampling_rate(self, rate = 200, baserate = 200, dec = 0, echo = True):
"""
Integer ranges for both variable are:
4 ≤ srate ≤ 2232
1 ≤ dec ≤ 32767
The formula to calculate sample throughput rate differs by the number of enabled channels. For a single enabled analog channel:
Sample rate throughput (Hz) = 8,000 ÷ (srate × dec)
resulting in a sample throughput range of 2000 Hz at its fastest, and 0.000109 Hz, or 1 sample every 9141.99 seconds.
The formula changes when two or more analog channels are enabled:
Sample rate throughput (Hz) = 800 ÷ (srate × dec)
resulting in a sample throughput range of 200 Hz at its fastest, and 0.000011 Hz, or 1 sample every 91419.93 seconds.
"""
if dec == 0:
dec = int(baserate/rate)
flag = None
string = 'srate ' + str(int(800/baserate))
#self.ser.write('srate 6000000\r') # Supposedly sets it to read at 50kHz, Hz=60000000/srate
a = self.inquire(string)
string = 'dec ' + str(dec)
b = self.inquire(string)
#print('%r,%r' % (a,b))
if echo == True:
if a+b == 'srate '+str(int(800/baserate))+'\r\x00'+'dec '+ str(dec)+ '\r\x00':
flag = True
else:
flag = True
self.dec = dec
self.sampling_rate = rate
return flag
def start_scan(self):
info('The configured measurement(s) has(have) started')
self.write('start 0\r')
info("measurement ended")
    def stop_scan(self):
        """Stop a running acquisition ('stop')."""
        self.write('stop\r')
        info("measurement ended")
"""Advance function"""
def config_channels(self, rate = 200, conf_digital = 127, set_digital = 127, dec = 1, baserate = 200):
x = self.config_digital(conf_digital, echo = True)
a = self.set_digital(set_digital, echo = True)
b = self.set_analog(echo = True)
c = self.set_acquisition_rate(rate = rate, dec = dec, baserate = baserate, echo = True) #3000 gives maximum rate
return x*a*b*c
    def get_readings(self, points_to_read = 64, to_read_analog = 8, to_read_digital = 1):
        """
        Read and decode one batch of scan data from the IN endpoint.

        points_to_read: number of scans to pull.
        to_read_analog: analog channels per scan (2 bytes each, int16 'h').
        to_read_digital: digital words per scan (2 bytes, decoded as 'BB').
        Returns (res, flag): res is a (values-per-scan x points) array, or
        None when decoding failed; flag is False when the stream contains
        the b'stop' echo, True otherwise.
        """
        from struct import pack, unpack
        # bytes per scan: 2 per analog sample + 2 per digital word
        to_read = int(to_read_analog)*2+int(to_read_digital)*2
        result = self.read(to_read*points_to_read)
        if b'stop' in result:
            flag = False
        else:
            flag = True
        try:
            data = asarray(unpack(('h'*to_read_analog+'BB')*points_to_read,result))
        except:
            # short/partial reads do not match the struct format
            error(traceback.format_exc())
            data = None
        #analog_data = asarray(unpack('h'*to_read_analog,res[0:to_read_analog*2])) #this will be
        #first N bytes (where N/2 is number of analog channels to read
        #see https://docs.python.org/2/library/struct.html for 'h' description
        #digital_data = array(unpack('B',res[-1:])[0])
        #This how we can format the integer to binary string ---->> bin(unpack('B',res[-1])[0])[2:].zfill(7).split()
        #this will unpack the very last byte as binary and make sure to
        #keep the leading zero. The very first zero 'D7' byte count 18 (see manual) is ommited.
        #will be shown as a string. We need to convert it to a numpy array for easier usage
        try:
            # reshape flat samples into one column per scan point
            res = transpose(asarray(split(data,points_to_read)))
        except:
            error(traceback.format_exc())
            res = None
        return res, flag #(analog_data,digital_data)
    def blink(self):
        """
        Cycle the LED through settings 0-7, one second each, then leave it
        at setting 7. Blocking for about 8 seconds.
        """
        from time import sleep
        for i in range(8):
            self.inquire('led ' + str(i) + ' \r')
            sleep(1)
        self.inquire('led ' + str(7) + ' \r')
"""Test functions"""
    def self_test(self):
        """
        Interactive smoke test: query identity strings, cache them in
        self.dict and print them.

        NOTE(review): inquire() returns bytes, but the replies here are
        split with str separators ('info 1 '), which raises TypeError in
        Python 3 -- compare get_hardware_information(), which uses bytes
        literals throughout. Confirm and align.
        """
        self.inquire('led 7\r')
        self.tau = 0.001
        #dictionary with device parameters such as S\N, device name, ect
        self.dict = {}
        self.dict['Device name'] = self.inquire('info 1 \r').split('info 1 ')[1][1:-1]
        self.dict['Firmware version'] = self.inquire('info 2 \r').split('info 2 ')[1][1:-1]
        self.dict['Serial Number'] = self.inquire('info 6 \r').split('info 6 ')[1][1:-1]
        self.dict['Sample Rate Divisor'] = self.inquire('info 9 \r').split('info 9 ')[1][1:-1]
        #Useful variables
        #wait time after write (before) read. This seems to be not necessary.
        for i in self.dict.keys():
            print('%r, %r' % (i, self.dict[i]))
        print('information request complete: the DI-4108 with SN %r' %self.dict['Serial Number'])
        print('%r' % self.inquire('led 2\r'))
    def test1(self):
        """
        Hardware test: stream for ~2 s while printing readings.
        NOTE(review): `_waiting` is not defined anywhere in this class --
        presumably provided by a subclass or lost in refactoring; confirm.
        """
        self.self_test()
        self.config_channels()
        self.start_scan()
        while self._waiting()[0] <1:
            sleep(0.001)
        start_t = time()
        while time()-start_t<2:
            print("%0.5f %r %r" % (time()-start_t,self._waiting()[0],self.get_readings()))
        self.stop_scan()
        print('test 1 is Done. IN buffer has all data')
        print('data = dev.get_readings()')
        print('buffer waiting %r' % self._waiting()[0])
    def test2(self):
        """Hardware test: stream for 6 s, stop, then drain the buffer."""
        self.self_test()
        self.config_channels()
        self.start_scan()
        sleep(6)
        self.stop_scan()
        sleep(1)
        while self._waiting()[0]>5:
            print('time %r and value %r'% (time(),self.get_readings()))
        print('test 2 is Done')
    def test3(self):
        """
        Hardware test: same as test2 but using `waiting`.
        NOTE(review): `waiting` (like `_waiting`) is not defined in this
        class; the closing message also says "test 2" -- likely copy-paste.
        """
        self.self_test()
        self.config_channels()
        self.start_scan()
        sleep(6)
        self.stop_scan()
        print(self.waiting())
        sleep(1)
        while self.waiting()[0]>5:
            print(self.waiting()[0])
            print(self.get_readings())
        print('test 2 is Done')
    def echo_test1(self):
        """
        Hardware test: toggle digital outputs for ~1 s while scanning,
        then dump the raw buffer.
        NOTE(review): relies on `waiting`/`_waiting`, neither of which is
        defined in this class -- confirm their provenance.
        """
        self.config_channels()
        self.start_scan()
        while self.waiting()[0] <1:
            sleep(0.001)
        start_t = time()
        while time()- start_t <1:
            self.write('dout 0\r')
            self.write('dout 127 \r')
            sleep(0.06)
        self.stop_scan()
        print("%r" % self._waiting()[0])
        data = self.readall()
        print('%r' % data)
    def performance_test1(self):
        """Hardware test: fill the buffer with 10000 scans, then stop."""
        self.self_test()
        self.config_channels()
        self.start_scan()
        while self.waiting()[0] <10000:
            sleep(0.001)
        start_t = time()
        self.stop_scan()
        print('test 1 is Done. IN buffer has all data')
        print('data = dev.get_readings(10)')
        print('buffer waiting %r' % self._waiting()[0])
        print('t = Timer(lambda: dev.get_readings(N))')
        print('print t.timeit(number = M)')
    def performance_test2(self):
        """
        Hardware test: poll the buffer for 10 s reading 512-scan chunks
        and report the effective sampling rate.
        """
        self.self_test()
        self.config_channels()
        self.start_scan()
        start_t = time()
        self.lst = []
        time_st = time()
        while time()-start_t<10:
            if self._waiting()[0]> 512*16:
                data = self.get_readings(512)
                self.lst.append([time()-start_t,self._waiting()[0],(time() - time_st)*1000])
                print("%r %0.10f" % (self._waiting()[0], (time() - time_st)*1000))
            sleep(24/1000) #wait for 12.8 ms
        start_t = time()
        self.stop_scan()
        print('time between 4 kS = %0.5f' % mean(10.0/len(self.lst)))
        print('Sampling rate: %0.1f' % (512/mean(10.0/len(self.lst))))
        print('test 1 is Done. IN buffer has all data')
        print('data = dev.get_readings(10)')
        print('buffer waiting %r' % self._waiting()[0])
        print('t = Timer(lambda: dev.get_readings(N))')
        print('print t.timeit(number = M)')
if __name__ == "__main__": #for testing
    import logging
    from tempfile import gettempdir
    logging.basicConfig(#filename=gettempdir()+'/DI_USB_BULK_LL.log',
                        level=logging.DEBUG, format="%(asctime)s %(levelname)s: %(message)s")
    # fix: `driver` was an undefined name (NameError at startup);
    # instantiate the Driver class for interactive use
    self = Driver()
    print('self.self_test()')
    print('self.test1()')
    print('self.test2()')
    print('self.echo_test1()')
    print('self.performance_test1()')
    print('self.performance_test2()')
"""Single access point to all tools usable in hosts.
It is possible to create `HostToolsHelper` in host implementaion or
use singleton approach with global functions (using helper anyway).
"""
import avalon.api
from .lib import qt_app_context
class HostToolsHelper:
"""Create and cache tool windows in memory.
Almost all methods expect parent widget but the parent is used only on
first tool creation.
Class may also contain tools that are available only for one or few hosts.
"""
    def __init__(self, parent=None):
        """
        Args:
            parent: optional Qt widget used as fallback parent for every
                tool created through this helper.
        """
        self._log = None
        # Global parent for all tools (may and may not be set)
        self._parent = parent
        # Prepare attributes for all tools (lazily created on first use)
        self._workfiles_tool = None
        self._loader_tool = None
        self._creator_tool = None
        self._subset_manager_tool = None
        self._scene_inventory_tool = None
        self._library_loader_tool = None
        self._look_assigner_tool = None
        self._experimental_tools_dialog = None
    @property
    def log(self):
        """Lazily created logger named after the concrete class."""
        if self._log is None:
            from openpype.api import Logger
            self._log = Logger.get_logger(self.__class__.__name__)
        return self._log
def get_workfiles_tool(self, parent):
"""Create, cache and return workfiles tool window."""
if self._workfiles_tool is None:
from openpype.tools.workfiles.app import (
Window, validate_host_requirements
)
# Host validation
host = avalon.api.registered_host()
validate_host_requirements(host)
workfiles_window = Window(parent=parent)
self._workfiles_tool = workfiles_window
return self._workfiles_tool
    def show_workfiles(self, parent=None, use_context=None, save=None):
        """Workfiles tool for changing context and saving workfiles.

        use_context: when True (default) and the window was hidden, set the
            current avalon asset/task as its context before showing.
        save: enable/disable the save button (default True).
        """
        if use_context is None:
            use_context = True
        if save is None:
            save = True
        with qt_app_context():
            workfiles_tool = self.get_workfiles_tool(parent)
            workfiles_tool.set_save_enabled(save)
            if not workfiles_tool.isVisible():
                workfiles_tool.show()
                if use_context:
                    context = {
                        "asset": avalon.api.Session["AVALON_ASSET"],
                        "task": avalon.api.Session["AVALON_TASK"]
                    }
                    workfiles_tool.set_context(context)
            # Pull window to the front.
            workfiles_tool.raise_()
            workfiles_tool.activateWindow()
def get_loader_tool(self, parent):
"""Create, cache and return loader tool window."""
if self._loader_tool is None:
from openpype.tools.loader import LoaderWindow
loader_window = LoaderWindow(parent=parent or self._parent)
self._loader_tool = loader_window
return self._loader_tool
    def show_loader(self, parent=None, use_context=None):
        """Loader tool for loading representations.

        use_context: when True, set the current avalon asset as context;
            otherwise (default) just refresh the window.
        """
        with qt_app_context():
            loader_tool = self.get_loader_tool(parent)
            loader_tool.show()
            loader_tool.raise_()
            loader_tool.activateWindow()
            if use_context is None:
                use_context = False
            if use_context:
                context = {"asset": avalon.api.Session["AVALON_ASSET"]}
                loader_tool.set_context(context, refresh=True)
            else:
                loader_tool.refresh()
    def get_creator_tool(self, parent):
        """Create, cache and return creator tool window."""
        if self._creator_tool is None:
            from openpype.tools.creator import CreatorWindow
            creator_window = CreatorWindow(parent=parent or self._parent)
            self._creator_tool = creator_window
        return self._creator_tool
    def show_creator(self, parent=None):
        """Show tool to create new instances for publishing."""
        with qt_app_context():
            creator_tool = self.get_creator_tool(parent)
            creator_tool.refresh()
            creator_tool.show()
            # Pull window to the front.
            creator_tool.raise_()
            creator_tool.activateWindow()
    def get_subset_manager_tool(self, parent):
        """Create, cache and return subset manager tool window."""
        if self._subset_manager_tool is None:
            from openpype.tools.subsetmanager import SubsetManagerWindow
            subset_manager_window = SubsetManagerWindow(
                parent=parent or self._parent
            )
            self._subset_manager_tool = subset_manager_window
        return self._subset_manager_tool
    def show_subset_manager(self, parent=None):
        """Show tool to display/remove existing created instances."""
        with qt_app_context():
            subset_manager_tool = self.get_subset_manager_tool(parent)
            subset_manager_tool.show()
            # Pull window to the front.
            subset_manager_tool.raise_()
            subset_manager_tool.activateWindow()
    def get_scene_inventory_tool(self, parent):
        """Create, cache and return scene inventory tool window."""
        if self._scene_inventory_tool is None:
            from openpype.tools.sceneinventory import SceneInventoryWindow
            scene_inventory_window = SceneInventoryWindow(
                parent=parent or self._parent
            )
            self._scene_inventory_tool = scene_inventory_window
        return self._scene_inventory_tool
    def show_scene_inventory(self, parent=None):
        """Show tool to maintain loaded containers."""
        with qt_app_context():
            scene_inventory_tool = self.get_scene_inventory_tool(parent)
            scene_inventory_tool.show()
            scene_inventory_tool.refresh()
            # Pull window to the front.
            scene_inventory_tool.raise_()
            scene_inventory_tool.activateWindow()
    def get_library_loader_tool(self, parent):
        """Create, cache and return library loader tool window."""
        if self._library_loader_tool is None:
            from openpype.tools.libraryloader import LibraryLoaderWindow
            library_window = LibraryLoaderWindow(
                parent=parent or self._parent
            )
            self._library_loader_tool = library_window
        return self._library_loader_tool
    def show_library_loader(self, parent=None):
        """Loader tool for loading representations from library project."""
        with qt_app_context():
            library_loader_tool = self.get_library_loader_tool(parent)
            library_loader_tool.show()
            library_loader_tool.raise_()
            library_loader_tool.activateWindow()
            library_loader_tool.refresh()
    def show_publish(self, parent=None):
        """Publish UI (delegated to avalon's publish tool)."""
        from avalon.tools import publish
        publish.show(parent)
def get_look_assigner_tool(self, parent):
"""Create, cache and return look assigner tool window."""
if self._look_assigner_tool is None:
import mayalookassigner
mayalookassigner_window = mayalookassigner.App(parent)
self._look_assigner_tool = mayalookassigner_window
return self._look_assigner_tool
    def show_look_assigner(self, parent=None):
        """Look manager is Maya specific tool for look management."""
        from avalon import style
        with qt_app_context():
            look_assigner_tool = self.get_look_assigner_tool(parent)
            look_assigner_tool.show()
            # apply avalon styling after show (tool does not do it itself)
            look_assigner_tool.setStyleSheet(style.load_stylesheet())
    def get_experimental_tools_dialog(self, parent=None):
        """Dialog of experimental tools.

        For some hosts it is not easy to modify the menu of tools. For
        those cases an experimental tools dialog was added, which is Qt
        based and can be dynamically filled with experimental tools, so a
        host needs only a single "Experimental tools" button to see them.

        The dialog can also be empty, with a message that no experimental
        tools are available.
        """
        if self._experimental_tools_dialog is None:
            from openpype.tools.experimental_tools import (
                ExperimentalToolsDialog
            )
            self._experimental_tools_dialog = ExperimentalToolsDialog(parent)
        return self._experimental_tools_dialog
    def show_experimental_tools_dialog(self, parent=None):
        """Show dialog with experimental tools."""
        with qt_app_context():
            dialog = self.get_experimental_tools_dialog(parent)
            dialog.show()
            dialog.raise_()
            dialog.activateWindow()
    def get_tool_by_name(self, tool_name, parent=None, *args, **kwargs):
        """Return (creating and caching if needed) the tool window for
        ``tool_name``.

        Returns None for "publish" (it has no cached window) and for
        unknown names (a warning is logged).
        """
        if tool_name == "workfiles":
            return self.get_workfiles_tool(parent, *args, **kwargs)
        elif tool_name == "loader":
            return self.get_loader_tool(parent, *args, **kwargs)
        elif tool_name == "libraryloader":
            return self.get_library_loader_tool(parent, *args, **kwargs)
        elif tool_name == "creator":
            return self.get_creator_tool(parent, *args, **kwargs)
        elif tool_name == "subsetmanager":
            return self.get_subset_manager_tool(parent, *args, **kwargs)
        elif tool_name == "sceneinventory":
            return self.get_scene_inventory_tool(parent, *args, **kwargs)
        elif tool_name == "lookassigner":
            return self.get_look_assigner_tool(parent, *args, **kwargs)
        elif tool_name == "publish":
            self.log.info("Can't return publish tool window.")
        elif tool_name == "experimental_tools":
            return self.get_experimental_tools_dialog(parent, *args, **kwargs)
        else:
            self.log.warning(
                "Can't show unknown tool name: \"{}\"".format(tool_name)
            )
    def show_tool_by_name(self, tool_name, parent=None, *args, **kwargs):
        """Show a tool window by its name.

        Helper that dispatches to the matching ``show_*`` method; unknown
        names only log a warning.
        """
        if tool_name == "workfiles":
            self.show_workfiles(parent, *args, **kwargs)
        elif tool_name == "loader":
            self.show_loader(parent, *args, **kwargs)
        elif tool_name == "libraryloader":
            self.show_library_loader(parent, *args, **kwargs)
        elif tool_name == "creator":
            self.show_creator(parent, *args, **kwargs)
        elif tool_name == "subsetmanager":
            self.show_subset_manager(parent, *args, **kwargs)
        elif tool_name == "sceneinventory":
            self.show_scene_inventory(parent, *args, **kwargs)
        elif tool_name == "lookassigner":
            self.show_look_assigner(parent, *args, **kwargs)
        elif tool_name == "publish":
            self.show_publish(parent, *args, **kwargs)
        elif tool_name == "experimental_tools":
            self.show_experimental_tools_dialog(parent, *args, **kwargs)
        else:
            self.log.warning(
                "Can't show unknown tool name: \"{}\"".format(tool_name)
            )
class _SingletonPoint:
    """Singleton access to host tools.

    Some hosts don't have the ability to create a 'HostToolsHelper' object
    and can only register function callbacks. For those cases this
    singleton point creates the shared 'HostToolsHelper' lazily.
    """
    helper = None
    @classmethod
    def _create_helper(cls):
        # Lazy, idempotent creation of the shared helper.
        if cls.helper is None:
            cls.helper = HostToolsHelper()
    @classmethod
    def show_tool_by_name(cls, tool_name, parent=None, *args, **kwargs):
        """Show a tool via the shared helper (created on first use)."""
        cls._create_helper()
        cls.helper.show_tool_by_name(tool_name, parent, *args, **kwargs)
    @classmethod
    def get_tool_by_name(cls, tool_name, parent=None, *args, **kwargs):
        """Return a tool window via the shared helper (created on first use)."""
        cls._create_helper()
        return cls.helper.get_tool_by_name(tool_name, parent, *args, **kwargs)
# Function callbacks using the singleton access point
def get_tool_by_name(tool_name, parent=None, *args, **kwargs):
    """Module-level access to `HostToolsHelper.get_tool_by_name`."""
    return _SingletonPoint.get_tool_by_name(tool_name, parent, *args, **kwargs)
def show_tool_by_name(tool_name, parent=None, *args, **kwargs):
    """Module-level access to `HostToolsHelper.show_tool_by_name`."""
    _SingletonPoint.show_tool_by_name(tool_name, parent, *args, **kwargs)
def show_workfiles(parent=None, use_context=None, save=None):
    """Show the workfiles tool through the shared helper."""
    _SingletonPoint.show_tool_by_name(
        "workfiles", parent, use_context=use_context, save=save
    )
def show_loader(parent=None, use_context=None):
    """Show the loader tool through the shared helper."""
    _SingletonPoint.show_tool_by_name(
        "loader", parent, use_context=use_context
    )
def show_library_loader(parent=None):
    """Show the library loader tool through the shared helper."""
    _SingletonPoint.show_tool_by_name("libraryloader", parent)
def show_creator(parent=None):
    """Show the creator tool through the shared helper."""
    _SingletonPoint.show_tool_by_name("creator", parent)
def show_subset_manager(parent=None):
    """Show the subset manager tool through the shared helper."""
    _SingletonPoint.show_tool_by_name("subsetmanager", parent)
def show_scene_inventory(parent=None):
    """Show the scene inventory tool through the shared helper."""
    _SingletonPoint.show_tool_by_name("sceneinventory", parent)
def show_look_assigner(parent=None):
    """Show the (Maya) look assigner tool through the shared helper."""
    _SingletonPoint.show_tool_by_name("lookassigner", parent)
def show_publish(parent=None):
    """Show the publish UI through the shared helper."""
    _SingletonPoint.show_tool_by_name("publish", parent)
def show_experimental_tools_dialog(parent=None):
    """Show the experimental tools dialog through the shared helper."""
    _SingletonPoint.show_tool_by_name("experimental_tools", parent)
|
# Another Logic
for x in range(5, 0, -1):
for y in range(5, 0, -1):
if x >= y:
print("*", end="")
else:
print(" ", end="")
print() |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jan 29, 2018
@author: Congjian Wang
"""
from __future__ import division, print_function , unicode_literals, absolute_import
import warnings
warnings.simplefilter('default', DeprecationWarning)
#External Modules---------------------------------------------------------------
import copy
import xarray as xr
import numpy as np
#External Modules End-----------------------------------------------------------
#Internal Modules---------------------------------------------------------------
from BaseClasses import BaseType
from utils import InputData, utils
from .PostProcessor import PostProcessor
import MessageHandler
import Files
#Internal Modules End-----------------------------------------------------------
class DataClassifier(PostProcessor):
"""
This Post-Processor performs data classification based on given classifier.
In order to use this interface post-processor, the users need to provide
two data objects, one (only PointSet is allowed) is used to construct the
classifier that will be used to label the data in the second data object
(both PointSet and HistorySet are allowed).
"""
  @classmethod
  def getInputSpecification(cls):
    """
      Method to get a reference to a class that specifies the input data for
      class cls.
      @ In, cls, the class for which we are retrieving the specification
      @ Out, inputSpecification, InputData.ParameterInput, class to use for
        specifying input of cls.
    """
    inputSpecification = super(DataClassifier, cls).getInputSpecification()
    # <variable name="..."><Function class="..." type="..."/></variable>
    VariableInput = InputData.parameterInputFactory("variable", contentType=InputData.StringType)
    VariableInput.addParam("name", InputData.StringType, True)
    FunctionInput = InputData.parameterInputFactory("Function", contentType=InputData.StringType)
    FunctionInput.addParam("class", InputData.StringType, True)
    FunctionInput.addParam("type", InputData.StringType, True)
    VariableInput.addSub(FunctionInput, InputData.Quantity.one)
    # <label> names the output variable that carries the class id
    LabelInput = InputData.parameterInputFactory("label",contentType=InputData.StringType)
    inputSpecification.addSub(VariableInput, InputData.Quantity.one_to_infinity)
    inputSpecification.addSub(LabelInput, InputData.Quantity.one)
    return inputSpecification
  def __init__(self, messageHandler):
    """
      Constructor
      @ In, messageHandler, MessageHandler, message handler object
      @ Out, None
    """
    PostProcessor.__init__(self, messageHandler)
    self.printTag = 'POSTPROCESSOR DataClassifier'
    self.mapping    = {}             # dictionary for mapping input space between different DataObjects {'variableName':'externalFunctionName'}
    self.funcDict   = {}             # Contains the function to be used {'variableName':externalFunctionInstance}
    self.label      = None           # ID of the variable which contains the label values
    # assembler objects to be requested
    self.addAssemblerObject('Function', 'n', True)
  def initialize(self, runInfo, inputs, initDict=None):
    """
      Method to initialize the DataClassifier post-processor.
      @ In, runInfo, dict, dictionary of run info (e.g. working dir, etc)
      @ In, inputs, list, list of inputs
      @ In, initDict, dict, optional, dictionary with initialization options
      @ Out, None
    """
    PostProcessor.initialize(self, runInfo, inputs, initDict)
    # resolve the externally defined classification functions by name
    for key, val in self.mapping.items():
      self.funcDict[key] = self.retrieveObjectFromAssemblerDict('Function',val[1])
  def _localReadMoreXML(self, xmlNode):
    """
      Method to read the portion of the XML input that belongs to this specialized class
      @ In, xmlNode, xml.etree.ElementTree.Element, XML element node
      @ Out, None
    """
    paramInput = DataClassifier.getInputSpecification()()
    paramInput.parseNode(xmlNode)
    self._handleInput(paramInput)
def _handleInput(self, paramInput):
"""
Method that handles PostProcessor parameter input block
@ In, paramInput, ParameterInput, the already parsed input
@ Out, None
"""
for child in paramInput.subparts:
if child.getName() == 'variable':
func = child.findFirst('Function')
funcType = func.parameterValues['type']
funcName = func.value.strip()
self.mapping[child.parameterValues['name']] = (funcType, funcName)
elif child.getName() == 'label':
self.label = child.value.strip()
  def inputToInternal(self, currentInput):
    """
      Method to convert a list of input objects into the internal format that is
      understandable by this pp.
      @ In, currentInput, list, a list of DataObjects
      @ Out, newInput, dict, {'classifier': {...}, 'target': {...}} with the
        converted data from both DataObjects
    """
    if isinstance(currentInput,list) and len(currentInput) != 2:
      self.raiseAnError(IOError, "Two inputs DataObjects are required for postprocessor", self.name)
    newInput ={'classifier':{}, 'target':{}}
    haveClassifier = False
    haveTarget     = False
    for inputObject in currentInput:
      if isinstance(inputObject, dict):
        # NOTE(review): newInput is a dict and has no append(); this branch
        # would raise AttributeError if a dict input is ever passed --
        # confirm whether dict inputs are still supported.
        newInput.append(inputObject)
      else:
        if inputObject.type not in ['PointSet', 'HistorySet']:
          self.raiseAnError(IOError, "The input for this postprocesor", self.name, "is not acceptable! Allowed inputs are 'PointSet' and 'HistorySet'.")
        if len(inputObject) == 0:
          self.raiseAnError(IOError, "The input", inputObject.name, "is empty!")
        inputDataset = inputObject.asDataset()
        inputParams  = inputObject.getVars('input')
        outputParams = inputObject.getVars('output')
        dataType = None
        mappingKeys = self.mapping.keys()
        # NOTE(review): dict keys are unique by construction so this check
        # can never trigger; moreover dict_keys has no count() in Python 3
        # and sets do not support indexing (dups[0]) -- dead/broken code.
        if len(set(mappingKeys)) != len(mappingKeys):
          dups = set([elem for elem in mappingKeys if mappingKeys.count(elem) > 1])
          self.raiseAnError(IOError, "The same variable {} name is used multiple times in the XML input".format(dups[0]))
        # the DataObject whose inputs equal the mapped variables and whose
        # output carries the label is the classifier; the other is the target
        if set(self.mapping.keys()) == set(inputParams) and self.label in outputParams:
          dataType = 'classifier'
          if not haveClassifier:
            haveClassifier = True
          else:
            self.raiseAnError(IOError, "Both input data objects have been already processed! No need to execute this postprocessor", self.name)
          if inputObject.type != 'PointSet':
            self.raiseAnError(IOError, "Only PointSet is allowed as classifier, but HistorySet", inputObject.name, "is provided!")
        else:
          dataType = 'target'
          if not haveTarget:
            haveTarget = True
          else:
            self.raiseAnError(IOError, "None of the input DataObjects can be used as the reference classifier! Either the label", \
                    self.label, "is not exist in the output of the DataObjects or the inputs of the DataObjects are not the same as", \
                    ','.join(self.mapping.keys()))
        newInput[dataType]['input']  = dict.fromkeys(inputParams)
        newInput[dataType]['output'] = dict.fromkeys(outputParams)
        if inputObject.type == 'PointSet':
          for elem in inputParams:
            newInput[dataType]['input'][elem] = copy.deepcopy(inputDataset[elem].values)
          for elem in outputParams:
            newInput[dataType]['output'][elem] = copy.deepcopy(inputDataset[elem].values)
          newInput[dataType]['type'] = inputObject.type
          newInput[dataType]['name'] = inputObject.name
        else:
          # only extract the last element in each realization for the HistorySet
          newInput[dataType]['type'] = inputObject.type
          newInput[dataType]['name'] = inputObject.name
          numRlzs = len(inputObject)
          newInput[dataType]['historySizes'] = dict.fromkeys(range(numRlzs))
          for i in range(numRlzs):
            rlz = inputObject.realization(index=i)
            for elem in inputParams:
              if newInput[dataType]['input'][elem] is None:
                newInput[dataType]['input'][elem] = np.empty(0)
              newInput[dataType]['input'][elem] = np.append(newInput[dataType]['input'][elem], rlz[elem])
            for elem in outputParams:
              if newInput[dataType]['output'][elem] is None:
                newInput[dataType]['output'][elem] = np.empty(0)
              newInput[dataType]['output'][elem] = np.append(newInput[dataType]['output'][elem], rlz[elem].values[-1])
              if newInput[dataType]['historySizes'][i] is None:
                newInput[dataType]['historySizes'][i] = len(rlz[elem].values)
    return newInput
def run(self, inputIn):
"""
This method executes the postprocessor action.
@ In, inputIn, list, list of DataObjects
@ Out, outputDict, dict, dictionary of outputs
"""
inputDict = self.inputToInternal(inputIn)
targetDict = inputDict['target']
classifierDict = inputDict['classifier']
outputDict = {}
outputType = targetDict['type']
outputDict['dataType'] = outputType
outputDict['dataFrom'] = targetDict['name']
if outputType == 'HistorySet':
outputDict['historySizes'] = copy.copy(targetDict['historySizes'])
numRlz = utils.first(targetDict['input'].values()).size
outputDict[self.label] = np.empty(numRlz)
for i in range(numRlz):
tempTargDict = {}
for param, vals in targetDict['input'].items():
tempTargDict[param] = vals[i]
for param, vals in targetDict['output'].items():
tempTargDict[param] = vals[i]
tempClfList = []
labelIndex = None
for key, values in classifierDict['input'].items():
calcVal = self.funcDict[key].evaluate("evaluate", tempTargDict)
inds, = np.where(np.asarray(values) == calcVal)
if labelIndex is None:
labelIndex = set(inds)
else:
labelIndex = labelIndex & set(inds)
if len(labelIndex) != 1:
self.raiseAnError(IOError, "The parameters", ",".join(tempTargDict.keys()), "with values", ",".join([str(el) for el in tempTargDict.values()]), "could not be put in any class!")
outputDict[self.label][i] = classifierDict['output'][self.label][list(labelIndex)[0]]
return outputDict
  def collectOutput(self, finishedJob, output):
    """
      Method to place all of the computed data into output object
      @ In, finishedJob, object, JobHandler object that is in charge of running this postprocessor
      @ In, output, object, the object where we want to place our computed results
      @ Out, None
    """
    evaluation = finishedJob.getEvaluation()
    inputObjects, outputDict = evaluation
    # Writing results to file outputs is not supported by this postprocessor.
    if isinstance(output, Files.File):
      self.raiseAnError(IOError, "Dump results to files is not yet implemented!")
    # Locate the DataObject the labels were computed from.
    # NOTE(review): if no input name matches outputDict['dataFrom'], the later
    # use of inputObject raises a NameError -- confirm upstream guarantees a match.
    for inp in inputObjects:
      if inp.name == outputDict['dataFrom']:
        inputObject = inp
        break
    if inputObject != output:
      ## Copy any data you need from the input DataObject before adding new data
      rlzs = inputObject.asDataset(outType='dict')['data']
      if output.type == 'PointSet':
        output.load(rlzs, style='dict')
      elif output.type == 'HistorySet':
        # A PointSet input cannot populate a HistorySet output.
        if inputObject.type != 'HistorySet':
          self.raiseAnError(IOError, "Copying the data from input PointSet", inputObject.name, "to output HistorySet", output.name, "is currently not allowed!")
        output.load(rlzs, style='dict', dims=inputObject.getDimensions())
    if output.type == 'PointSet':
      # One scalar label per realization.
      output.addVariable(self.label, copy.copy(outputDict[self.label]), classify='output')
    elif output.type == 'HistorySet':
      # Broadcast the scalar label along each history's pivot coordinates so
      # the new variable aligns with the existing per-history data.
      numRlzs = output.size
      labelValues = np.zeros(numRlzs, dtype=object)
      pivotParams = tuple(output.indexes)
      slices = output.sliceByIndex('RAVEN_sample_ID')
      coordList = []
      for i in range(numRlzs):
        coordDict = {}
        for elem in pivotParams:
          # dropna trims padding so the coordinates match the real history length
          coordDict[elem] = slices[i].dropna(elem)[elem]
        coordList.append(coordDict)
      for i in range(numRlzs):
        histSize = outputDict['historySizes'][i]
        values = np.empty(histSize)
        values.fill(outputDict[self.label][i])
        xrArray = xr.DataArray(values, dims=pivotParams, coords=coordList[i])
        labelValues[i] = xrArray
      output.addVariable(self.label, labelValues, classify='output')
|
"""
This is a dictionary of information about each product type.
the keys of the dictionary are the names of each product type. This will
translate into local file and/or directory names for various outputs of
pdr-tests: index files, data downloads, etc.
These selection rules are ideally for the _largest individual file_ of each
product (which in some cases will be the only file; lucky you, if so).
manifest: the .parquet file that contains urls, file sizes, etc., scraped directly
from the hosting institution for this product type. for some data sets, all
urls will be found in the same .parquet file; for others, they may be split
between nodes or scraping sessions.
fn_must_contain: a list of strings that must be in the file name (not counting
  directories, domains, etc.) to differentiate it from the other types in the
  manifest file. Some product types instead use fn_regex: a list of regular
  expressions that the file name must match.
url_must_contain: an optional additional list of strings that must be in the
url (counting the entire url) to differentiate this from other types in the
manifest file. useful for specifying directories.
label: "A" if the labels for this product type are attached; "D" if the labels
are detached.
"""
from pathlib import Path
import pdr_tests
# Directory of scraped .parquet manifest files bundled with pdr_tests.
MANIFEST_DIR = Path(Path(pdr_tests.__file__).parent, "node_manifests")
# shorthand variables for specific .parquet files
GEO_FILE = Path(MANIFEST_DIR, "geomsl.parquet")
# Selection rules keyed by product type name. Each entry locates one MSL APXS
# product type in the GEO node manifest. All three types use detached labels
# ("D") and regex-based filename matching (fn_regex) rather than fn_must_contain.
file_information = {
    # raw APXS science telemetry (EDR)
    "APXS_SCIENCE_EDR": {
        "manifest": GEO_FILE,
        "fn_regex": [r"ap.*esc.*\.dat"],
        "url_must_contain": ["msl-m-apxs-2-edr"],
        "label": "D",
    },
    # derived APXS spectra (RDR)
    "APXS_SPECTRUM_RDR": {
        "manifest": GEO_FILE,
        "fn_regex": [r"ap.*rsp.*\.csv"],
        "url_must_contain": ["msl-m-apxs-4_5-rdr"],
        "label": "D",
    },
    # derived oxide abundances (RDR)
    "APXS_OXIDE_RDR": {
        "manifest": GEO_FILE,
        "fn_regex": [r"ap.*rwp.*\.csv"],
        "url_must_contain": ["msl-m-apxs-4_5-rdr"],
        "label": "D",
    }
}
|
from tkinter import *
from vue.base_frame import BaseFrame
from controller.order_controller import OrderController
class ListAdminOrderFrame(BaseFrame):
    """Admin view that lists every order and lets the user open one."""

    def __init__(self, order_controller : OrderController, master=None):
        super().__init__(master)
        self._order_controller = order_controller
        self._orders = self._order_controller.listOrders()
        self._create_widgets()

    def _create_widgets(self):
        """Build the static widgets: title, scrollable list, and buttons."""
        self.title = Label(self, text="Orders:")
        self.title.grid(row=0, column=0)
        # Scrollable single-selection list of orders.
        scrollbar = Scrollbar(self, orient='vertical')
        self.listbox = Listbox(self, yscrollcommand=scrollbar.set, width=30, selectmode='single')
        scrollbar['command'] = self.listbox.yview
        self.listbox.bind('<<ListboxSelect>>', self.on_select)
        scrollbar.grid(row=1, column=2, sticky='ns')
        self.listbox.grid(row=1, column=0, columnspan=2, sticky='nsew')
        # "Show order" is only gridded once a row is selected (see on_select).
        self.show_order_button = Button(self, text="Show order", command=self.show_order)
        self.menu = Button(self, text="Return", fg="red",
                           command=self.show_menu)
        self.menu.grid(row=2, column=0, sticky="w")

    def on_select(self, event):
        """Show or hide the "Show order" button with the listbox selection."""
        if self.listbox.curselection():
            self.show_order_button.grid(row=2, column=1, sticky="w")
        else:
            self.show_order_button.grid_forget()

    def show(self):
        """Refresh the order list from the controller, then display the frame."""
        self._orders = self._order_controller.listOrders()
        self.listbox.delete(0, END)
        for index, order in enumerate(self._orders):
            self.listbox.insert(index, "N°{} ({}€)".format(order['id'], order['price']))
        super().show()

    def show_order(self):
        """Open the currently selected order, or hide the button if none."""
        selection = self.listbox.curselection()
        if not selection:
            self.show_order_button.grid_forget()
            return
        order = self._orders[int(selection[0])]
        self._root_frame.show_order(order, True)
#!/usr/bin/python
import sys
import os
import subprocess as sp
import shutil
from path import Path
# constants
TEMPLATE_FILE = 'project-config.template'  # m4 template for the cslicer config
CWD = os.getcwd()  # directory the script was launched from
SWD = os.path.dirname(os.path.abspath(__file__))  # directory containing this script
ELASTIC = 'elasticsearch'
ELASTIC_URL = 'https://github.com/elastic/elasticsearch.git'
MAVEN = 'maven'
HADOOP = 'hadoop'
REPO_ROOT = 'repo'  # clone destination, relative to the project directory
CONFIG_FILE = 'config.properties'  # generated cslicer configuration file name
template = Path(os.path.join(SWD, TEMPLATE_FILE))
# Path of the cslicer fat jar, resolved relative to this script's location.
cslicer_path = os.path.join(SWD, '..', '..', 'target', 'cslicer-1.0.0-jar-with-dependencies.jar')
def check_out_repo(url):
    """Clone the git repository at `url` into REPO_ROOT (relative to the cwd)."""
    sp.check_call(['git', 'clone', url, REPO_ROOT])
def check_out_version(version, bname):
    """Check out commit `version` on branch `bname`, creating the branch if needed.

    Assumes the current working directory is inside the repository.
    """
    # create new branch if not already exists
    if sp.call(['git', 'show-branch', bname]) != 0:
        sp.check_call(['git', 'checkout', '-b', bname, version])
    sp.call(['git', 'checkout', bname])
def clean_up_repo():
    """Discard all working-tree changes and return to the master branch."""
    sp.check_call(['git', 'reset', 'HEAD', '--hard'])
    sp.check_call(['git', 'checkout', 'master'])
def generate_config(temp, repo, jacoco, length,
                    end, src_root, cls_root, build,
                    cg='/tmp/cg.txt', touch='/tmp/touch.txt'):
    """Render the m4 template `temp` into CONFIG_FILE in the current directory.

    Each parameter is passed to m4 as a macro definition consumed by the
    template: repository path, jacoco dump, history length, end commit,
    source/class roots, build file, call-graph and touch-set output paths.
    Raises subprocess.CalledProcessError when m4 exits non-zero.
    """
    macros = [('REPO_PATH', repo),
              ('EXEC_PATH', jacoco),
              ('HISTORY_LENGTH', length),
              ('END_COMMIT', end),
              ('SOURCE_ROOT', src_root),
              ('CLASS_ROOT', cls_root),
              ('BUILD_PATH', build),
              ('CALL_GRAPH_PATH', cg),
              ('TOUCH_SET_PATH', touch)]
    cmd = ['m4']
    for name, value in macros:
        cmd += ['-D', '{}={}'.format(name, value)]
    cmd.append(temp)
    with open(CONFIG_FILE, 'w') as f:
        # universal_newlines=True makes stdout text so the lines can be
        # written to a text-mode file on Python 3 (p.stdout is bytes otherwise).
        p = sp.Popen(cmd, stdout=sp.PIPE, universal_newlines=True)
        for line in p.stdout:
            f.write(line)
        p.wait()
    if p.returncode:
        # CalledProcessError requires both the return code and the command;
        # the single-argument form raised a TypeError instead of the intended error.
        raise sp.CalledProcessError(p.returncode, cmd)
def run_unit_tests(tname):
    """Run a single Maven test selector (Class or Class#method) in the cwd."""
    sp.check_call(['mvn','test','-Dtest='+tname])
def run_functionality(tid, version, testname, r_root, e_root):
    """Run one cslicer functionality experiment.

    tid -- test id (used as branch name and temp-file suffix)
    version -- commit to check out
    testname -- Maven test selector used to produce the jacoco dump
    r_root -- path.Path of the repository clone
    e_root -- path.Path of the per-test working directory

    NOTE(review): entering a path.Path as a context manager is assumed to
    chdir into it and restore the cwd on exit -- confirm with the installed
    `path` package version.
    """
    e_root.mkdir_p()
    # check out specific version
    with r_root:
        check_out_version(version, tid)
        # replace pom file
        shutil.copy(os.path.join(SWD, \
            'elasticsearch/e1/pom.xml'), 'pom.xml')
        # Reuse a previously produced coverage dump when available.
        if not os.path.exists(os.path.join(e_root.abspath(), 'jacoco.exec')):
            # run unit tests
            run_unit_tests(testname)
            # copy jacoco dump
            shutil.copy('target/jacoco.exec', os.path.join(e_root.abspath(), 'jacoco.exec'))
    # generate config file for cslicer
    with e_root:
        generate_config(template.abspath(), os.path.join(r_root.abspath(), '.git'), \
            os.path.join(e_root.abspath(), 'jacoco.exec'), \
            str(50), \
            version, \
            os.path.join(r_root.abspath(), 'src/main/java'), \
            os.path.join(r_root.abspath(), 'target/classes'), \
            os.path.join(r_root.abspath(), 'pom.xml'), \
            '/tmp/cg-'+tid+'.txt',
            '/tmp/touch-'+tid+'.txt')
        # run cslicer
        sp.check_call(['java','-jar', cslicer_path, '-q', '-c', CONFIG_FILE])
    # collect results
    # clean up
    with r_root:
        clean_up_repo()
def run_elastic_search():
    """Run the elasticsearch functionality experiment (test id 'e1')."""
    p_root = Path(os.path.join(CWD, ELASTIC))
    p_root.mkdir_p()
    # check out repo
    with p_root:
        r_root = p_root / REPO_ROOT
        if not r_root.exists():
            check_out_repo(ELASTIC_URL)
        # setup first functionality
        tid = 'e1'
        version = '647327f4'
        testname = 'org.elasticsearch.script.GroovySandboxScriptTests#testDynamicBlacklist'
        e_root = p_root / tid
        run_functionality(tid, version, testname, r_root, e_root)
def main():
    """Script entry point; returns None, i.e. exit status 0."""
    run_elastic_search()
if __name__ == "__main__":
sys.exit(main())
|
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from saspy.sasproccommons import SASProcCommons
from saspy.sasresults import SASresults
class SASutil:
    """
    This class is for SAS BASE procedures to be called as python3 objects and use SAS as the computational engine
    This class and all the useful work in this package require a licensed version of SAS.
    To add a new procedure do the following:
    #. Create a new method for the procedure
    #. Create the set of required statements. If there are no required statements then create an empty set {}
    #. Create the legal set of statements. This can often be obtained from the documentation of the procedure. 'procopts' should always be included in the legal set to allow flexibility in calling the procedure.
    #. Create the doc string with the following parts at a minimum:
        - Procedure Name
        - Required set
        - Legal set
        - Link to the procedure documentation
    #. Add the return call for the method using an existing procedure as an example
    #. Verify that all the statements in the required and legal sets are listed in _makeProcCallMacro method of sasproccommons.py
    #. Write at least one test to exercise the procedures and include it in the appropriate testing file
    """
    def __init__(self, session, *args, **kwargs):
        """
        Submit an initial set of macros to prepare the SAS system

        :param session: SAS session object used as the computational engine
        :param args: unused; accepted for interface compatibility
        :param kwargs: unused; accepted for interface compatibility
        """
        self.sasproduct = "util"
        # create logging
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.WARN)
        self.sas = session
        self.logger.debug("Initialization of SAS Macro: " + self.sas.saslog())

    def hpimpute(self, **kwargs: dict) -> 'SASresults':
        """
        Python method to call the HPIMPUTE procedure

        ``required_set = {'impute'}``
        ``legal_set = {'input', 'impute', 'performance', 'id', 'freq', 'code', 'procopts'}``

        For more information on the statements see the Documentation link.
        Documentation link:
        http://support.sas.com/documentation/cdl/en/stathpug/68163/HTML/default/viewer.htm#stathpug_hpsplit_syntax.htm

        :param kwargs: dict
        :return: SAS result object
        """
        required_set = {'impute'}
        legal_set = {'input', 'impute', 'performance', 'id', 'freq', 'code',
                     'procopts'}
        self.logger.debug("kwargs type: " + str(type(kwargs)))
        return SASProcCommons._run_proc(self, "HPIMPUTE", required_set, legal_set, **kwargs)

    def hpbin(self, **kwargs: dict) -> 'SASresults':
        """
        Python method to call the HPBIN procedure

        ``required_set = {}``
        ``legal_set = {'code', 'id', 'performance', 'target', 'input', 'procopts'}``

        For more information on the statements see the Documentation link.
        Documentation link:
        http://support.sas.com/documentation/cdl/en/stathpug/68163/HTML/default/viewer.htm#stathpug_hpsplit_syntax.htm

        :param kwargs: dict
        :return: SAS result object
        """
        required_set = {}
        legal_set = {'code', 'id', 'performance', 'target', 'input', 'procopts'}
        self.logger.debug("kwargs type: " + str(type(kwargs)))
        return SASProcCommons._run_proc(self, "HPBIN", required_set, legal_set, **kwargs)

    def hpsample(self, **kwargs: dict) -> 'SASresults':
        """
        Python method to call the HPSAMPLE procedure

        ``required_set = {}``
        ``legal_set = {'class', 'performance', 'target', 'var', 'procopts'}``

        For more information on the statements see the Documentation link.
        Documentation link:
        http://support.sas.com/documentation/cdl/en/stathpug/68163/HTML/default/viewer.htm#stathpug_hpsplit_syntax.htm

        :param kwargs: dict
        :return: SAS result object
        """
        required_set = {}
        legal_set = {'class', 'performance', 'target', 'var', 'procopts'}
        # Use the instance logger for consistency with hpimpute/hpbin
        # (was a module-level logging.debug call).
        self.logger.debug("kwargs type: " + str(type(kwargs)))
        return SASProcCommons._run_proc(self, "HPSAMPLE", required_set, legal_set, **kwargs)
|
from typing import cast
from src.const_settings import TOKENS_JSON_PATH
from src.custom_types import TokenTable
from src.utils import load_json_default
# Token lookup table, loaded once at import time from the configured JSON file.
# NOTE(review): load_json_default presumably supplies a fallback value when the
# file is missing -- confirm in src.utils.
TOKEN_TABLE = cast(TokenTable, load_json_default(TOKENS_JSON_PATH))
|
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from csv import writer
import time
start_page = 1
pages_to_scrape = 1
sleep_time = 2  # seconds to wait between scrolls so the page can lazy-load rows

options = Options()
options.headless = True
driver = webdriver.Chrome(options=options)


def load_page(page_num):
    """Open one coinmarketcap listing page and scroll past the header."""
    driver.get('https://coinmarketcap.com/?page=' + str(page_num))
    driver.execute_script("window.scrollTo(0, 750)")


try:
    with open('cryptocurrencies.csv', 'w', encoding='utf8', newline='') as f:
        tw = writer(f)
        tw.writerow(['name', 'symbol', 'price', 'market cap', 'circulating supply'])
        for page_num in range(start_page-1, pages_to_scrape):
            load_page(page_num+1)
            driver.execute_script("window.scrollTo(0, 750)")
            # The table lazy-loads 10 rows at a time: scroll, re-parse, repeat.
            for i in range(10):
                soup = BeautifulSoup(driver.page_source, 'html.parser')
                table_entries = soup.find_all('tbody')[0].find_all('tr')[i*10:i*10+10]
                for table_entry in table_entries:
                    # NOTE(review): these generated CSS class names are brittle
                    # and break whenever the site redeploys its frontend.
                    name = table_entry.find('p', class_='sc-1eb5slv-0 iworPT').text
                    symbol = table_entry.find('p', class_='sc-1eb5slv-0 gGIpIK coin-item-symbol').text
                    price = table_entry.find('div', class_='sc-131di3y-0 cLgOOr').find('span').text
                    market_cap = table_entry.find('p', class_='sc-1eb5slv-0 hykWbK').find('span', class_='sc-1ow4cwt-1 ieFnWP').text
                    circulating_supply = table_entry.find('p', class_='sc-1eb5slv-0 kZlTnE').text
                    tw.writerow([name, symbol, price, market_cap, circulating_supply])
                driver.execute_script("window.scrollTo(0, window.scrollY + 1100)")
                time.sleep(sleep_time)
finally:
    # Always release the browser process, even if scraping fails partway;
    # the original script leaked a headless Chrome on every run/error.
    driver.quit()
from utils.distance_metrics import manhattan_distance, eculidean_distance
from utils.search_algorithms import BFS, DFS, A_STAR
from puzzle.puzzle_state import PuzzleState
import math
import time
import resource
class PuzzleSolver(object):
    """Solve an n-puzzle with BFS, DFS, or A* (manhattan/euclidean heuristic)."""

    def __init__(self, initial_state, goal, algorithm='bfs', heuristic=None):
        """
        initial_state -- flat sequence of tile values (row-major); 0 is the blank
        goal -- goal configuration in the same flat layout
        algorithm -- 'bfs', 'dfs' or 'ast'
        heuristic -- 'manhattan' or 'euclidean'; required when algorithm == 'ast'
        """
        self.initial_state = initial_state
        # Assign the search algorithm that will be used in the solver.
        if algorithm == 'bfs':
            self.search_alg = BFS
        elif algorithm == 'dfs':
            self.search_alg = DFS
        elif algorithm == 'ast':
            self.search_alg = A_STAR
        else:
            raise NotImplementedError("No such algorithm is supported.")
        # Assign the heuristic algorithm that will be used in the solver.
        if heuristic is None and algorithm == 'ast':
            raise AttributeError("Required Attribute `heuristic` in case of using A* Search.")
        elif heuristic == 'manhattan':
            self.dist_metric = manhattan_distance
        elif heuristic == 'euclidean':
            self.dist_metric = eculidean_distance
        elif heuristic is None and algorithm != 'ast':
            pass
        else:
            raise NotImplementedError("No such Heuristic is supported.")
        # Create a Puzzle State Object with the inputs for Solver.
        initial_state = tuple(map(int, initial_state))
        size = int(math.sqrt(len(initial_state)))
        self.puzzle_state = PuzzleState(initial_state, size, goal, self.calculate_total_cost)
        # Fail fast on unsolvable configurations.
        if not self.puzzle_state.is_solvable():
            raise Exception("The initial state entered is not solvable !")

    def calculate_total_cost(self, state):
        """Return f(n) = g(n) + h(n): path cost plus summed tile distance to goal."""
        sum_heuristic = 0
        for i, item in enumerate(state.config):
            current_row = i // state.n
            current_col = i % state.n
            goal_idx = state.goal.index(item)
            goal_row = goal_idx // state.n
            goal_col = goal_idx % state.n
            sum_heuristic += self.dist_metric(current_row, current_col, goal_row, goal_col)
        return sum_heuristic + state.cost

    def writeOutput(self, result, running_time, ram_usage):
        """Print the search statistics for a finished run."""
        final_state, nodes_expanded, max_search_depth = result
        # Reconstruct the action path by walking parent links back to the root.
        path_to_goal = [final_state.action]
        cost_of_path = final_state.cost
        parent_state = final_state.parent
        while parent_state:
            if parent_state.parent:
                path_to_goal.append(parent_state.action)
            parent_state = parent_state.parent
        path_to_goal.reverse()
        search_depth = len(path_to_goal)
        print("******* Results *******")
        print("path_to_goal: " + str(path_to_goal) + "\n")
        print("cost_of_path: " + str(cost_of_path) + "\n")
        print("nodes_expanded: " + str(nodes_expanded) + "\n")
        print("search_depth: " + str(search_depth) + "\n")
        print("max_search_depth: " + str(max_search_depth) + "\n")
        print("running_time: " + str(running_time) + "\n")
        print("max_ram_usage: " + str(ram_usage) + "\n")

    def solve(self):
        """Run the configured search and report results, time, and RAM usage."""
        start_time = time.time()
        mem_init = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
        if self.search_alg == A_STAR:
            results = A_STAR(self.puzzle_state, self.calculate_total_cost)
        else:
            results = self.search_alg(self.puzzle_state)
        running_time = time.time() - start_time
        mem_final = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
        # NOTE(review): ru_maxrss is kilobytes on Linux but bytes on macOS;
        # the /1024 conversion assumes Linux.
        ram_usage = (mem_final - mem_init) / 1024
        self.writeOutput(results, running_time, ram_usage)
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import mysql.connector
from mysql.connector import errorcode
def close_db(cursor, cnx):
    """Release a cursor and its connection, in that order."""
    for handle in (cursor, cnx):
        handle.close()
def open_db():
    """Connect to the local `pythontest` MySQL database.

    Returns the connection on success; prints a diagnostic and implicitly
    returns None when the connection cannot be established.
    """
    try:
        return mysql.connector.connect(
            user='root',
            password='toor',
            host='127.0.0.1',
            database='pythontest',
            raise_on_warnings=True,
        )
    except mysql.connector.Error as err:
        # Map the two most common failure codes to friendlier messages.
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
|
import threading
import time
import subprocess
class mpstat(threading.Thread):
    """Background thread that samples CPU usage via the `mpstat` tool.

    Parses `mpstat <interval>` output and reports two metrics through the
    callback object: cpu/inuse (100 - %idle) and cpu/steal (%steal).
    """

    def __init__(self, callback):
        """Form the monitor and register the monitored metrics."""
        super(mpstat, self).__init__()
        self.stop_flag = False
        self.interval = None
        self.callback = callback
        self.callback.add('cpu', 'inuse')
        self.callback.add('cpu', 'steal')

    def stop(self):
        """Prepare monitor to stop."""
        self.stop_flag = True

    def set(self, argument, value):
        """Set a monitor argument.

        Arguments:
        argument -- argument name (only 'interval' is recognized)
        value -- value for the argument
        """
        if argument == 'interval':
            self.interval = value

    def run(self):
        """Monitor the metrics until stop() is called or mpstat exits."""
        if not self.interval:
            raise RuntimeError("mpstat monitor started without an interval; call set('interval', ...) first")
        # open mpstat as a subprocess
        process_parameters = ['mpstat', str(self.interval)]
        try:
            mpstat_subprocess = subprocess.Popen(process_parameters,
                                                 stdout=subprocess.PIPE,
                                                 stderr=subprocess.PIPE)
        except (OSError, ValueError):
            # Narrowed from a bare `except:`: Popen raises OSError when the
            # binary is missing and ValueError on invalid arguments.
            print('Could not create mpstat monitor, check if the sysstat package is installed in your system.')
            return
        # discard prelude (banner line and blank line)
        mpstat_subprocess.stdout.readline()
        mpstat_subprocess.stdout.readline()
        # get column numbers from the header line; column layout differs
        # between sysstat versions, so the indices are looked up, not fixed
        output_line = mpstat_subprocess.stdout.readline().decode('ascii')
        output_line_columns = output_line.split()
        idle_col = output_line_columns.index('%idle')
        steal_col = output_line_columns.index('%steal')
        while mpstat_subprocess.poll() is None and not self.stop_flag:
            # read one line from the output
            output_line = mpstat_subprocess.stdout.readline().decode('ascii')
            output_line_columns = output_line.split()
            # in_use = 100.0 - %idle; some locales print decimal commas
            idle = float(output_line_columns[idle_col].replace(',', '.'))
            inuse = 100.0 - idle
            self.callback.set_metric('cpu', 'inuse', inuse)
            # steal = %steal
            steal = float(output_line_columns[steal_col].replace(',', '.'))
            self.callback.set_metric('cpu', 'steal', steal)
            # sleep some time
            time.sleep(self.interval)
        self.callback.remove('cpu', 'inuse')
        self.callback.remove('cpu', 'steal')
|
#!/usr/bin/env python
# coding=utf-8
#
# Copyright (C) 2020 Florian Heller, florian.heller@uhasselt.be
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import math
import inkex
from lxml import etree
import re
class LaserSVG(inkex.EffectExtension):
    """Inkscape extension that annotates an SVG with LaserSVG attributes
    (material thickness, kerf, default action) and resizes every element or
    path template that depends on the material thickness."""

    selected_nodes = {}
    LASER_NAMESPACE = "http://www.heller-web.net/lasersvg/"
    LASER_PREFIX = "laser"
    # Clark-notation prefix ("{namespace}") used in ElementTree path predicates.
    LASER = "{%s}" % LASER_NAMESPACE
    laserSVGScriptURL = "http://www2.heller-web.net/lasersvg/lasersvg.js"
    # Thickness stored in the document before this run; origin corrections
    # are computed relative to it.
    oldThickness = 0

    def add_arguments(self, pars):
        """Declare the parameters passed in from the extension UI."""
        pars.add_argument("--kerf_width", default=0, help="The kerf width")
        pars.add_argument("--action", default="cut", help="The default laser operation")
        pars.add_argument("--scale", default="100", help="The scaling factor ")
        pars.add_argument("--interactive", default=True, help="whether or not to add the stylesheet and the JS references to the file")
        pars.add_argument("--material_thickness", default=3, help="The material thickness")
        pars.add_argument("--tab", help="The selected UI-tab when OK was pressed")

    def effect(self):
        """Write the global LaserSVG attributes and apply thickness scaling."""
        # Register the namespace prefix both with etree and inkscape
        etree.register_namespace("laser", self.LASER_NAMESPACE)
        inkex.utils.NSS["laser"] = self.LASER_NAMESPACE
        # Save the old thickness
        oldValue = self.document.getroot().get(inkex.addNS("material-thickness", self.LASER_PREFIX))
        if oldValue is not None:
            self.oldThickness = float(oldValue)
        # lxml only accepts string attribute values and the option defaults
        # are numeric (0 / 3), so convert explicitly before storing.
        # Set/Update the global thickness in the SVG root node
        self.document.getroot().set(inkex.addNS("material-thickness", self.LASER_PREFIX), str(self.options.material_thickness))
        # Set/Update the global kerf value in the SVG root node
        self.document.getroot().set(inkex.addNS("kerf", self.LASER_PREFIX), str(self.options.kerf_width))
        # Set/Update the global laser action in the SVG root node
        self.document.getroot().set(inkex.addNS("action", self.LASER_PREFIX), str(self.options.action))
        # adjust the thickness on all elements
        self.adjust_element_thickness(self.options.material_thickness)
        if self.options.interactive == 'true':
            # Add the JS reference unless one is already present. Clark
            # notation is used for the xlink:href predicate because this
            # findall call has no prefix map for 'xlink'.
            if not self.document.getroot().findall(".//{http://www.w3.org/2000/svg}script[@{http://www.w3.org/1999/xlink}href='http://www2.heller-web.net/lasersvg/lasersvg.js']"):
                # NOTE(review): the script element is created without an SVG
                # namespace; confirm Inkscape serializes it as intended.
                scriptElement = etree.SubElement(self.document.getroot(), "script")
                scriptElement.set("type", "text/javascript")
                scriptElement.set(inkex.addNS("href", "xlink"), self.laserSVGScriptURL)

    def adjust_element_thickness(self, newThickness):
        """Resize every element tagged laser:thickness-adjust and correct its
        position according to the element's laser:origin attribute."""
        # Normalize once: attribute writes need str, arithmetic needs float.
        newThicknessF = float(newThickness)
        newThickness = str(newThickness)
        for node in self.document.getroot().iterfind(".//*[@%sthickness-adjust]" % self.LASER):
            # Attribute lookups must use Clark notation ({ns}name): the former
            # "laser:thickness-adjust" literal never matches in lxml.
            adjust_setting = node.get(inkex.addNS("thickness-adjust", self.LASER_PREFIX))
            if adjust_setting == "width":
                node.attrib["width"] = newThickness
            elif adjust_setting == "height":
                node.attrib["height"] = newThickness
            elif adjust_setting == "both":
                node.attrib["height"] = newThickness
                node.attrib["width"] = newThickness
            # Adjust position if an origin is specified
            originX, originY, centerX, centerY = 0, 0, 0, 0
            origin = node.get(inkex.addNS("origin", self.LASER_PREFIX))
            if origin is not None:
                # Cache the original x/y and center in laser:* attributes on
                # first use, so repeated runs stay anchored to the original
                # geometry. Numeric values are stringified for lxml.
                if node.get(inkex.addNS("x", self.LASER_PREFIX)) is None:
                    centerX = float(node.get("x")) + (self.oldThickness/2)
                    node.set(inkex.addNS("centerX", self.LASER_PREFIX), str(centerX))
                    node.set(inkex.addNS("x", self.LASER_PREFIX), node.get("x"))
                centerX = float(node.get(inkex.addNS("centerX", self.LASER_PREFIX)))
                originX = float(node.get(inkex.addNS("x", self.LASER_PREFIX)))
                if node.get(inkex.addNS("y", self.LASER_PREFIX)) is None:
                    centerY = float(node.get("y")) + (self.oldThickness/2)
                    node.set(inkex.addNS("centerY", self.LASER_PREFIX), str(centerY))
                    node.set(inkex.addNS("y", self.LASER_PREFIX), node.get("y"))
                centerY = float(node.get(inkex.addNS("centerY", self.LASER_PREFIX)))
                originY = float(node.get(inkex.addNS("y", self.LASER_PREFIX)))
                if origin == "bottom":
                    if adjust_setting == "height" or adjust_setting == "both":
                        node.set("y", str(originY + self.oldThickness - newThicknessF))
                elif origin == "right":
                    if adjust_setting == "width" or adjust_setting == "both":
                        node.set("x", str(originX + self.oldThickness - newThicknessF))
                elif origin == "bottom-right":
                    if adjust_setting == "height" or adjust_setting == "both":
                        node.set("y", str(originY + self.oldThickness - newThicknessF))
                    if adjust_setting == "width" or adjust_setting == "both":
                        node.set("x", str(originX + self.oldThickness - newThicknessF))
                elif origin == "center":
                    if adjust_setting == "height" or adjust_setting == "both":
                        node.set("y", str(centerY - (newThicknessF/2)))
                    if adjust_setting == "width" or adjust_setting == "both":
                        node.set("x", str(centerX - (newThicknessF/2)))
        # And now the paths
        self.adjust_path_thickness(newThickness)

    def adjust_path_thickness(self, newThickness):
        """Re-evaluate laser:template path definitions with the new thickness."""
        for node in self.document.getroot().iterfind(".//*[@{}template]".format(self.LASER)):
            template = node.get(inkex.addNS("template", self.LASER_PREFIX))
            pattern = re.compile(r'[{](.*?)[}]')
            # Each {...} chunk is evaluated with `thickness` bound to the new value.
            # NOTE(review): eval() on document content executes arbitrary
            # expressions from the SVG file -- acceptable only for trusted files.
            evaluate = lambda x: str(eval(x.group(1), {}, {"thickness": float(self.options.material_thickness)}))
            result = re.sub(pattern, evaluate, template)
            node.set("d", result)
if __name__ == '__main__':
LaserSVG().run()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import os
import sys
from functools import partial
import xml.etree.ElementTree as ET
import lxml.etree as lxmlET
import yaml
from .frame_range_dlg_ui import *
# import frame_range_dlg_ui
class FrameRangeDialog(QDialog):
    """Dialog that writes per-channel start/end frame ranges into XML configs."""

    def __init__(self, *args, **kwargs):
        super(FrameRangeDialog, self).__init__(*args, **kwargs)
        self.ui = Ui_Dialog()
        self.ui.setupUi(self)
        # Start/end frame spin boxes per laser channel, ordered the same as
        # the channel list used in send_params.
        self.sfs = [self.ui.sf_spinBox_488, self.ui.sf_spinBox_561,
                    self.ui.sf_spinBox_647, self.ui.sf_spinBox_750]
        self.efs = [self.ui.ef_spinBox_488, self.ui.ef_spinBox_561,
                    self.ui.ef_spinBox_647, self.ui.ef_spinBox_750]
        self.ui.okBtn.clicked.connect(self.send_params)
        self.ui.cancelBtn.clicked.connect(self.cancel)
        # NOTE(review): hard-coded, user-specific config path; consider making
        # it configurable.
        with open(r'C:\Users\Vatsal\QT_Projects\STORM_GUI\configs\XMLs.yml') as f:
            # safe_load avoids executing arbitrary YAML tags; plain yaml.load
            # without a Loader argument also fails on PyYAML >= 6.
            self.config = yaml.safe_load(f)

    def send_params(self):
        """Write each channel's start/max frame values into its XML file."""
        sfs = []
        efs = []
        # NOTE(review): sfs/efs are returned but never populated; Qt signal
        # connections ignore the return value anyway.
        for sf, ef, channel in zip(self.sfs, self.efs, [488, 561, 647, 750]):
            file_path = self.config[channel]
            if os.path.exists(file_path):
                tree = lxmlET.parse(file_path)
                root = tree.getroot()
            else:
                self.error_dialog = QErrorMessage()
                self.error_dialog.setWindowTitle('Error')
                self.error_dialog.showMessage('Please enter valid XML path for channel {}!'.format(channel))
                return
            # Update the frame-range elements in place.
            for child in root:
                if child.tag == "start_frame":
                    child.text = str(sf.value())
                if child.tag == "max_frame":
                    child.text = str(ef.value())
            if tree:
                tree.write(file_path)
        msg = QMessageBox()
        msg.setWindowTitle("Success")
        msg.setText('Frame ranges are set!')
        msg.exec_()
        self.close()
        return (sfs, efs)

    def cancel(self):
        """Discard changes and close the dialog."""
        self.close()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = FrameRangeDialog()
ex.show()
app.exec_()
sys.exit()
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017 - for information on the respective copyright owner
# see the NOTICE file and/or the repository https://github.com/boschresearch/statestream
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
import os
from statestream.meta.network import network_rollback, \
shortest_path, \
MetaNetwork
# =============================================================================
class saliency_grad(object):
    """Provide saliency using grad-CAM visualization.

    Builds a local, rolled-out theano copy of the sub-network between a
    ``source`` and a ``target`` neuron pool and computes grad-CAM style
    statistics (gradients, pooled gradients, weighted maps).
    For now we only allow metrics between two (maybe same) nps.

    Parameters:
    -----------
    net : dict
        Complete network dictionary containing all nps, sps, plasts, ifs.
    name : string
        Instance name of this system client.
    client_param : dict
        Dictionary containing system clients parameters.
    """
    def __init__(self, name, net, client_param):
        # Private copies so mutations elsewhere cannot leak into this client.
        self.name = copy.copy(name)
        self.type = "saliency_grad"
        self.p = copy.deepcopy(client_param)
        self.net = copy.deepcopy(net)
        self.param = copy.deepcopy(client_param['param'])
        self.dat = {}
        self.sv = copy.deepcopy(client_param['selected_values'])
        self.mn = MetaNetwork(net)
        # Flatten the [{'name': ..., 'value': ...}, ...] list into a dict.
        self.parameter = {}
        for p,P in enumerate(client_param['params']):
            self.parameter[P['name']] = P['value']
        # Number of samples evaluated (only want the first).
        self.samples = 1
        # Host-side numpy buffers mirroring the shared theano variables
        # created in initialize().
        self.stats = {}
        for v,V in client_param['variables'].items():
            self.stats[v] = np.zeros(V['shape'], dtype=np.float32)
        self.current_frame = 0
    def initialize(self, shm):
        """Initialize this client.

        Sets theano flags, builds neuron/synapse pools for the rolled-out
        client graph and compiles the update function.
        """
        # Check / set device and backend.
        self.parameter['device'] = self.parameter.get('device', 'cpu')
        # Set GPU. Theano reads these flags at import time, so they must be
        # set before the import below.
        os.environ["THEANO_FLAGS"] = 'floatX=float32, optimizer_including=cudnn, optimizer=fast_compile, mode=FAST_RUN, blas.ldflags="-lopenblas"' \
                                     + ",device=" + self.parameter['device']
        # Here now theano may be imported and everything dependent on it.
        import theano
        import theano.tensor as T
        from statestream.neuronal.neuron_pool import NeuronPool
        from statestream.neuronal.synapse_pool import SynapsePool
        # Generate theano variables (shared, one per statistics buffer).
        self.th_stats = {}
        for v,V in self.stats.items():
            self.th_stats[v] \
                = theano.shared(np.zeros(V.shape, dtype=theano.config.floatX),
                                borrow=True,
                                name='stats ' + v)
        # Build nps / sps.
        # ---------------------------------------------------------------------
        # Determine depth: number of hops on the shortest source->target path.
        state_stream = shortest_path(self.net, self.parameter['source'], self.parameter['target'])
        self.client_graph_depth = len(state_stream) - 1
        # Build client graph (one np/sp list per rollout depth).
        self.client_graph_nps = [[] for i in range(self.client_graph_depth + 1)]
        self.client_graph_sps = [[] for i in range(self.client_graph_depth + 1)]
        # Initialize with target layer.
        self.client_graph_nps[self.client_graph_depth].append(self.parameter["target"])
        # Rollback (fill) graph.
        network_rollback(self.net, self.client_graph_depth, self.client_graph_nps, self.client_graph_sps)
        self.nps = {}
        self.sps = {}
        # De-duplicated lists of every np / sp appearing at any depth.
        self.all_nps = list(set([n for np_list in self.client_graph_nps for n in np_list]))
        self.all_sps = list(set([s for sp_list in self.client_graph_sps for s in sp_list]))
        # Create all necessary neuron pools (local net uses self.samples agents).
        loc_net = copy.deepcopy(self.net)
        loc_net['agents'] = self.samples
        loc_mn = MetaNetwork(loc_net)
        self.net['agents'] = 1
        for n in self.all_nps:
            if n not in self.nps:
                self.nps[n] = NeuronPool(n, loc_net, self.param, loc_mn)
        # Create all necessary synapse pools.
        for s in self.all_sps:
            S = self.net["synapse_pools"][s]
            # List of sources (nested: factor list of source lists).
            source_np_list = []
            for I in range(len(S["source"])):
                source_np_list.append([])
                for i in range(len(S["source"][I])):
                    source_np_list[-1].append(self.nps[S["source"][I][i]])
            self.sps[s] = SynapsePool(s,
                                      loc_net,
                                      self.param,
                                      loc_mn,
                                      source_np_list,
                                      self.nps[S["target"]])
        # Rollout network to specified depth.
        # ---------------------------------------------------------------------
        # Rollout network.
        for depth in range(self.client_graph_depth):
            # Post synaptic has to come BEFORE next state.
            for s in self.all_sps:
                if s in self.client_graph_sps[depth + 1]:
                    self.sps[s].compute_post_synaptic(as_empty=False)
                else:
                    self.sps[s].compute_post_synaptic(as_empty=True)
            # Now update next state.
            for n in self.all_nps:
                if n in self.client_graph_nps[depth + 1]:
                    self.nps[n].compute_algebraic_next_state(as_empty=False)
                else:
                    self.nps[n].compute_algebraic_next_state(as_empty=True)
        # Apply magic function on target ('#' is a placeholder for the target
        # state expression).  NOTE(review): eval() on a config-supplied
        # expression -- trusted configuration only.
        tgt_fcn = self.parameter['magic'].replace('#', 'self.nps[self.parameter["target"]].state[self.client_graph_depth]')
        self.target = eval(tgt_fcn)
        self.source = self.nps[self.parameter["source"]].state[0]
        # Define updates for grad-CAM visualization objects.
        self.updates = []
        grad = T.grad(self.target, self.source)
        # Channel-importance weights: gradient averaged over spatial axes.
        avgpool_grad = T.mean(grad, axis=[2,3], keepdims=True)
        grad_weighted_maps = self.source * avgpool_grad
        grad_weighted_map = T.mean(grad_weighted_maps, axis=1, keepdims=True)
        self.updates.append((self.th_stats['grads'], grad))
        self.updates.append((self.th_stats['avgpool_grads'], T.unbroadcast(T.unbroadcast(avgpool_grad, 2), 3)))
        self.updates.append((self.th_stats['grad_weighted_maps'], grad_weighted_maps))
        self.updates.append((self.th_stats['grad_weighted_map'], T.unbroadcast(grad_weighted_map, 1)))
        # Define theano function for updates.
        self.update_client = theano.function([], [], updates=self.updates)
    def update_frame_readin(self, shm):
        """System client dependent read in.

        Nothing to read for this client.
        """
        pass
    def update_frame_writeout(self):
        """Method to compute activation statistics for all child nps.

        Runs the compiled update function and copies the shared theano
        values into the host-side buffers (self.dat['variables'] is
        presumably provided by the surrounding system -- confirm).
        """
        self.current_frame += 1
        self.update_client()
        for v,V in self.th_stats.items():
            self.dat['variables'][v][:] = V.get_value()
|
# This package is for Dr. Andrea's additional brain image folder "EOR" shared in summer 2017.
from __future__ import absolute_import, unicode_literals
import factory
from datetime import datetime
from physical.tests.factory import InstanceFactory, VolumeFactory
from backup.models import Snapshot
class SnapshotFactory(factory.DjangoModelFactory):
    """Factory producing ``backup.models.Snapshot`` test fixtures.

    ``start_at`` is evaluated lazily per built instance; the original
    module-level ``datetime.now()`` was evaluated once at import time, so
    every snapshot created during a test run shared the same stale
    timestamp.
    """
    FACTORY_FOR = Snapshot

    # Deferred to build time instead of import time.
    start_at = factory.LazyAttribute(lambda obj: datetime.now())
    type = Snapshot.SNAPSHOPT
    status = Snapshot.SUCCESS
    instance = factory.SubFactory(InstanceFactory)
    database_name = factory.Sequence(lambda n: 'database_{0}'.format(n))
    size = 1024
    snapshopt_id = factory.Sequence(lambda n: 'id_{0}'.format(n))
    snapshot_name = factory.Sequence(lambda n: 'name_{0}'.format(n))
    volume = factory.SubFactory(VolumeFactory)
|
import pybullet as p
import time

# SAT (separating-axis test) collision demo: a large fixed concave cube as
# ground with a cube and a duck dropped onto it.
p.connect(p.GUI)
p.setGravity(0, 0, -10)
p.setPhysicsEngineParameter(enableSAT=1)

# (urdf file, base position, global scaling, fixed base)
BODIES = [
    ("cube_concave.urdf", [0, 0, -25], 50, True),
    ("cube.urdf", [0, 0, 1], 1, False),
    ("duck_vhacd.urdf", [1, 0, 1], 1, False),
]
for urdf_file, base_pos, scaling, fixed in BODIES:
    p.loadURDF(urdf_file, base_pos,
               globalScaling=scaling,
               useFixedBase=fixed,
               flags=p.URDF_INITIALIZE_SAT_FEATURES)

# Step the simulation at 240 Hz until the GUI window is closed.
while p.isConnected():
    p.stepSimulation()
    pts = p.getContactPoints()
    #print("num contacts = ", len(pts))
    time.sleep(1. / 240.)
|
import time
from sqlitedict import SqliteDict
from nonebot import get_driver
from requests import Session
from requests.cookies import create_cookie
from typing import Dict, Union, Any, List
from .config import Config
from .crypto import *
from .model import *
from .exception import NoLoginException
from .parser import report_parser, xisu_report_parser
# Load the plugin configuration from the global nonebot driver and make sure
# the storage directories exist before anything touches them.
plugin_config = Config.parse_obj(get_driver().config)

for _storage_dir in (plugin_config.data_path, plugin_config.secret_path):
    if not _storage_dir.exists():
        _storage_dir.mkdir(parents=True)

# Per-installation salt used for password encryption, and request timeout.
salt = initialize(plugin_config.salt_path)
API_TIMEOUT = plugin_config.api_timeout
class DBUser:
    """Plain record of one QQ user's stored check-in state.

    Secrets (password, cookies) are held encrypted; the per-user key and IV
    live under ``path`` and are resolved via ``get_passwd_secret_iv``.
    Decrypted views are exposed through ``__getattr__`` as ``password`` and
    ``cookies``.
    """
    qq: str
    path: Path
    username: str
    password: str
    _password: str
    cookies: List
    _cookie_eaisess: str
    _cookie_uukey: str
    _secret: bytes
    _iv: bytes
    checkin_time: str
    xisu_checkin_time: str
    is_stopped: bool
    is_xisu_stopped: bool
    def __init__(self, conf: Dict = None, path: Path = None):
        # `path and path or default` is `path if path else default`.
        self.path = path and path or plugin_config.secret_path
        if conf is not None:
            # conf maps qq -> record dict; only the first record is used.
            for i in conf.values():
                self.qq = i.get('qq')
                self.username = i.get('username')
                self._password = i.get('password')
                self._cookie_eaisess = i.get('cookie_eaisess')
                self._cookie_uukey = i.get('cookie_uukey')
                self._secret, self._iv = get_passwd_secret_iv(self.path, self.qq)
                self.checkin_time = i.get('checkin_time', '08:00')
                self.xisu_checkin_time = i.get('xisu_checkin_time', '08:01|12:01|18:01')
                self.is_stopped = i.get('is_stopped', False)
                self.is_xisu_stopped = i.get('is_xisu_stopped', False)
                break
        else:
            # Fresh user: defaults only; credentials arrive later via
            # set_password() / set_cookie().
            self.checkin_time = '08:00'
            self.xisu_checkin_time = '08:01|12:01|18:01'
            self._cookie_eaisess = ''
            self._cookie_uukey = ''
            self.is_stopped = False
            self.is_xisu_stopped = False
    def set_password(self, password: str):
        # Derives (and persists) this user's secret/iv, then stores the
        # encrypted password.
        self._secret, self._iv = get_passwd_secret_iv(self.path, self.qq, password, salt)
        self._password = msg_encrypt(password, self._secret, self._iv)
    def set_cookie(self, cookie_eaisess: str, cookie_uukey: str):
        # Requires the key material set up by __init__(conf) or set_password().
        assert self._secret and self._iv
        self._cookie_eaisess = msg_encrypt(cookie_eaisess, self._secret, self._iv)
        self._cookie_uukey = msg_encrypt(cookie_uukey, self._secret, self._iv)
    def __getattr__(self, item) -> Any:
        # Virtual decrypted views of the stored secrets.  NOTE(review): any
        # other missing attribute silently resolves to None instead of
        # raising AttributeError -- callers appear to rely on this.
        if item == 'password':
            assert self._secret and self._iv
            return msg_decrypt(self._password, self._secret, self._iv)
        if item == 'cookies':
            assert self._secret and self._iv
            return [msg_decrypt(self._cookie_eaisess, self._secret, self._iv),
                    msg_decrypt(self._cookie_uukey, self._secret, self._iv)]
        return None
    def dict(self) -> dict:
        """Serialize to the {qq: {...}} layout stored in the sqlite dict."""
        return {
            self.qq: {
                "qq": self.qq,
                "username": self.username,
                "password": self._password,
                "cookie_eaisess": self._cookie_eaisess,
                "cookie_uukey": self._cookie_uukey,
                "checkin_time": self.checkin_time,
                "xisu_checkin_time": self.xisu_checkin_time,
                "is_stopped": self.is_stopped,
                "is_xisu_stopped": self.is_xisu_stopped
            }
        }
class BUPTUser(SqliteDict):
    """Per-QQ-user session wrapper around the sqlite-backed store.

    The instance is itself the SqliteDict; the user's record lives under the
    key ``qq`` and is mirrored in ``self.db`` (a DBUser).

    Bug fix vs. original: the post-submit error messages reported
    ``report_page_resp.status_code`` (always 200 at that point) instead of
    ``final_resp.status_code``.
    """
    qq: str
    session: Session
    is_login: bool
    _username: str
    _passwd: str
    db: DBUser

    def __init__(self, qq: Union[str, int]):
        if isinstance(qq, int):
            qq = str(qq)
        super().__init__(plugin_config.database_path, autocommit=True)
        self.qq = qq
        self.is_login = False
        self.session = Session()

    def get_or_create(self,
                      username: str = None,
                      password: str = None,
                      force: bool = False
                      ):
        """Load the stored user, or create/overwrite one when ``force``.

        Returns self for chaining.
        """
        if self.get(self.qq) and not force:
            # Existing record: restore credentials and session cookies.
            self.db = DBUser(self.get(self.qq))
            self._username = self.db.username
            self._passwd = self.db.password
            cookies = self.db.cookies
            self.session.cookies.set_cookie(create_cookie("eai-sess", cookies[0], domain=BASIC_DOMAIN))
            self.session.cookies.set_cookie(create_cookie("UUkey", cookies[1], domain=BASIC_DOMAIN))
            self.is_login = True
        else:
            # New user (or forced re-registration): credentials required.
            assert username and password
            self._username = username
            self._passwd = password
            self.db = DBUser()
            self.db.username = self._username
            self.db.qq = self.qq
            self.db.set_password(self._passwd)
            self.do_login()
        return self

    def do_login(self):
        """POST the credentials and persist the returned session cookies.

        Raises ConnectionError on HTTP or application-level failure.
        """
        login_resp = self.session.post(LOGIN_API, data={
            'username': self._username,
            'password': self._passwd,
        }, timeout=API_TIMEOUT)
        if login_resp.status_code != 200:
            raise ConnectionError(f"Failed to Login!\nError Code: {login_resp.status_code}")
        elif (resp := login_resp.json()).get('e'):
            # 'e' is the API's error flag, 'm' its human-readable message.
            raise ConnectionError(f"Failed to Login!\nError Message: {resp.get('m')}")
        self.db.set_cookie(login_resp.cookies['eai-sess'], login_resp.cookies['UUkey'])
        self.is_login = True
        self.save()

    def save(self):
        # Persist the DBUser record under this user's qq key.
        self[self.qq] = self.db.dict()

    def read(self) -> Dict:
        """Return the raw stored record for this user (or None)."""
        return self.get(self.qq)

    def ncov_checkin(self) -> str:
        """Perform the daily report; return the API's message on success.

        Raises NoLoginException when the session is stale, ConnectionError
        on HTTP/application errors.
        """
        report_page_resp = self.session.get(REPORT_PAGE, allow_redirects=False,
                                            timeout=API_TIMEOUT)
        if report_page_resp.status_code != 200:
            raise ConnectionError(f"Failed to Checkin\nError Code: {report_page_resp.status_code}")
        # The report page embeds "realname" only when logged in.
        if "realname" not in report_page_resp.text:
            raise NoLoginException("Failed to Checkin\nNo Login")
        post_data = report_parser(report_page_resp.text)
        final_resp = self.session.post(REPORT_API, post_data,
                                       headers={'Accept': 'application/json', 'X-Requested-With': 'XMLHttpRequest'},
                                       timeout=API_TIMEOUT)
        if final_resp.status_code != 200:
            # Fixed: report the failing response's code, not the page fetch's.
            raise ConnectionError(f"Failed to Checkin\nError Code: {final_resp.status_code}")
        if (resp := final_resp.json()).get('e'):
            raise ConnectionError(f"Failed to Checkin!\nError Message: {resp.get('m')}")
        return resp.get('m')

    def xisu_ncov_checkin(self) -> str:
        """Perform the xisu report (fetches history data first).

        Same error contract as ncov_checkin().
        """
        report_page_resp = self.session.get(XISU_HISTORY_DATA, allow_redirects=False,
                                            timeout=API_TIMEOUT)
        if report_page_resp.status_code != 200:
            raise ConnectionError(f"Failed to Xisu Checkin\nError Code: {report_page_resp.status_code}")
        data = report_page_resp.json()
        report_page_resp = self.session.get(REPORT_PAGE, allow_redirects=False,
                                            timeout=API_TIMEOUT)
        if report_page_resp.status_code != 200:
            raise ConnectionError(f"Failed to Xisu Checkin\nError Code: {report_page_resp.status_code}")
        if "realname" not in report_page_resp.text:
            raise NoLoginException("Failed to Xisu Checkin\nNo Login")
        post_data = xisu_report_parser(report_page_resp.text, data)
        final_resp = self.session.post(XISU_REPORT_API, post_data,
                                       headers={'Accept': 'application/json', 'X-Requested-With': 'XMLHttpRequest'},
                                       timeout=API_TIMEOUT)
        if final_resp.status_code != 200:
            # Fixed: report the failing response's code, not the page fetch's.
            raise ConnectionError(f"Failed to Xisu Checkin\nError Code: {final_resp.status_code}")
        if (resp := final_resp.json()).get('e'):
            raise ConnectionError(f"Failed to Xisu Checkin!\nError Message: {resp.get('m')}")
        return resp.get('m')
|
import torch
import torch.nn as nn
import torch.optim as optim
from bert_pytorch import BERT
from utils import load_raw_math_data, load_vocab, make_weights_for_balanced_classes
from adabelief_pytorch import AdaBelief
from MathDataset import MathDataset
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
import random as rd
import os
import numpy as np
class MathNet(nn.Module):
    """Classification head over a frozen BERT encoder.

    The encoder output of shape (batch, seq_len, dmodel) is flattened and
    mapped to ``class_num`` logits by one linear layer; only that layer
    remains trainable.
    """

    def __init__(self, bert, class_num, seq_len, dmodel):
        super().__init__()
        self.BERT = bert
        self.W = nn.Linear(dmodel * seq_len, class_num)
        self.class_num = class_num
        self.seq_len = seq_len
        # Freeze the encoder -- only the classification head is optimized.
        for frozen_param in self.BERT.parameters():
            frozen_param.requires_grad = False

    def forward(self, sequence):
        encoded = self.BERT(sequence)
        batch_size = encoded.shape[0]
        flattened = encoded.reshape(batch_size, -1)
        return self.W(flattened)
if __name__ == "__main__":
    # Training entry point: fine-tune the classification head on the math
    # question dataset using a class-balanced sampler.
    if not os.path.exists("./output"):
        os.makedirs("./output")
    # Raw questions plus integer labels, and the token vocabulary.
    questions, labels = load_raw_math_data("./data/traindata.tsv")
    vocab = load_vocab("./data/vocab.txt")
    seq_len = 128
    number_of_classes = 7
    data_len = len(questions)
    device = torch.device("cuda:0")
    # Pre-trained BERT encoder checkpoint (frozen inside MathNet).
    bert = torch.load("./pretrained/bert_trained.model.ep1000")
    math_dataset = MathDataset(questions, labels, vocab)
    # Oversample minority classes via a weighted sampler.
    weights = make_weights_for_balanced_classes(labels, number_of_classes)
    weights = torch.DoubleTensor(weights)
    sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))
    train_dataset = torch.utils.data.DataLoader(math_dataset, batch_size = 128, sampler = sampler, pin_memory=True)
    model = MathNet(bert, number_of_classes, 128, 468).to(device)
    optimizer = AdaBelief(model.parameters(), lr = 0.0001, eps=1e-8, betas=(0.9, 0.999), weight_decouple = True, rectify = False)
    #optimizer = optim.SGD(model.parameters(), lr = 0.0001, weight_decay = 0.5)
    criterionA = nn.CrossEntropyLoss()
    pbar = tqdm(range(0,300000))
    for epoch in pbar:
        avg_loss = 0.0
        model.train()
        # Train
        for batch in train_dataset:
            # NOTE(review): assumes MathDataset yields (tokens, ?, label);
            # confirm index 2 really is the label tensor.
            questions = batch[0].to(device)
            labels = batch[2].to(device)
            pred_class = model(questions)
            loss = criterionA(pred_class, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            avg_loss += loss.item()
        # Checkpoint the full model every 20 epochs (skipping epoch 0).
        if epoch and epoch % 20 == 0:
            print("model save...")
            torch.save(model, "./output/classifier_base"+f"_{epoch}"+".pth")
        # Mean training loss for this epoch (despite the variable name).
        cur_epoch = avg_loss / len(train_dataset)
        pbar.set_description("train loss : %f " % cur_epoch)
|
import unittest
from pathlib import Path
import numpy as np
import torch
from PIL import Image
import frarch.datasets.transforms as t
# Location of fixture data; not referenced by the tests visible in this file.
DATA_FOLDER = Path("./tests/data/")
class TestImageTransforms(unittest.TestCase):
    """Unit tests for the PIL-based transforms in frarch.datasets.transforms."""

    # A square and a non-square random RGB image shared by all tests.
    SYM_IMG = Image.fromarray(np.random.randint(0, 255, (100, 100, 3)).astype(np.uint8))
    ASYM_IMG = Image.fromarray(np.random.randint(0, 255, (50, 100, 3)).astype(np.uint8))

    def test_RandomFlip_symetric(self):
        for _ in range(10):
            self.assertEqual(t.RandomFlip()(self.SYM_IMG).size, self.SYM_IMG.size)

    def test_RandomFlip_asymetric(self):
        for _ in range(10):
            self.assertEqual(t.RandomFlip()(self.ASYM_IMG).size, self.ASYM_IMG.size)

    def test_RandomFlip_not_PIL_image(self):
        self.assertRaises(ValueError, t.RandomFlip(), np.random.randint((10, 10, 3)))

    def test_RandomRotate_symetric(self):
        for _ in range(10):
            self.assertEqual(t.RandomRotate()(self.SYM_IMG).size, self.SYM_IMG.size)

    def test_RandomRotate_asymetric(self):
        # Rotation may transpose a non-square image's dimensions.
        possible_sizes = [self.ASYM_IMG.size, tuple(reversed(self.ASYM_IMG.size))]
        for _ in range(10):
            self.assertIn(t.RandomRotate()(self.ASYM_IMG).size, possible_sizes)

    def test_RandomRotate_not_PIL_image(self):
        self.assertRaises(ValueError, t.RandomRotate(), np.random.randint((10, 10, 3)))

    def test_PILColorBalance(self):
        self.assertEqual(t.PILColorBalance(0.1)(self.SYM_IMG).size, self.SYM_IMG.size)

    def test_PILColorBalance_negative_alpha(self):
        self.assertRaises(ValueError, t.PILColorBalance, -1.0)

    def test_PILColorBalance_too_big_alpha(self):
        self.assertRaises(ValueError, t.PILColorBalance, 3.0)

    def test_PILColorBalance_not_float_alpha(self):
        self.assertRaises(ValueError, t.PILColorBalance, int(1))

    def test_PILColorBalance_not_PIL_image(self):
        self.assertRaises(ValueError, t.PILColorBalance(0.1), np.random.randint((10, 10, 3)))

    def test_PILContrast(self):
        self.assertEqual(t.PILContrast(0.1)(self.SYM_IMG).size, self.SYM_IMG.size)

    def test_PILContrast_negative_alpha(self):
        self.assertRaises(ValueError, t.PILContrast, -1.0)

    def test_PILContrast_too_big_alpha(self):
        self.assertRaises(ValueError, t.PILContrast, 3.0)

    def test_PILContrast_not_float_alpha(self):
        self.assertRaises(ValueError, t.PILContrast, int(1))

    def test_PILContrast_not_PIL_image(self):
        self.assertRaises(ValueError, t.PILContrast(0.1), np.random.randint((10, 10, 3)))

    def test_PILBrightness(self):
        self.assertEqual(t.PILBrightness(0.1)(self.SYM_IMG).size, self.SYM_IMG.size)

    def test_PILBrightness_negative_alpha(self):
        self.assertRaises(ValueError, t.PILBrightness, -1.0)

    def test_PILBrightness_too_big_alpha(self):
        self.assertRaises(ValueError, t.PILBrightness, 3.0)

    def test_PILBrightness_not_float_alpha(self):
        self.assertRaises(ValueError, t.PILBrightness, int(1))

    def test_PILBrightness_not_PIL_image(self):
        self.assertRaises(ValueError, t.PILBrightness(0.1), np.random.randint((10, 10, 3)))

    def test_PILSharpness(self):
        self.assertEqual(t.PILSharpness(0.1)(self.SYM_IMG).size, self.SYM_IMG.size)

    def test_PILSharpness_negative_alpha(self):
        self.assertRaises(ValueError, t.PILSharpness, -1.0)

    def test_PILSharpness_too_big_alpha(self):
        self.assertRaises(ValueError, t.PILSharpness, 3.0)

    def test_PILSharpness_not_float_alpha(self):
        self.assertRaises(ValueError, t.PILSharpness, int(1))

    def test_PILSharpness_not_PIL_image(self):
        self.assertRaises(ValueError, t.PILSharpness(0.1), np.random.randint((10, 10, 3)))

    def test_RandomOrder(self):
        # Torch seed 0 keeps the given order; seed 1 swaps it.
        for seed in range(2):
            calls = []

            def first(*args):
                calls.append(0)
                return args

            def second(*args):
                calls.append(1)
                return args

            torch.random.manual_seed(seed)
            t.RandomOrder([first, second])(self.SYM_IMG)
            expected = [0, 1] if seed == 0 else [1, 0]
            self.assertEqual(calls, expected)

    def test_RandomOrder_None(self):
        self.assertEqual(t.RandomOrder(None)(self.SYM_IMG), self.SYM_IMG)

    def test_RandomOrder_empty_list(self):
        self.assertEqual(t.RandomOrder([])(self.SYM_IMG), self.SYM_IMG)

    def test_RandomOrder_transforms_not_iterable(self):
        def passthrough(*args):
            return args
        self.assertRaises(ValueError, t.RandomOrder, passthrough)

    def test_RandomOrder_transforms_not_callable(self):
        self.assertRaises(ValueError, t.RandomOrder, [0, 1])

    def test_RandomOrder_not_PIL_image(self):
        self.assertRaises(ValueError, t.RandomOrder(None), np.random.randint((10, 10, 3)))

    def test_PowerPil_rotate_not_bool(self):
        self.assertRaises(ValueError, t.PowerPIL, rotate="True")

    def test_PowerPil_flip_not_bool(self):
        self.assertRaises(ValueError, t.PowerPIL, flip="True")

    def test_PowerPil_colorbalance_not_float(self):
        self.assertRaises(ValueError, t.PowerPIL, colorbalance="0.5")

    def test_PowerPil_colorbalance_not_in_range(self):
        self.assertRaises(ValueError, t.PowerPIL, colorbalance=1.1)

    def test_PowerPil_contrast_not_float(self):
        self.assertRaises(ValueError, t.PowerPIL, contrast="0.5")

    def test_PowerPil_contrast_not_in_range(self):
        self.assertRaises(ValueError, t.PowerPIL, contrast=1.1)

    def test_PowerPil_brightness_not_float(self):
        self.assertRaises(ValueError, t.PowerPIL, brightness="0.5")

    def test_PowerPil_brightness_not_in_range(self):
        self.assertRaises(ValueError, t.PowerPIL, brightness=1.1)

    def test_PowerPil_sharpness_not_float(self):
        self.assertRaises(ValueError, t.PowerPIL, sharpness="0.5")

    def test_PowerPil_sharpness_not_in_range(self):
        self.assertRaises(ValueError, t.PowerPIL, sharpness=1.1)

    def test_PowerPIL(self):
        pipeline = t.PowerPIL(
            rotate=True,
            flip=True,
            colorbalance=0.4,
            contrast=0.4,
            brightness=0.4,
            sharpness=0.4,
        )
        self.assertEqual(len(pipeline.transforms), 6)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
from selenium.webdriver import Firefox

b = Firefox()

url = 'http://selenium.dunossauro.live/aula_06_a.html'

b.get(url)

# Bug fix: every selector line below was commented out, so `nome`, `senha`
# and `btn` were undefined and the script crashed with NameError at the
# send_keys calls.  Use the "type" attribute strategy [attr=value]:
nome = b.find_element_by_css_selector('[type="text"]')
senha = b.find_element_by_css_selector('[type="password"]')
btn = b.find_element_by_css_selector('[type="submit"]')

# Alternative selector strategies, kept for reference:

# using the name attribute [attr=value]
# nome = b.find_element_by_css_selector('[name="nome"]')
# senha = b.find_element_by_css_selector('[name="senha"]')
# btn = b.find_element_by_css_selector('[name="l0c0"]')

# using the substring selector [attr*=value]
# nome = b.find_element_by_css_selector('[name*="ome"]')
# senha = b.find_element_by_css_selector('[name*="nha"]')
# btn = b.find_element_by_css_selector('[name*="l0"]')

# using the hyphen-prefix selector [attr|=value]
# nome = b.find_element_by_css_selector('[name|="nome"]')
# senha = b.find_element_by_css_selector('[name|="senha"]')
# btn = b.find_element_by_css_selector('[name|="l0c0"]')

# using the prefix selector [attr^=value]
# nome = b.find_element_by_css_selector('[name^="n"]')
# senha = b.find_element_by_css_selector('[name^="s"]')
# btn = b.find_element_by_css_selector('[name^="l"]')

# Fill in the form and submit.
nome.send_keys('Eduardo')
senha.send_keys('batatinhas123')
btn.click()
|
"""Misc helper functions."""
import time
import logging
import socket
import functools
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class TimeIt(object):
    """Context manager that logs the wall-clock duration of its block."""

    def __init__(self, description):
        self.description = description

    def __enter__(self):
        self._start = time.time()

    def __exit__(self, exc_type, exc_value, tb):
        minutes, seconds = divmod(int(time.time() - self._start), 60)
        logger.info("{}: {}min {}sec".format(
            self.description, minutes, seconds))
@functools.lru_cache(maxsize=None)
def wait_for(server, port, timeout=30):
    """Wait for the provided server to be ready at the TCP level.

    Blocks until a TCP connection to ``server:port`` succeeds, or raises
    ``TimeoutError`` after ``timeout`` seconds.  Results are cached per
    (server, port, timeout) so repeated callers do not re-wait --
    presumably intentional for start-up orchestration.

    Bug fix vs. original: a single socket object was reused after failed
    ``connect()`` attempts (unreliable on several platforms) and leaked
    when TimeoutError was raised; now a fresh connection is attempted each
    iteration and closed on all paths.
    """
    end = time.monotonic() + timeout
    while True:
        next_timeout = end - time.monotonic()
        if next_timeout < 0:
            raise TimeoutError(f"{server}:{port} not ready "
                               f"after {timeout} seconds")
        try:
            # create_connection() closes its socket itself on failure.
            sock = socket.create_connection((server, port), timeout=next_timeout)
        except socket.timeout:
            logger.info(f"cannot connect to {server}:{port} "
                        f"after {next_timeout} seconds")
            continue
        except socket.error as e:
            logger.info(f"{server}:{port} not reachable: {e}")
            time.sleep(min(next_timeout, 1, timeout / 10))
            continue
        sock.close()
        break
|
# Generated by Django 4.0.2 on 2022-02-22 23:48
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see file header): adds Parameter.is_global
    # and tightens verbose names / choices on existing fields.

    dependencies = [
        ("django_app_parameter", "0001_initial"),
    ]

    operations = [
        migrations.AddField(
            model_name="parameter",
            name="is_global",
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name="parameter",
            name="description",
            field=models.TextField(blank=True, verbose_name="Description"),
        ),
        migrations.AlterField(
            model_name="parameter",
            name="name",
            field=models.CharField(max_length=100, verbose_name="Nom"),
        ),
        migrations.AlterField(
            model_name="parameter",
            name="value",
            field=models.CharField(max_length=250, verbose_name="Valeur"),
        ),
        migrations.AlterField(
            model_name="parameter",
            name="value_type",
            field=models.CharField(
                choices=[
                    ("INT", "Nombre entier"),
                    ("STR", "Chaîne de caractères"),
                    ("FLT", "Nombre à virgule (Float)"),
                    ("DCL", "Nombre à virgule (Decimal)"),
                    ("JSN", "JSON"),
                ],
                default="STR",
                max_length=3,
                verbose_name="Type de donnée",
            ),
        ),
    ]
|
"""General utility functions.
"""
import copy
import os
import re
import socket
import threading
import uuid
from stomp.constants import *
# List of all host names (unqualified, fully-qualified, and IP
# addresses) that refer to the local host (both loopback interface
# and external interfaces). This is used for determining
# preferred targets.
LOCALHOST_NAMES = ["localhost", "127.0.0.1"]

# STOMP frame terminator byte.
NULL = b'\x00'

# Hostname lookups can hang or fail in odd network setups, so each probe is
# individually guarded and the whole scan can be skipped via the env var.
if not os.environ.get('STOMP_SKIP_HOSTNAME_SCAN'):
    try:
        LOCALHOST_NAMES.append(socket.gethostbyname(socket.gethostname()))
    except Exception:
        pass
    try:
        LOCALHOST_NAMES.append(socket.gethostname())
    except Exception:
        pass
    try:
        LOCALHOST_NAMES.append(socket.getfqdn(socket.gethostname()))
    except Exception:
        pass
def decode(byte_data, encoding='utf-8'):
    """
    Decode *byte_data* to a string, passing ``None`` through unchanged.
    Undecodable bytes are replaced rather than raising.

    :param bytes byte_data: the data to decode

    :rtype: str
    """
    return None if byte_data is None else byte_data.decode(encoding, errors='replace')
def encode(char_data, encoding='utf-8'):
    """
    Encode the parameter as a byte string.

    Strings are encoded with *encoding* (unencodable characters replaced);
    bytes pass through unchanged; anything else raises TypeError.

    :param char_data: the data to encode

    :rtype: bytes
    """
    # isinstance (rather than an exact `type(...) is` check) also accepts
    # str/bytes subclasses, which encode/pass through identically.
    if isinstance(char_data, str):
        return char_data.encode(encoding, errors='replace')
    elif isinstance(char_data, bytes):
        return char_data
    else:
        raise TypeError('message should be a string or bytes, found %s' % type(char_data))
def pack(pieces=()):
    """
    Concatenate a sequence of byte strings into a single byte string.

    :param list pieces: list of byte strings

    :rtype: bytes
    """
    buffer = bytearray()
    for piece in pieces:
        buffer.extend(piece)
    return bytes(buffer)
def join(chars=()):
    """
    Concatenate a sequence of byte chunks and decode the result to str.

    :param bytes chars: list of chars

    :rtype: str
    """
    combined = bytes().join(chars)
    return combined.decode()
##
# Used to parse STOMP header lines in the format "key:value",
#
HEADER_LINE_RE = re.compile('(?P<key>[^:]+)[:](?P<value>.*)')
##
# As of STOMP 1.2, lines can end with either line feed, or carriage return plus line feed.
# Operates on bytes: locates the blank line separating headers from body.
#
PREAMBLE_END_RE = re.compile(b'\n\n|\r\n\r\n')
##
# As of STOMP 1.2, lines can end with either line feed, or carriage return plus line feed.
#
LINE_END_RE = re.compile('\n|\r\n')
##
# Used to replace the "passcode" to be dumped in the transport log (at debug level)
#
PASSCODE_RE = re.compile(r'passcode:\s+[^\' ]+')

# Pre-encoded frame building blocks (newline separator, NULL terminator).
ENC_NEWLINE = encode("\n")
ENC_NULL = encode(NULL)
def default_create_thread(callback):
    """
    Default thread creation - used when the client does not provide its own.
    Spawns and returns a started daemon thread running *callback*.

    :param function callback: the callback function provided to threading.Thread
    """
    # daemon=True so the thread cannot prevent interpreter shutdown.
    worker = threading.Thread(target=callback, daemon=True)
    worker.start()
    return worker
def is_localhost(host_and_port):
    """
    Sort key for connection targets: 1 when the host refers to the local
    machine (member of LOCALHOST_NAMES), 2 otherwise.

    :param (str,int) host_and_port: tuple containing host and port

    :rtype: int
    """
    host, _ = host_and_port
    return 1 if host in LOCALHOST_NAMES else 2
_HEADER_ESCAPES = {
'\r': '\\r',
'\n': '\\n',
':': '\\c',
'\\': '\\\\',
}
_HEADER_UNESCAPES = dict((value, key) for (key, value) in _HEADER_ESCAPES.items())
def _unescape_header(matchobj):
escaped = matchobj.group(0)
unescaped = _HEADER_UNESCAPES.get(escaped)
if not unescaped:
# TODO: unknown escapes MUST be treated as fatal protocol error per spec
unescaped = escaped
return unescaped
def parse_headers(lines, offset=0):
    """
    Parse the headers in a STOMP response.

    Later duplicates of a header key are ignored (first occurrence wins).

    :param list(str) lines: the lines received in the message response
    :param int offset: the starting line number

    :rtype: dict(str,str)
    """
    headers = {}
    for header_line in lines[offset:]:
        header_match = HEADER_LINE_RE.match(header_line)
        if not header_match:
            continue
        key = re.sub(r'\\.', _unescape_header, header_match.group('key'))
        if key in headers:
            continue
        headers[key] = re.sub(r'\\.', _unescape_header, header_match.group('value'))
    return headers
def parse_frame(frame):
    """
    Parse a STOMP frame into a Frame object.

    Returns None when the input contains nothing but blank lines.

    :param bytes frame: the frame received from the server (as a byte string)

    :rtype: Frame
    """
    f = Frame()
    # A lone linefeed byte is the server heartbeat, not a real frame.
    if frame == b'\x0a':
        f.cmd = 'heartbeat'
        return f

    # Split preamble (command + headers) from body at the first blank line.
    mat = PREAMBLE_END_RE.search(frame)
    if mat:
        preamble_end = mat.start()
        body_start = mat.end()
    else:
        # No blank line: the whole frame is preamble, body is empty.
        preamble_end = len(frame)
        body_start = preamble_end
    preamble = decode(frame[0:preamble_end])
    preamble_lines = LINE_END_RE.split(preamble)
    preamble_len = len(preamble_lines)
    f.body = frame[body_start:]

    # Skip any leading newlines
    first_line = 0
    while first_line < preamble_len and len(preamble_lines[first_line]) == 0:
        first_line += 1
    # Nothing but blank lines: not a parsable frame.
    if first_line >= preamble_len:
        return None

    # Extract frame type/command
    f.cmd = preamble_lines[first_line]

    # Put headers into a key/value map
    f.headers = parse_headers(preamble_lines, first_line + 1)

    return f
def merge_headers(header_map_list):
    """
    Combine multiple header maps into one; later maps override earlier keys,
    and empty/None entries are skipped.

    :param list(dict) header_map_list: list of maps

    :rtype: dict
    """
    merged = {}
    for header_map in filter(None, header_map_list):
        merged.update(header_map)
    return merged
def clean_headers(headers):
    """Return *headers* with any passcode masked for logging.

    The original mapping is never mutated; a shallow copy is made only when
    masking is actually needed.
    """
    if 'passcode' not in headers:
        return headers
    masked = copy.copy(headers)
    masked['passcode'] = '********'
    return masked
# lines: lines returned from a call to convert_frames
def clean_lines(lines):
    """Mask any passcode occurring in the stringified frame lines."""
    return PASSCODE_RE.sub('passcode:********', str(lines))
def calculate_heartbeats(shb, chb):
    """
    Negotiate the actual heartbeat settings from the server's advertised
    values (strings) and the client's requested values (ints).  Each side
    is disabled (0) unless both parties asked for it; otherwise the larger
    of the two intervals wins.

    :param (str,str) shb: server heartbeat numbers
    :param (int,int) chb: client heartbeat numbers

    :rtype: (int,int)
    """
    server_x, server_y = shb
    client_x, client_y = chb
    heartbeat_x = max(client_x, int(server_y)) if client_x != 0 and server_y != '0' else 0
    heartbeat_y = max(client_y, int(server_x)) if client_y != 0 and server_x != '0' else 0
    return heartbeat_x, heartbeat_y
def convert_frame(frame):
    """
    Convert a frame to a list of wire-format chunks (bytes), ready to be
    joined and sent.

    :param Frame frame: the Frame object to convert

    :rtype: list(bytes)
    """
    lines = []

    body = None
    if frame.body:
        body = encode(frame.body)
        # Keep content-length consistent with the encoded body size.
        if HDR_CONTENT_LENGTH in frame.headers:
            frame.headers[HDR_CONTENT_LENGTH] = len(body)

    if frame.cmd:
        lines.append(encode(frame.cmd))
        lines.append(ENC_NEWLINE)
    # Headers are emitted in sorted order for a deterministic wire format;
    # tuple values produce one header line per element.
    for key, vals in sorted(frame.headers.items()):
        if vals is None:
            continue
        if type(vals) != tuple:
            vals = (vals,)
        for val in vals:
            lines.append(encode("%s:%s\n" % (key, val)))
    # Blank line terminates the header section.
    lines.append(ENC_NEWLINE)
    if body:
        lines.append(body)

    if frame.cmd:
        # Real frames (unlike heartbeats) are NULL-terminated.
        lines.append(ENC_NULL)
    return lines
def length(s):
    """
    Null (none) safe length function.

    :param str s: the string to return length of (None allowed)

    :rtype: int
    """
    return 0 if s is None else len(s)
class Frame(object):
    """
    A STOMP frame (or message).

    :param str cmd: the protocol command
    :param dict headers: a map of headers for the frame
    :param body: the content of the frame.
    """
    def __init__(self, cmd=None, headers=None, body=None):
        self.cmd = cmd
        # Each frame gets its own headers dict (no shared mutable default).
        self.headers = {} if headers is None else headers
        self.body = body

    def __str__(self):
        parts = (self.cmd, self.headers, self.body)
        return '{cmd=%s,headers=[%s],body=%s}' % parts
def get_uuid():
    """Return a random (version 4) UUID as its canonical string form."""
    return '{}'.format(uuid.uuid4())
def get_errno(e):
    """
    Return the errno of an exception, or the first argument if errno is not available.

    :param Exception e: the exception object
    """
    if hasattr(e, 'errno'):
        return e.errno
    return e.args[0]
import hydra
from omegaconf import DictConfig
from trainers.agent_trainer import run
# Hydra composes configs/experiment.yaml (plus overrides) and injects it.
@hydra.main(config_path="configs/", config_name="experiment")
def main(cfg: DictConfig) -> None:
    """Entry point: unpack the composed config groups and launch training."""
    run(cfg.experiment_info, cfg.env, cfg.agent, cfg.memory, cfg.logging)


if __name__ == "__main__":
    main()
|
# Generated by Django 3.1.6 on 2021-02-07 11:06
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration.
    # NOTE(review): the default is the datetime frozen at makemigrations
    # time (2021-02-07 11:06) -- presumably a stand-in for "now"; confirm
    # whether a callable default (e.g. timezone.now) was intended on the
    # model.

    dependencies = [
        ('main', '0002_auto_20210207_1101'),
    ]

    operations = [
        migrations.AlterField(
            model_name='animal',
            name='date_of_birth',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2021, 2, 7, 11, 6, 3, 95347)),
        ),
    ]
|
from readers.parsers import OcrDataParser
import cv2
import pytesseract as ocr
from unittest import TestCase
class TestReportReader(TestCase):
    """Tests for OcrDataParser against the recorded OCR fixture."""

    def setUp(self):
        # Load the raw OCR dump once per test.  (The original also printed
        # the whole fixture -- leftover debugging that spammed test output.)
        with open('test/fixtures/ocr_data.txt', 'r') as data:
            self.raw_data = data.read()
        self.parser = OcrDataParser()

    def test_returns_list(self):
        rows = self.parser.parse(self.raw_data)
        self.assertIsInstance(rows, list)

    # Fixture-dependent assertions, currently disabled.  Note both were
    # named test_returns_text, so only the last one would have run anyway.
    # def test_returns_text(self):
    #     rows = self.parser.parse(self.raw_data)
    #     self.assertEqual(len(rows), 111)

    # def test_returns_text(self):
    #     rows = self.parser.parse(self.raw_data)
    #     self.assertEqual(rows[14].text(), "316")
|
# Copyright (c) 2015-2020, Swiss Federal Institute of Technology (ETH Zurich)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Training of a recurrent neural network signal decoder."""
import abc
import math
import os
import pickle as pk
import time
import typing as t
from datetime import datetime
from functools import partial
from pathlib import Path
import numpy as np
import pandas as pd
import tensorflow as tf
import toml
from keras import backend as K, initializers, optimizers
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import LSTM, Bidirectional, Dense, TimeDistributed
from keras.models import Sequential
from keras.preprocessing import sequence
from exot.channel._base import Analysis
from exot.exceptions import *
from exot.util.attributedict import AttributeDict, LabelMapping
from exot.util.logging import get_root_logger
from exot.util.misc import validate_helper
from exot.util.wrangle import Matcher
from ._mixins import cut_minibatch, separate_val, stdr, stdr_val, tf_count
# Fixed: __all__ must be a sequence of names; a bare string would be iterated
# character-by-character by `from ... import *` and break star-imports.
__all__ = ("RNNdecoderTrain",)
class RNNdecoderTrain(RNNdecoder):
    """Two-phase trainer for a bidirectional-LSTM CTC signal decoder.

    Phase 0 trains a freshly built 3-layer bidirectional LSTM from scratch
    with ``learning_rate_0``; phase 1 recompiles the same model with
    ``learning_rate_1`` and continues training. Each phase writes its
    configuration, pickled data split, Keras checkpoints and history to a
    time-stamped directory under ``./results/``.

    NOTE(review): the base class ``RNNdecoder`` is not imported in this
    module — confirm where it is defined. ``read_data``, ``batch_size`` and
    ``ctc_cost_tensorflow`` are assumed to be provided by that base class.
    """

    def __init__(self, *args, **kwargs):
        RNNdecoder.__init__(self, *args, **kwargs)
        # Fixed: the original called a bare, undefined `validate` name
        # (NameError on every construction). Binding the config through
        # functools.partial to the imported validate_helper is the evident
        # intent — NOTE(review): confirm validate_helper's signature.
        validate = partial(validate_helper, self.config, msg="RNNdecoderTrain")
        validate(("TRAIN"), AttributeDict)
        validate(("TRAIN", "n_epochs_0"), (int))
        validate(("TRAIN", "n_epochs_1"), (int))
        validate(("TRAIN", "debug_step"), (int))
        validate(("TRAIN", "early_stopping_threshold"), (int))
        validate(("MODEL"), AttributeDict)
        validate(("MODEL", "type"), str)
        validate(("MODEL", "max_grad_norm"), (float, int))
        validate(("MODEL", "p_dropout"), (float, int))
        validate(("MODEL", "num_layers"), (int))
        validate(("MODEL", "num_hidden"), (int))
        # NOTE(review): this validates MODEL.learning_rate, but the properties
        # below read learning_rate_0 / learning_rate_1 — confirm which key the
        # config files actually carry.
        validate(("MODEL", "learning_rate"), (float, int))

    @property
    def n_epochs_0(self):
        # Fixed: previously read the nonexistent key "n_epochs", so phase 0
        # always ran for 0 epochs regardless of the validated configuration.
        return getattr(self.config.TRAIN, "n_epochs_0", 0)

    @property
    def n_epochs_1(self):
        # Fixed: previously read "n_epochs" (see n_epochs_0).
        return getattr(self.config.TRAIN, "n_epochs_1", 0)

    @property
    def start_epoch(self):
        # Lazily cache TRAIN.start_epoch; defaults to 0 when unset.
        if hasattr(self.config.TRAIN, "start_epoch") and not hasattr(self, "_start_epoch"):
            self._start_epoch = self.config.TRAIN["start_epoch"]
        return getattr(self, "_start_epoch", 0)

    @start_epoch.setter
    def start_epoch(self, value: int) -> None:
        self._start_epoch = value

    @property
    def debug_step(self):
        return getattr(self.config.TRAIN, "debug_step", 0)

    @property
    def early_stopping_threshold(self):
        return getattr(self.config.TRAIN, "early_stopping_threshold", 0)

    @property
    def max_grad_norm(self):
        return getattr(self.config.MODEL, "max_grad_norm", None)

    @property
    def p_dropout(self):
        return getattr(self.config.MODEL, "p_dropout", None)

    @property
    def p_keep(self):
        # Fixed: referenced the undefined attribute `model_p_dropout`.
        return 1 - self.p_dropout

    @property
    def num_layers(self):
        return getattr(self.config.MODEL, "num_layers", None)

    @property
    def num_hidden(self):
        return getattr(self.config.MODEL, "num_hidden", None)

    @property
    def learning_rate_0(self):
        return getattr(self.config.MODEL, "learning_rate_0", None)

    @property
    def learning_rate_1(self):
        return getattr(self.config.MODEL, "learning_rate_1", None)

    @property
    def decay(self):
        return getattr(self.config.MODEL, "decay", 0)

    @property
    def momentum(self):
        # NOTE(review): added — _execute_handler referenced self.momentum but
        # no such property existed (AttributeError). The default 0.0 matches
        # keras SGD's own default; confirm against the config schema.
        return getattr(self.config.MODEL, "momentum", 0.0)

    @property
    def max_timesteps(self):
        return getattr(self.config.MODEL, "max_timesteps", 0)

    # -------------------------------------------------------------------------------------------- #
    #                                          Overwrites                                           #
    # -------------------------------------------------------------------------------------------- #
    @property
    def path(self) -> Path:
        """Directory of this analysis inside the experiment's path."""
        return self.experiment.path.joinpath(self.name)

    # -------------------------------------------------------------------------------------------- #
    #                                           Methods                                             #
    # -------------------------------------------------------------------------------------------- #
    def read(self):
        pass

    def write(self):
        pass

    def _execute_handler(self, *args, **kwargs):
        """Run both training phases; see the class docstring for the protocol."""
        # --------------------------------------------------------------------------------------------------
        # Setup directory for storing the results. `run_stamp` replaces the
        # original local `time`, which shadowed the imported time module.
        run_stamp = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
        save_path = "./results/" + run_stamp + "_train_Mulpacks_gandalf_norm"
        if not os.path.isdir(save_path):
            os.mkdir(save_path)
        print("save_path=" + save_path)
        # --------------------------------------------------------------------------------------------------
        # Prepare the input data
        (label, freqv) = self.read_data()
        print("Total {} samples".format(str(len(freqv))))
        # Convert to numpy
        freqv = np.array(freqv)
        label = np.array(label)
        # --------------------------------------------------------------------------------------------------
        # Separate the training and validation data
        freqv, val_freqv, label, val_label, val_index = separate_val(freqv, label)
        # Standardise the training and validation data with the training statistics
        freqv, mean, std = stdr(freqv)
        val_freqv = stdr_val(val_freqv, mean, std)
        # --------------------------------------------------------------------------------------------------
        # Pad the training and validation sequences. Fixed: `max_timesteps` was
        # referenced as a bare (undefined) name; the value lives on the property.
        freqv = sequence.pad_sequences(
            freqv, maxlen=self.max_timesteps, dtype="float64", padding="post"
        )
        val_freqv = sequence.pad_sequences(
            val_freqv, maxlen=self.max_timesteps, dtype="float64", padding="post"
        )
        label = sequence.pad_sequences(label, dtype="int64", padding="post")
        val_label = sequence.pad_sequences(val_label, dtype="int64", padding="post")
        print("label_len=" + str(label.shape[1]) + ", val_label_len=" + str(val_label.shape[1]))
        # Pad both label sets to a common length so they can share one model output.
        self.label_len = max(label.shape[1], val_label.shape[1])
        label = sequence.pad_sequences(
            label, maxlen=self.label_len, dtype="int64", padding="post"
        )
        val_label = sequence.pad_sequences(
            val_label, maxlen=self.label_len, dtype="int64", padding="post"
        )
        print(
            "new: label_len="
            + str(label.shape[1])
            + ", val_label_len="
            + str(val_label.shape[1])
        )
        # --------------------------------------------------------------------------------------------------
        # Define the model: three stacked bidirectional LSTMs, then a
        # per-timestep softmax over 5 classes, trained with a CTC loss.
        model = Sequential()
        model.add(
            Bidirectional(
                LSTM(
                    72,
                    return_sequences=True,
                    implementation=1,
                    activation="tanh",
                    recurrent_activation="sigmoid",
                ),
                input_shape=(self.max_timesteps, 1),
            )
        )
        model.add(
            Bidirectional(
                LSTM(
                    72,
                    return_sequences=True,
                    implementation=1,
                    activation="tanh",
                    recurrent_activation="sigmoid",
                )
            )
        )
        model.add(
            Bidirectional(
                LSTM(
                    72,
                    return_sequences=True,
                    implementation=1,
                    activation="tanh",
                    recurrent_activation="sigmoid",
                )
            )
        )
        model.add(TimeDistributed(Dense(5, activation="softmax")))
        model.compile(
            optimizer=optimizers.SGD(
                lr=self.learning_rate_0,
                momentum=self.momentum,
                decay=self.decay,
                nesterov=True,
                clipnorm=self.max_grad_norm,
            ),
            loss=self.ctc_cost_tensorflow,
        )
        model.summary()
        # --------------------------------------------------------------------------------------------------
        # Write the config_info.txt (context manager guarantees the handle closes)
        with open(save_path + "/config_info.txt", "w") as config:
            config.write(run_stamp + "\n")
            config.write("max_timesteps={}\n".format(self.max_timesteps))
            config.write("self.batch_size={}\n".format(self.batch_size))
            config.write("Total {} samples\n".format(str(len(label))))
            config.write("self.n_epochs_0={}\n".format(self.n_epochs_0))
        # --------------------------------------------------------------------------------------------------
        # Save the configuration and training/val set. Fixed: the original
        # called `pickle.dump`, but the module is imported as `pk`.
        configuration = (
            run_stamp,
            self.max_timesteps,
            self.batch_size,
            self.n_epochs_0,
            freqv,
            val_freqv,
            label,
            val_label,
            val_index,
            mean,
            std,
        )
        with open(save_path + "/config", "wb") as config_tuple:
            pk.dump(configuration, config_tuple)
        # --------------------------------------------------------------------------------------------------
        # Train the model, log the history
        checkpointer = ModelCheckpoint(
            filepath=save_path + "/Output_checkpoint_best", verbose=1, save_best_only=True
        )
        hist = model.fit(
            freqv,
            label,
            batch_size=self.batch_size,
            epochs=self.n_epochs_0,
            shuffle=True,
            validation_data=(val_freqv, val_label),
            callbacks=[checkpointer],
        )
        with open(save_path + "/history.txt", "w") as history:
            history.write(str(hist.history))
        print(hist.history)
        # --------------------------------------------------------------------------------------------------
        # Save the model
        model.save(save_path + "/Output_final")
        ####################################################################################################
        # Phase 1: continue training the same model with learning_rate_1.
        # --------------------------------------------------------------------------------------------------
        self.label_len = label.shape[1]
        model.compile(
            optimizer=optimizers.SGD(
                lr=self.learning_rate_1,
                momentum=self.momentum,
                decay=self.decay,
                nesterov=True,
                clipnorm=self.max_grad_norm,
            ),
            loss=self.ctc_cost_tensorflow,
        )
        model.summary()
        # --------------------------------------------------------------------------------------------------
        # Check what the starting score is. Fixed: evaluated the undefined
        # `val_pattern`; the validation data here is `val_freqv`.
        score = model.evaluate(val_freqv, val_label, batch_size=self.batch_size)
        print("start with val_loss=" + str(score))
        # --------------------------------------------------------------------------------------------------
        # Create a new results directory; the phase-0 stamp plays the role of
        # the previously undefined `load_time`. Fixed: os.isdir -> os.path.isdir.
        load_time = run_stamp
        run_stamp = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
        save_path = "./results/" + run_stamp + " <<<< " + load_time
        if not os.path.isdir(save_path):
            os.mkdir(save_path)
        print("save_path=" + save_path)
        # --------------------------------------------------------------------------------------------------
        # Write the config_info.txt. Fixed: `max_len` was undefined; the padded
        # input length is max_timesteps (the "max_len=" label is kept for
        # compatibility with existing log parsers).
        with open(save_path + "/config_info.txt", "w") as config:
            config.write(run_stamp + "\n")
            config.write("max_len={}\n".format(self.max_timesteps))
            config.write("self.batch_size={}\n".format(self.batch_size))
            config.write("Total {} samples\n".format(str(len(label))))
            config.write("self.n_epochs_1={}\n".format(self.n_epochs_1))
        # --------------------------------------------------------------------------------------------------
        # Save the configuration and training/val set. Fixed: `pattern` and
        # `val_pattern` were undefined copy-paste residue for freqv/val_freqv.
        configuration = (
            run_stamp,
            self.max_timesteps,
            self.batch_size,
            self.n_epochs_1,
            freqv,
            val_freqv,
            label,
            val_label,
            val_index,
            mean,
            std,
        )
        with open(save_path + "/config", "wb") as config_tuple:
            pk.dump(configuration, config_tuple)
        # --------------------------------------------------------------------------------------------------
        # Train the model, log the history
        checkpointer = ModelCheckpoint(
            filepath=save_path + "/Output_checkpoint_best", verbose=1, save_best_only=True
        )
        hist = model.fit(
            freqv,
            label,
            batch_size=self.batch_size,
            epochs=self.n_epochs_1,
            shuffle=True,
            validation_data=(val_freqv, val_label),
            callbacks=[checkpointer],
        )
        with open(save_path + "/history.txt", "w") as history:
            history.write(str(hist.history))
        print(hist.history)
|
#
# Copyright (C) 2016 UAVCAN Development Team <uavcan.org>
#
# This software is distributed under the terms of the MIT License.
#
# Author: Pavel Kirienko <pavel.kirienko@zubax.com>
#
import logging
import multiprocessing
import os
import sys
import time
import tempfile
# Refuse to start under Python 2. Fixed: the original compared the first
# character of the version STRING (`sys.version[0] == '3'`), which is fragile
# (it would also reject a hypothetical Python 10); sys.version_info is the
# canonical numeric check.
assert sys.version_info[0] >= 3, 'Python 3 is required'
#
# Configuring logging before other packages are imported
#
# '--debug' is consumed here (removed from sys.argv) so that later argument
# consumers never see it.
if '--debug' in sys.argv:
    sys.argv.remove('--debug')
    logging_level = logging.DEBUG
else:
    logging_level = logging.INFO
logging.basicConfig(stream=sys.stderr, level=logging_level,
                    format='%(asctime)s %(levelname)s %(name)s %(message)s')
# delete=False keeps the log file on disk after exit; the main window's
# "Open Log Directory" action points the user at this file's directory.
log_file = tempfile.NamedTemporaryFile(mode='w', prefix='uavcan_gui_tool-', suffix='.log', delete=False)
file_handler = logging.FileHandler(log_file.name)
file_handler.setLevel(logging_level)
file_handler.setFormatter(logging.Formatter('%(asctime)s [%(process)d] %(levelname)-8s %(name)-25s %(message)s'))
logging.root.addHandler(file_handler)
logger = logging.getLogger(__name__.replace('__', ''))
logger.info('Spawned')
#
# Applying Windows-specific hacks
#
os.environ['PATH'] = os.environ['PATH'] + ';' + os.path.dirname(sys.executable) # Otherwise it fails to load on Win 10
#
# Configuring multiprocessing.
# Start method must be configured globally, and only once. Using 'spawn' ensures full compatibility with Windoze.
# We need to check first if the start mode is already configured, because this code will be re-run for every child.
#
if multiprocessing.get_start_method(True) != 'spawn':
    multiprocessing.set_start_method('spawn')
#
# Importing other stuff once the logging has been configured
#
import uavcan
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QVBoxLayout, QSplitter, QAction
from PyQt5.QtGui import QKeySequence, QDesktopServices
from PyQt5.QtCore import QTimer, Qt, QUrl
from .version import __version__
from .iface_configurator import run_iface_config_window
from .active_data_type_detector import ActiveDataTypeDetector
from . import update_checker
from .widgets import show_error, get_icon, get_app_icon
from .widgets.node_monitor import NodeMonitorWidget
from .widgets.local_node import LocalNodeWidget
from .widgets.log_message_display import LogMessageDisplayWidget
from .widgets.bus_monitor import BusMonitorManager
from .widgets.dynamic_node_id_allocator import DynamicNodeIDAllocatorWidget
from .widgets.file_server import FileServerWidget
from .widgets.node_properties import NodePropertiesWindow
from .widgets.console import ConsoleManager, InternalObjectDescriptor
from .widgets.subscriber import SubscriberWindow
from .widgets.plotter import PlotterManager
from .widgets.about_window import AboutWindow
from .widgets.can_adapter_control_panel import spawn_window as spawn_can_adapter_control_panel
from .widgets.motor_efficient_analysis import analysisManager
from .panels import PANELS
# UAVCAN node name advertised by the local node (used in GetNodeInfo responses).
NODE_NAME = 'org.uavcan.gui_tool'
class MainWindow(QMainWindow):
MAX_SUCCESSIVE_NODE_ERRORS = 1000
# noinspection PyTypeChecker,PyCallByClass,PyUnresolvedReferences
def __init__(self, node, iface_name):
# Parent
super(MainWindow, self).__init__()
self.setWindowTitle('UAVCAN GUI Tool')
self.setWindowIcon(get_app_icon())
self._node = node
self._successive_node_errors = 0
self._iface_name = iface_name
self._active_data_type_detector = ActiveDataTypeDetector(self._node)
self._node_spin_timer = QTimer(self)
self._node_spin_timer.timeout.connect(self._spin_node)
self._node_spin_timer.setSingleShot(False)
self._node_spin_timer.start(10)
self._node_windows = {} # node ID : window object
self._node_monitor_widget = NodeMonitorWidget(self, node)
self._node_monitor_widget.on_info_window_requested = self._show_node_window
self._local_node_widget = LocalNodeWidget(self, node)
self._log_message_widget = LogMessageDisplayWidget(self, node)
self._dynamic_node_id_allocation_widget = DynamicNodeIDAllocatorWidget(self, node,
self._node_monitor_widget.monitor)
self._file_server_widget = FileServerWidget(self, node)
self._plotter_manager = PlotterManager(self._node)
self._bus_monitor_manager = BusMonitorManager(self._node, iface_name)
# Console manager depends on other stuff via context, initialize it last
self._console_manager = ConsoleManager(self._make_console_context)
#
# File menu
#
quit_action = QAction(get_icon('sign-out'), '&Quit', self)
quit_action.setShortcut(QKeySequence('Ctrl+Shift+Q'))
quit_action.triggered.connect(self.close)
file_menu = self.menuBar().addMenu('&File')
file_menu.addAction(quit_action)
#
# Tools menu
#
show_motor_efficiency_analysis_action = QAction(get_icon('flash'), '&Motor Efficiency Analysis', self)
show_motor_efficiency_analysis_action.setShortcut(QKeySequence('Ctrl+Shift+M'))
show_motor_efficiency_analysis_action.setStatusTip('Perform Motor Efficiency Analysis')
show_motor_efficiency_analysis_action.triggered.connect(self._show_motor_efficiency_analysis_configure)
show_bus_monitor_action = QAction(get_icon('bus'), '&Bus Monitor', self)
show_bus_monitor_action.setShortcut(QKeySequence('Ctrl+Shift+B'))
show_bus_monitor_action.setStatusTip('Open bus monitor window')
show_bus_monitor_action.triggered.connect(self._bus_monitor_manager.spawn_monitor)
show_console_action = QAction(get_icon('terminal'), 'Interactive &Console', self)
show_console_action.setShortcut(QKeySequence('Ctrl+Shift+T'))
show_console_action.setStatusTip('Open interactive console window')
show_console_action.triggered.connect(self._show_console_window)
new_subscriber_action = QAction(get_icon('newspaper-o'), '&Subscriber', self)
new_subscriber_action.setShortcut(QKeySequence('Ctrl+Shift+S'))
new_subscriber_action.setStatusTip('Open subscription tool')
new_subscriber_action.triggered.connect(
lambda: SubscriberWindow.spawn(self, self._node, self._active_data_type_detector))
new_plotter_action = QAction(get_icon('area-chart'), '&Plotter', self)
new_plotter_action.setShortcut(QKeySequence('Ctrl+Shift+P'))
new_plotter_action.setStatusTip('Open new graph plotter window')
new_plotter_action.triggered.connect(self._plotter_manager.spawn_plotter)
show_can_adapter_controls_action = QAction(get_icon('plug'), 'CAN &Adapter Control Panel', self)
show_can_adapter_controls_action.setShortcut(QKeySequence('Ctrl+Shift+A'))
show_can_adapter_controls_action.setStatusTip('Open CAN adapter control panel (if supported by the adapter)')
show_can_adapter_controls_action.triggered.connect(self._try_spawn_can_adapter_control_panel)
tools_menu = self.menuBar().addMenu('&Tools')
tools_menu.addAction(show_bus_monitor_action)
tools_menu.addAction(show_console_action)
tools_menu.addAction(new_subscriber_action)
tools_menu.addAction(new_plotter_action)
tools_menu.addAction(show_can_adapter_controls_action)
tools_menu.addAction(show_motor_efficiency_analysis_action)
#
# Panels menu
panels_menu = self.menuBar().addMenu('&Panels')
for idx, panel in enumerate(PANELS):
action = QAction(panel.name, self)
icon = panel.get_icon()
if icon:
action.setIcon(icon)
if idx < 9:
action.setShortcut(QKeySequence('Ctrl+Shift+%d' % (idx + 1)))
action.triggered.connect(lambda: panel.safe_spawn(self, self._node))
panels_menu.addAction(action)
#
# Help menu
#
uavcan_website_action = QAction(get_icon('globe'), 'Open UAVCAN &Website', self)
uavcan_website_action.triggered.connect(lambda: QDesktopServices.openUrl(QUrl('http://uavcan.org')))
show_log_directory_action = QAction(get_icon('pencil-square-o'), 'Open &Log Directory', self)
show_log_directory_action.triggered.connect(
lambda: QDesktopServices.openUrl(QUrl.fromLocalFile(os.path.dirname(log_file.name))))
about_action = QAction(get_icon('info'), '&About', self)
about_action.triggered.connect(lambda: AboutWindow(self).show())
help_menu = self.menuBar().addMenu('&Help')
help_menu.addAction(uavcan_website_action)
help_menu.addAction(show_log_directory_action)
help_menu.addAction(about_action)
#
# Window layout
#
self.statusBar().show()
def make_vbox(*widgets, stretch_index=None):
box = QVBoxLayout(self)
for idx, w in enumerate(widgets):
box.addWidget(w, 1 if idx == stretch_index else 0)
container = QWidget(self)
container.setLayout(box)
container.setContentsMargins(0, 0, 0, 0)
return container
def make_splitter(orientation, *widgets):
spl = QSplitter(orientation, self)
for w in widgets:
spl.addWidget(w)
return spl
self.setCentralWidget(make_splitter(Qt.Horizontal,
make_vbox(self._local_node_widget,
self._node_monitor_widget,
self._file_server_widget),
make_splitter(Qt.Vertical,
make_vbox(self._log_message_widget),
make_vbox(self._dynamic_node_id_allocation_widget,
stretch_index=1))))
def _try_spawn_can_adapter_control_panel(self):
try:
spawn_can_adapter_control_panel(self, self._node, self._iface_name)
except Exception as ex:
show_error('CAN Adapter Control Panel error', 'Could not spawn CAN Adapter Control Panel', ex, self)
def _make_console_context(self):
default_transfer_priority = 30
active_handles = []
def print_yaml(obj):
"""
Formats the argument as YAML structure using uavcan.to_yaml(), and prints the result into stdout.
Use this function to print received UAVCAN structures.
"""
print(uavcan.to_yaml(obj))
def throw_if_anonymous():
if self._node.is_anonymous:
raise RuntimeError('Local node is configured in anonymous mode. '
'You need to set the local node ID (see the main window) in order to be able '
'to send transfers.')
def request(payload, server_node_id, callback=None, priority=None, timeout=None):
"""
Sends a service request to the specified node. This is a convenient wrapper over node.request().
Args:
payload: Request payload of type CompoundValue, e.g. uavcan.protocol.GetNodeInfo.Request()
server_node_id: Node ID of the node that will receive the request.
callback: Response callback. Default handler will print the response to stdout in YAML format.
priority: Transfer priority; defaults to a very low priority.
timeout: Response timeout, default is set according to the UAVCAN specification.
"""
if isinstance(payload, uavcan.dsdl.CompoundType):
print('Interpreting the first argument as:', payload.full_name + '.Request()')
payload = uavcan.TYPENAMES[payload.full_name].Request()
throw_if_anonymous()
priority = priority or default_transfer_priority
callback = callback or print_yaml
return self._node.request(payload, server_node_id, callback, priority=priority, timeout=timeout)
def serve(uavcan_type, callback):
"""
Registers a service server. The callback will be invoked every time the local node receives a
service request of the specified type. The callback accepts an uavcan.Event object
(refer to the PyUAVCAN documentation for more info), and returns the response object.
Example:
>>> def serve_acs(e):
>>> print_yaml(e.request)
>>> return uavcan.protocol.AccessCommandShell.Response()
>>> serve(uavcan.protocol.AccessCommandShell, serve_acs)
Args:
uavcan_type: UAVCAN service type to serve requests of.
callback: Service callback with the business logic, see above.
"""
if uavcan_type.kind != uavcan_type.KIND_SERVICE:
raise RuntimeError('Expected a service type, got a different kind')
def process_callback(e):
try:
return callback(e)
except Exception:
logger.error('Unhandled exception in server callback for %r, server terminated',
uavcan_type, exc_info=True)
sub_handle.remove()
sub_handle = self._node.add_handler(uavcan_type, process_callback)
active_handles.append(sub_handle)
return sub_handle
def broadcast(payload, priority=None, interval=None, count=None, duration=None):
"""
Broadcasts messages, either once or periodically in the background.
Periodic broadcasting can be configured with one or multiple termination conditions; see the arguments for
more info. Multiple termination conditions will be joined with logical OR operation.
Example:
# Send one message:
>>> broadcast(uavcan.protocol.debug.KeyValue(key='key', value=123))
# Repeat message every 100 milliseconds for 10 seconds:
>>> broadcast(uavcan.protocol.NodeStatus(), interval=0.1, duration=10)
# Send 100 messages with 10 millisecond interval:
>>> broadcast(uavcan.protocol.Panic(reason_text='42!'), interval=0.01, count=100)
Args:
payload: UAVCAN message structure, e.g. uavcan.protocol.debug.KeyValue(key='key', value=123)
priority: Transfer priority; defaults to a very low priority.
interval: Broadcasting interval in seconds.
If specified, the message will be re-published in the background with this interval.
If not specified (which is default), the message will be published only once.
count: Stop background broadcasting when this number of messages has been broadcasted.
By default it is not set, meaning that the periodic broadcasting will continue indefinitely,
unless other termination conditions are configured.
Setting this value without interval is not allowed.
duration: Stop background broadcasting after this amount of time, in seconds.
By default it is not set, meaning that the periodic broadcasting will continue indefinitely,
unless other termination conditions are configured.
Setting this value without interval is not allowed.
Returns: If periodic broadcasting is configured, this function returns a handle that implements a method
'remove()', which can be called to stop the background job.
If no periodic broadcasting is configured, this function returns nothing.
"""
# Validating inputs
if isinstance(payload, uavcan.dsdl.CompoundType):
print('Interpreting the first argument as:', payload.full_name + '()')
payload = uavcan.TYPENAMES[payload.full_name]()
if (interval is None) and (duration is not None or count is not None):
raise RuntimeError('Cannot setup background broadcaster: interval is not set')
throw_if_anonymous()
# Business end is here
def do_broadcast():
self._node.broadcast(payload, priority or default_transfer_priority)
do_broadcast()
if interval is not None:
num_broadcasted = 1 # The first was broadcasted before the job was launched
if duration is None:
duration = 3600 * 24 * 365 * 1000 # See you in 1000 years
deadline = time.monotonic() + duration
def process_next():
nonlocal num_broadcasted
try:
do_broadcast()
except Exception:
logger.error('Automatic broadcast failed, job cancelled', exc_info=True)
timer_handle.remove()
else:
num_broadcasted += 1
if (count is not None and num_broadcasted >= count) or (time.monotonic() >= deadline):
logger.info('Background publisher for %r has stopped',
uavcan.get_uavcan_data_type(payload).full_name)
timer_handle.remove()
timer_handle = self._node.periodic(interval, process_next)
active_handles.append(timer_handle)
return timer_handle
def subscribe(uavcan_type, callback=None, count=None, duration=None, on_end=None):
"""
Receives specified UAVCAN messages from the bus and delivers them to the callback.
Args:
uavcan_type: UAVCAN message type to listen for.
callback: Callback will be invoked for every received message.
Default callback will print the response to stdout in YAML format.
count: Number of messages to receive before terminating the subscription.
Unlimited by default.
duration: Amount of time, in seconds, to listen for messages before terminating the subscription.
Unlimited by default.
on_end: Callable that will be invoked when the subscription is terminated.
Returns: Handler with method .remove(). Calling this method will terminate the subscription.
"""
if (count is None and duration is None) and on_end is not None:
raise RuntimeError('on_end is set, but it will never be called because the subscription has '
'no termination condition')
if uavcan_type.kind != uavcan_type.KIND_MESSAGE:
raise RuntimeError('Expected a message type, got a different kind')
callback = callback or print_yaml
def process_callback(e):
nonlocal count
stop_now = False
try:
callback(e)
except Exception:
logger.error('Unhandled exception in subscription callback for %r, subscription terminated',
uavcan_type, exc_info=True)
stop_now = True
else:
if count is not None:
count -= 1
if count <= 0:
stop_now = True
if stop_now:
sub_handle.remove()
try:
timer_handle.remove()
except Exception:
pass
if on_end is not None:
on_end()
def cancel_callback():
try:
sub_handle.remove()
except Exception:
pass
else:
if on_end is not None:
on_end()
sub_handle = self._node.add_handler(uavcan_type, process_callback)
timer_handle = None
if duration is not None:
timer_handle = self._node.defer(duration, cancel_callback)
active_handles.append(sub_handle)
return sub_handle
def periodic(period_sec, callback):
"""
Calls the specified callback with the specified time interval.
"""
handle = self._node.periodic(period_sec, callback)
active_handles.append(handle)
return handle
def defer(delay_sec, callback):
"""
Calls the specified callback after the specified amount of time.
"""
handle = self._node.defer(delay_sec, callback)
active_handles.append(handle)
return handle
def stop():
"""
Stops all periodic broadcasts (see broadcast()), terminates all subscriptions (see subscribe()),
and cancels all deferred and periodic calls (see defer(), periodic()).
"""
for h in active_handles:
try:
logger.debug('Removing handle %r', h)
h.remove()
except Exception:
pass
active_handles.clear()
def can_send(can_id, data, extended=False):
"""
Args:
can_id: CAN ID of the frame
data: Payload as bytes()
extended: True to send a 29-bit frame; False to send an 11-bit frame
"""
self._node.can_driver.send(can_id, data, extended=extended)
return [
InternalObjectDescriptor('can_iface_name', self._iface_name,
'Name of the CAN bus interface'),
InternalObjectDescriptor('node', self._node,
'UAVCAN node instance'),
InternalObjectDescriptor('node_monitor', self._node_monitor_widget.monitor,
'Object that stores information about nodes currently available on the bus'),
InternalObjectDescriptor('request', request,
'Sends UAVCAN request transfers to other nodes'),
InternalObjectDescriptor('serve', serve,
'Serves UAVCAN service requests'),
InternalObjectDescriptor('broadcast', broadcast,
'Broadcasts UAVCAN messages, once or periodically'),
InternalObjectDescriptor('subscribe', subscribe,
'Receives UAVCAN messages'),
InternalObjectDescriptor('periodic', periodic,
'Invokes a callback from the node thread with the specified time interval'),
InternalObjectDescriptor('defer', defer,
'Invokes a callback from the node thread once after the specified timeout'),
InternalObjectDescriptor('stop', stop,
'Stops all ongoing tasks of broadcast(), subscribe(), defer(), periodic()'),
InternalObjectDescriptor('print_yaml', print_yaml,
'Prints UAVCAN entities in YAML format'),
InternalObjectDescriptor('uavcan', uavcan,
'The main Pyuavcan module'),
InternalObjectDescriptor('main_window', self,
'Main window object, holds references to all business logic objects'),
InternalObjectDescriptor('can_send', can_send,
'Sends a raw CAN frame'),
]
def _show_motor_efficiency_analysis_configure(self):
self._analysisManager=analysisManager(self,self._node)
self._analysisWindows=self._analysisManager._spawnAnalysisWindow()
# self._analysisPlots=self._analysisManager._spawnPlotsWindow()
def _show_console_window(self):
try:
self._console_manager.show_console_window(self)
except Exception as ex:
logger.error('Could not spawn console', exc_info=True)
show_error('Console error', 'Could not spawn console window', ex, self)
return
def _show_node_window(self, node_id):
if node_id in self._node_windows:
# noinspection PyBroadException
try:
self._node_windows[node_id].close()
self._node_windows[node_id].setParent(None)
self._node_windows[node_id].deleteLater()
except Exception:
pass # Sometimes fails with "wrapped C/C++ object of type NodePropertiesWindow has been deleted"
del self._node_windows[node_id]
w = NodePropertiesWindow(self, self._node, node_id, self._file_server_widget,
self._node_monitor_widget.monitor, self._dynamic_node_id_allocation_widget)
w.show()
self._node_windows[node_id] = w
    def _spin_node(self):
        """Timer handler that pumps the UAVCAN node event loop once.

        Counts successive failures; after MAX_SUCCESSIVE_NODE_ERRORS the timer
        is stopped and the node is closed permanently.
        """
        # We're running the node in the GUI thread.
        # This is not great, but at the moment seems like other options are even worse.
        try:
            self._node.spin(0)
            # A successful spin resets the failure counter.
            self._successive_node_errors = 0
        except Exception as ex:
            self._successive_node_errors += 1
            msg = 'Node spin error [%d of %d]: %r' % (self._successive_node_errors, self.MAX_SUCCESSIVE_NODE_ERRORS, ex)
            if self._successive_node_errors >= self.MAX_SUCCESSIVE_NODE_ERRORS:
                # Too many consecutive failures: give up, stop spinning, close the node.
                show_error('Node failure',
                           'Local UAVCAN node has generated too many errors and will be terminated.\n'
                           'Please restart the application.',
                           msg, self)
                self._node_spin_timer.stop()
                self._node.close()
            logger.error(msg, exc_info=True)
            # Transient errors are also surfaced briefly in the status bar.
            self.statusBar().showMessage(msg, 3000)
    def closeEvent(self, qcloseevent):
        """Qt close handler: shut down helper managers before the window closes."""
        self._plotter_manager.close()
        self._console_manager.close()
        self._active_data_type_detector.close()
        super(MainWindow, self).closeEvent(qcloseevent)
def main():
    """Application entry point.

    Loops asking the user for a CAN interface until a UAVCAN node starts
    successfully, then creates the main window and runs the Qt event loop.
    Exits the process with the event loop's return code.
    """
    logger.info('Starting the application')
    app = QApplication(sys.argv)
    while True:
        # Asking the user to specify which interface to work with
        try:
            iface, iface_kwargs = run_iface_config_window(get_app_icon())
            if not iface:
                sys.exit(0)
        except Exception as ex:
            show_error('Fatal error', 'Could not list available interfaces', ex, blocking=True)
            sys.exit(1)
        # Trying to start the node on the specified interface
        try:
            node_info = uavcan.protocol.GetNodeInfo.Response()
            node_info.name = NODE_NAME
            node_info.software_version.major = __version__[0]
            node_info.software_version.minor = __version__[1]
            node = uavcan.make_node(iface,
                                    node_info=node_info,
                                    mode=uavcan.protocol.NodeStatus().MODE_OPERATIONAL,
                                    **iface_kwargs)
            # Making sure the interface is alright
            node.spin(0.1)
        except uavcan.transport.TransferError:
            # Allow unrecognized messages on startup.
            # FIX: logger.warn() is a deprecated alias of logger.warning();
            # also fixed the "occured" typo in the log message.
            logger.warning('UAVCAN Transfer Error occurred on startup', exc_info=True)
            break
        except Exception as ex:
            logger.error('UAVCAN node init failed', exc_info=True)
            show_error('Fatal error', 'Could not initialize UAVCAN node', ex, blocking=True)
        else:
            break
    logger.info('Creating main window; iface %r', iface)
    window = MainWindow(node, iface)
    window.show()
    try:
        update_checker.begin_async_check(window)
    except Exception:
        logger.error('Could not start update checker', exc_info=True)
    logger.info('Init complete, invoking the Qt event loop')
    exit_code = app.exec_()
    node.close()
    sys.exit(exit_code)
|
import sys
class CaptureStream(object):
    """File-like sink that records everything written to it in memory.

    Chunks are collected in a list and joined on access; the original
    implementation used `self.contents += text`, which is quadratic in the
    total amount of output. `contents` remains readable as before.
    """
    def __init__(self):
        self._chunks = []
    @property
    def contents(self):
        """Everything written so far, as one string."""
        return "".join(self._chunks)
    def write(self, text):
        self._chunks.append(text)
    def close(self):
        # Nothing to release; present for file-object compatibility.
        pass
    def flush(self):
        # No extra buffering beyond the list; present for compatibility.
        pass
# The class name here is lowercase as it is a context manager, which
# typically tends to be lowercase.
class capture_output(object): # noqa pylint:disable=invalid-name
    """Context manager that optionally captures stdout/stderr in memory.

    When `hide` is truthy, sys.stdout/sys.stderr AND the dunder originals
    (sys.__stdout__/sys.__stderr__) are redirected into CaptureStream
    objects for the duration of the `with` block, then restored on exit.
    When `hide` is falsy the manager is a no-op.
    """
    def __init__(self, hide):
        self.hide = hide
    def __enter__(self):
        if self.hide:
            # Save all four stream references so __exit__ can restore them
            # in the same order.
            self._prev_streams = [
                sys.stdout,
                sys.stderr,
                sys.__stdout__,
                sys.__stderr__,
            ]
            self.stdout = CaptureStream()
            self.stderr = CaptureStream()
            # Redirect the dunder streams too, so code writing directly to
            # sys.__stdout__ is also captured.
            sys.stdout, sys.__stdout__ = self.stdout, self.stdout
            sys.stderr, sys.__stderr__ = self.stderr, self.stderr
        return self
    def get_hidden_stdout(self):
        # NOTE(review): only valid when constructed with hide=True;
        # otherwise self.stdout was never created (AttributeError).
        return self.stdout.contents
    def get_hidden_stderr(self):
        # NOTE(review): same hide=True caveat as get_hidden_stdout.
        return self.stderr.contents
    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.hide:
            sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__ = self._prev_streams
            del self._prev_streams
|
from rest_framework import viewsets
from rest_framework.views import APIView
from rest_framework.viewsets import ViewSet
from rest_framework.response import Response
from rest_framework import status
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from . import serializers
from . import models
from . import permissions
class HelloApiView(APIView):
    """
    Test APIView demonstrating the basic HTTP-method handlers.
    """
    def get(self, request, format=None):
        """
        Return a list of APIView features
        """
        an_apiview = [
            'Use HTTP method as function (get, post, patch, put, delete)',
            'Is similar to a traditional Django view',
            'Give you the most control over your application logic',
            'Is mapped manually to URL',
        ]
        return Response({'message' : 'Hello', 'an_apiview' : an_apiview})
    def post(self, request):
        """
        Create a hello message with our name
        """
        serializer = serializers.HelloSerializer(data=request.data)
        if serializer.is_valid():
            name = serializer.validated_data.get('name')
            email = serializer.validated_data.get('email')
            # Debug print() calls removed - they leaked request data to stdout.
            message = f'Hello {name}, your email is {email}'
            return Response({"message" : message})
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def put(self, request, pk=None):
        """
        Handle updating an object
        """
        return Response({"method" : "PUT"})
    def patch(self, request, pk=None):
        """
        Handle a partial update of an object
        """
        return Response({"method" : "PATCH"})
    def delete(self, request, pk=None):
        """
        Handle deleting an object
        """
        return Response({"method":"DELETE"})
class HelloViewSet(ViewSet):
    """
    Test API ViewSet demonstrating the standard action handlers.
    """
    serializer_class = serializers.HelloSerializer
    def list(self, request):
        """
        Return a list from db
        """
        a_viewset = [
            'Uses actions (list, create, retrive, update, partial_update, delete)',
            'Automatically maps to URLs using Routers',
            'Provides more functionality with less code',
        ]
        return Response({'message' : 'Hello', 'a_viewset' : a_viewset})
    def create(self, request):
        """
        Create a new hello message
        """
        serializer = self.serializer_class(data=request.data)
        if serializer.is_valid():
            name = serializer.validated_data.get('name')
            message = f'Hello {name}'
            return Response({'message': message})
        else:
            # BUGFIX: was `status.status.HTTP_400_BAD_REQUEST`, which raises
            # AttributeError (the status module has no `status` attribute)
            # whenever validation fails, turning a 400 into a 500.
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def retrieve(self, request, pk=None):
        """
        Handle getting an object by its ID
        """
        return Response({'http_method': 'GET'})
    def update(self, request, pk=None):
        """
        Handle updating an object by its ID
        """
        return Response({'http_method': 'PUT'})
    def partial_update(self, request, pk=None):
        """
        Handle partially updating an object by its ID
        """
        return Response({'http_method': 'PATCH'})
class UserProfileViewSet(viewsets.ModelViewSet):
    """
    handle creating and updating profiles
    """
    # Full CRUD over UserProfile via ModelViewSet.
    serializer_class = serializers.UserProfilesSerializer
    queryset = models.UserProfile.objects.all()
    # Token-based auth; permissions.UpdateOwnProfile presumably restricts
    # writes to the profile's owner - verify in the permissions module.
    authentication_classes = (TokenAuthentication,)
    permission_classes = (permissions.UpdateOwnProfile,)
    # Enables ?search= filtering on the fields below.
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', 'email')
|
import torch
import argparse
from glob import glob
from tqdm import tqdm
# CLI: choose how to fuse per-clip I3D video features with 128-dim audio features.
parser = argparse.ArgumentParser()
parser.add_argument('--type', type=str, default='0', help='fusion type') # 0: AvgTrim 1: DupTrim 2: Concat
opt = parser.parse_args()
assert(int(opt.type) >= 0 and int(opt.type) <= 2)
files = glob('I3D_RGB/*')
for i in tqdm(files):
    # Audio features live in a parallel directory with the same file names.
    videoFeats = torch.load(i)
    audioFeats = torch.load(i.replace('I3D_RGB', 'AudioFeats'))
    # AvgTrim: average windows of audio frames, trim both streams to the
    # common length, and concatenate along the feature dimension.
    if opt.type == "0":
        k = audioFeats.shape[0] // videoFeats.shape[0]
        # Sentinel zero row used to seed torch.cat; stripped below with [1:, :].
        combinedFeats = torch.zeros(1, 128)
        # NOTE(review): the loop advances by a hard-coded step of 2 while each
        # window averages k frames - confirm whether the step was meant to be k.
        for j in range(0, audioFeats.shape[0], 2):
            combinedFeats = torch.cat([combinedFeats, torch.mean(audioFeats[j:j+k, :], 0).unsqueeze(0)], dim=0)
        combinedFeats = combinedFeats[1:, :]
        commonSize = min(combinedFeats.shape[0], videoFeats.shape[0])
        videoFeats = videoFeats[: commonSize, :]
        combinedFeats = combinedFeats[: commonSize, :]
        combinedFeats = torch.cat([videoFeats, combinedFeats], dim=1)
        torch.save(combinedFeats, i.replace('I3D_RGB', 'combinedFeats'))
        continue
    # DupTrim: repeat each frame of the shorter modality k times, trim to the
    # common length, then concatenate along the feature dimension.
    if opt.type == "1":
        if audioFeats.shape[0] > videoFeats.shape[0]:
            k = audioFeats.shape[0] // videoFeats.shape[0]
            videoFeats = videoFeats.unsqueeze(1).repeat(1, k, 1)
            videoFeats = videoFeats.reshape(videoFeats.shape[0] * videoFeats.shape[1], videoFeats.shape[2])
        else:
            k = videoFeats.shape[0] // audioFeats.shape[0]
            audioFeats = audioFeats.unsqueeze(1).repeat(1, k, 1)
            audioFeats = audioFeats.reshape(audioFeats.shape[0] * audioFeats.shape[1], audioFeats.shape[2])
        commonSize = min(audioFeats.shape[0], videoFeats.shape[0])
        videoFeats = videoFeats[: commonSize, :]
        audioFeats = audioFeats[: commonSize, :]
        combinedFeats = torch.cat([videoFeats, audioFeats], dim=1)
        torch.save(combinedFeats, i.replace('I3D_RGB', 'combinedFeats'))
        continue
    # Concat: direct feature-dimension concatenation, no temporal alignment.
    # Requires both streams to already have equal length along dim 0.
    if opt.type == "2":
        combinedFeats = torch.cat([videoFeats, audioFeats], dim=1)
        torch.save(combinedFeats, i.replace('I3D_RGB', 'combinedFeats'))
        continue
|
#!/usr/bin/env python
import unittest
import inspect
class MagicTest(unittest.TestCase):
    """TestCase that can collect its own test methods in source order."""
    @classmethod
    def _get_test_funcs(cls):
        """Yield (source_line_number, method_name) for tests defined on cls."""
        inherited = dir(unittest.TestCase)
        for attr in inspect.classify_class_attrs(cls):
            is_own_test = (attr.kind == 'method'
                           and attr.defining_class == cls
                           and not attr.name.startswith('_')
                           and attr.name not in inherited)
            if is_own_test:
                lineno = inspect.findsource(getattr(cls, attr.name))[1]
                yield lineno, attr.name
    @classmethod
    def toSuite(cls):
        """Build a TestSuite holding this class's tests in definition order."""
        suite = unittest.TestSuite()
        for _lineno, name in sorted(cls._get_test_funcs()):
            suite.addTest(cls(name))
        return suite
    @classmethod
    def runSuite(cls, vb=2):
        """Run toSuite() under a text runner at the given verbosity."""
        runner = unittest.TextTestRunner(verbosity=vb)
        return runner.run(cls.toSuite())
def suite(mod):
    # Factory: returns a zero-argument callable that builds a TestSuite with
    # every MagicTest subclass defined in the module named `mod`.
    # NOTE: Python 2 only (print statement here, dict.iteritems below).
    print 'suiting',mod
    def meta():
        thesuite = unittest.TestSuite()
        # __import__('a.b.c') returns package 'a'; walk down to the leaf module.
        module = __import__(mod)
        for sub in mod.split('.')[1:]:
            module = getattr(module, sub)
        for k,v in module.__dict__.iteritems():
            # The __module__ check skips MagicTest subclasses merely imported
            # into `mod` from elsewhere.
            if inspect.isclass(v) and issubclass(v, MagicTest) and v.__module__ == mod:
                thesuite.addTest(v.toSuite())
        return thesuite
    return meta
def modsuite(*mods):
    # Like suite(), but combines the pre-collected tests of several modules.
    def meta():
        collected = [m.all_tests() for m in mods]
        return unittest.TestSuite(collected)
    return meta
# vim: et sw=4 sts=4
|
"""
Tests for public API v2 endpoints
"""
from flask import json
import os
import pytest
from common import daily_push_ny_wa_two_days
from app.api.public_v2 import ValuesCalculator, CoreData, datetime, State, Batch, db, pytz
def write_and_publish_data(client, headers, data_json_str):
    """Create a batch from the given JSON payload, then publish it."""
    create_resp = client.post(
        "/api/v1/batches",
        data=data_json_str,
        content_type='application/json',
        headers=headers)
    assert create_resp.status_code == 201
    batch_id = create_resp.json['batch']['batchId']
    publish_url = "/api/v1/batches/{}/publish".format(batch_id)
    publish_resp = client.post(publish_url, headers=headers)
    assert publish_resp.status_code == 201
def test_get_states_daily_basic(app, headers):
    """Publish the data.json fixture and sanity-check /states/daily output."""
    client = app.test_client()
    # write and publish some test data
    example_filename = os.path.join(os.path.dirname(__file__), 'data.json')
    with open(example_filename) as f:
        payload_json_str = f.read()
    write_and_publish_data(client, headers, payload_json_str)
    # can we read the data back out?
    resp = client.get("/api/v2/public/states/daily")
    assert resp.status_code == 200
    # the response envelope carries pagination links, metadata, and the payload
    for k in ['links', 'meta', 'data']:
        assert k in resp.json
    # should have two days worth of data, for 56 states
    assert len(resp.json['data']) == 56 * 2
    # check that the states are sorted alphabetically, doubled - test data is 2 days
    returned_states = [x['state'] for x in resp.json['data']]
    set1 = sorted(list(set(returned_states)))
    assert returned_states == set1 + set1
    # check that there are 2 different dates
    returned_dates = list(set([x['date'] for x in resp.json['data']]))
    assert len(returned_dates) == 2
    # check that getting data for 1 state also works
    resp = client.get("/api/v2/public/states/NY/daily")
    assert resp.status_code == 200
    assert len(resp.json['data']) == 2
    returned_states = set([x['state'] for x in resp.json['data']])
    assert returned_states == {'NY'}
def test_get_states_daily_simple(app, headers):
    """States daily 'simple' view: plain numeric values, newest date first."""
    test_data = daily_push_ny_wa_two_days()  # two days each of NY and WA
    client = app.test_client()
    write_and_publish_data(client, headers, json.dumps(test_data))
    resp = client.get("/api/v2/public/states/daily/simple")
    assert resp.status_code == 200
    assert len(resp.json['data']) == 4  # should have 2 days of data for 2 states
    # Reverse chronological order: the newest date's rows come first.
    # Expected (state, positive, negative) for the two 2020-05-25 rows.
    expected_rows = [('NY', 20, 5), ('WA', 10, 10)]
    for row, (state, positive, negative) in zip(resp.json['data'][:2], expected_rows):
        assert row['date'] == '2020-05-25'
        assert row['state'] == state
        people = row['tests']['pcr']['people']
        assert people['positive'] == positive
        assert people['negative'] == negative
def test_get_states_daily_full(app, headers):
    """Full /states/daily view: each metric wrapped with calculated statistics."""
    test_data = daily_push_ny_wa_two_days()  # two days each of NY and WA
    client = app.test_client()
    write_and_publish_data(client, headers, json.dumps(test_data))
    resp = client.get("/api/v2/public/states/daily")
    assert resp.status_code == 200
    assert len(resp.json['data']) == 4  # should have 2 days of data for 2 states
    # should come back in reverse chronological order
    first_data = resp.json['data'][0]
    assert first_data['date'] == '2020-05-25'
    assert first_data['state'] == 'NY'
    # unlike the "simple" endpoint, each metric is a {value, calculated} object
    assert first_data['tests']['pcr']['people']['positive']['value'] == 20
    assert first_data['tests']['pcr']['people']['negative']['value'] == 5
    # make sure calculated values are correct
    assert first_data['tests']['pcr']['people']['negative']['calculated'] == {
        'population_percent': 0,
        'change_from_prior_day': 1,
        'seven_day_average': 4,
        'seven_day_change_percent': None,
    }
    second_data = resp.json['data'][1]
    assert second_data['date'] == '2020-05-25'
    assert second_data['state'] == 'WA'
    assert second_data['tests']['pcr']['people']['positive']['value'] == 10
    assert second_data['tests']['pcr']['people']['negative']['value'] == 10
def test_values_calculator(app):
    """Unit-test ValuesCalculator against a single hand-built CoreData row."""
    with app.app_context():
        nys = State(state='NY', totalTestResultsFieldDbColumn='posNeg')
        bat = Batch(batchNote='test', createdAt=datetime.now(),
                    isPublished=False, isRevision=False)
        db.session.add(bat)
        db.session.add(nys)
        # flush (not commit) so bat.batchId is assigned for the row below
        db.session.flush()
        now_utc = datetime(2020, 5, 4, 20, 3, tzinfo=pytz.UTC)
        core_data_row = CoreData(
            lastUpdateIsoUtc=now_utc.isoformat(), dateChecked=now_utc.isoformat(),
            date=datetime.today(), state='NY', batchId=bat.batchId,
            positive=596214, negative=5, dataQualityGrade='A')
        calculator = ValuesCalculator([core_data_row])
        # 596214 positives / 19,572,319 NY census population ~= 3.0462%
        assert calculator.population_percent(core_data_row, 'positive') == 3.0462
        # non-numeric columns get no calculated values
        assert calculator.calculate_values(core_data_row, 'dataQualityGrade') == None
def test_get_state_info_v2(app):
    """/states metadata endpoint: census data is joined, undefined sites omitted."""
    client = app.test_client()
    with app.app_context():
        nys = State(state='NY', name='New York', pum=False, notes='Testing123',
                    totalTestResultsFieldDbColumn="totalTestsViral", covid19Site="example.com")
        wa = State(state='WA', name='Washington', pum=False, notes='Testing321',
                   totalTestResultsFieldDbColumn="totalTestsViral")
        db.session.add(nys)
        db.session.add(wa)
        db.session.commit()
    resp = client.get("/api/v2/public/states")
    assert resp.status_code == 200
    respjson = resp.json
    assert len(respjson['data']) == 2
    assert respjson['data'][0]['name'] == 'New York'
    # population/fips presumably joined from static census data (not set above)
    assert respjson['data'][0]['census']['population'] == 19572319
    assert respjson['data'][0]['fips'] == '36'
    # only NY got a covid19Site, so WA-style undefined sites must be dropped
    assert len(respjson['data'][0]['sites']) == 1  # undefined sites should be omitted
    assert respjson['data'][0]['sites'][0]['label'] == 'primary'
def test_get_us_daily(app, headers):
    """US aggregate daily endpoint: states are summed per date, newest first."""
    test_data = daily_push_ny_wa_two_days()
    client = app.test_client()
    # Write and publish a batch containing the above data, two days each of
    # NY and WA. Reuses the shared helper instead of duplicating the
    # create/publish request sequence inline (consistency with the other tests).
    write_and_publish_data(client, headers, json.dumps(test_data))
    resp = client.get("/api/v2/public/us/daily")
    assert resp.status_code == 200
    assert len(resp.json['data']) == 2
    # should come back in reverse chronological order
    first_data = resp.json['data'][0]
    assert first_data['date'] == '2020-05-25'
    assert first_data['states'] == 2
    assert first_data['testing']['total']['value'] == 45
    # make sure calculated values are correct
    assert first_data['testing']['total']['calculated'] == {
        'population_percent': 0.0,
        'change_from_prior_day': 9,
        'seven_day_change_percent': None,
    }
    second_data = resp.json['data'][1]
    assert second_data['date'] == '2020-05-24'
    assert second_data['states'] == 2
    assert second_data['testing']['total']['value'] == 36
|
import json
from paprika.system.logger.Logger import Logger
from requests import post
from paprika.system.Traceback import Traceback
class Message:
    """Helper for POSTing JSON messages and normalising the HTTP response."""
    def __init__(self):
        pass
    @staticmethod
    def get_header(response, name):
        """Return the value of header `name`, or None if absent.

        `name` must be given in lowercase; response header names are lowered
        before comparison, making the lookup case-insensitive.
        """
        for key in response.headers.keys():
            if key.lower() == name:
                return response.headers[key]
        return None
    @staticmethod
    def post_request(url, message):
        """POST `message` as JSON to `url`.

        Returns a dict that always contains 'status_code'; on failure it also
        carries 'state' == 'FAILED' plus diagnostic fields.
        """
        logger = Logger(Message())
        try:
            headers = {'Content-Type': 'application/json'}
            logger.debug('', 'url : ' + url + ', message : ' + json.dumps(message))
            response = post(url, json.dumps(message), headers=headers)
            content_type = Message.get_header(response, 'content-type')
            logger.debug('', 'url : ' + url + ', response : ' + str(response.status_code) + ', message : ' + json.dumps(message))
            logger.debug('', 'url : ' + url + ', content_type : ' + str(content_type) + ', message : ' + json.dumps(message))
            # NOTE(review): exact match misses 'application/json; charset=...'
            # variants - confirm upstream services never append a charset.
            if content_type == 'application/json':
                result = json.loads(response.content)
                result['status_code'] = response.status_code
            else:
                result = dict()
                result['state'] = 'FAILED'
                result['status_code'] = response.status_code
                result['message'] = response.reason
                result['backtrace'] = response.text
            return result
        except Exception:
            # FIX: was a bare 'except:', which would also swallow
            # KeyboardInterrupt and SystemExit.
            result = Traceback.build()
            result['state'] = 'FAILED'
            result['status_code'] = 400
            return result
|
from setuptools import find_packages, setup
# Package metadata for forge_pathplanner. All runtime dependencies are pinned
# to exact versions for reproducible installs.
setup(
    name="forge_pathplanner",
    packages=find_packages(),
    include_package_data=True,
    python_requires=">=3.7",
    url="https://github.com/uci-uav-forge/pathplanner",
    author="Mike Sutherland",
    license="MIT",
    install_requires=[
        "aiohttp==3.8.0",
        "aiosignal==1.2.0",
        "apptools==5.1.0",
        "async-timeout==4.0.0",
        "asynctest==0.13.0",
        "attrs==21.2.0",
        "charset-normalizer==2.0.7",
        "configobj==5.0.6",
        "cvxpy==1.1.17",
        "cycler==0.11.0",
        "ecos==2.0.7.post1",
        "envisage==6.0.1",
        "euclid3==0.1",
        "frozenlist==1.2.0",
        "idna==3.3",
        "importlib-metadata==4.8.1",
        "importlib-resources==5.4.0",
        "kiwisolver==1.3.2",
        "matplotlib==3.4.3",
        "mayavi==4.7.3",
        "multidict==5.2.0",
        "networkx==2.6.3",
        "numpy==1.21.4",
        "osqp==0.6.2.post0",
        "Pillow==8.4.0",
        "pyface==7.3.0",
        "Pygments==2.10.0",
        "pykdtree==1.3.4",
        "pyparsing==3.0.4",
        "PyQt5==5.15.6",
        "PyQt5-Qt5==5.15.2",
        "PyQt5-sip==12.9.0",
        "python-dateutil==2.8.2",
        "qdldl==0.1.5.post0",
        # rrtpp is installed straight from a pinned git commit, not PyPI.
        "rrtpp @ git+https://github.com/rland93/rrt_pathplanner@c4ab7671854c6f1ac74b8b6033a07920391a8ed3",
        "scipy==1.7.2",
        "scs==2.1.4",
        "six==1.16.0",
        "tqdm==4.62.3",
        "traits==6.3.1",
        "traitsui==7.2.1",
        "typing-extensions==3.10.0.2",
        "vtk==8.1.2",
        "wslink==1.1.0",
        "yarl==1.7.2",
        "zipp==3.6.0",
    ],
    zip_safe=True,
)
import FWCore.ParameterSet.Config as cms
# CMSSW configuration fragment defining the OccupancyPlots EDAnalyzer.
occupancyplots = cms.EDAnalyzer('OccupancyPlots',
                                # multiplicity/occupancy maps from the strip-cluster producers
                                multiplicityMaps = cms.VInputTag(cms.InputTag("ssclusmultprod")),
                                occupancyMaps = cms.VInputTag(cms.InputTag("ssclusoccuprod")),
                                # empty by default; filled in by the importing configuration
                                wantedSubDets = cms.VPSet()
                                )
|
import os
def classify_giro_transaction(transactions_row):
    """Map a giro account transaction row to a spending-category label.

    Account numbers and counterparty markers come from environment variables
    so that no personal data is hard-coded.
    """
    iban = transactions_row['applicant_iban']
    # Transfers between own accounts (DKB / Sparkasse / VISA).
    own_ibans = (os.environ.get('DKB_IBAN'),
                 os.environ.get('SPK_IBAN'),
                 os.environ.get('VISA_IBAN'))
    if iban in own_ibans:
        return 'Internal transaction'
    if iban == os.environ.get('DB_IBAN'):
        return 'Fixed costs - rent and living'
    name = transactions_row['applicant_name']
    if os.environ.get('INSURANCE') in name:
        return 'Fixed costs - Insurance'
    if os.environ.get('GYM') in name:
        return 'Fixed costs - Gym'
    if os.environ.get('EMPLOYER') in name:
        return 'Salary'
    if transactions_row['posting_text'] == 'WERTPAPIERE':
        return 'Stocks'
    return 'Other'
def classify_cc_transaction(transaction_row):
    """Map a credit-card transaction row to a spending-category label."""
    description = transaction_row['description']
    if 'ATM' in description:
        return 'Cash'
    # 'Einzahlung' (deposit) moves money between own accounts.
    return 'Internal transaction' if description == 'Einzahlung' else 'VISA Other'
|
import os, sys
import torch
from matplotlib.pyplot import imsave
from nn_common import NumpyDataset
print(torch.__version__)
from ui_common import ui_common
import nn_common as common
import numpy as np
from torchvision.utils import save_image
import serial_port as comms
import time
# Locate and open the serial link to the robot before anything else.
ports = comms.serial_ports()
if len(ports) ==0:
    print("No serial ports, exiting")
    exit(0)
print(ports)
device = "/dev/ttyUSB0"
if device in ports:
    print(f"found {device}")
    comms.connect(device)
else:
    print(f"not found {device}")
    exit(1)
# Class labels come from the prepared "augmented" dataset on disk (no fetch).
dataset = NumpyDataset("augmented",fetch=False)
labels = dataset.get_labels()
np.set_printoptions(threshold = sys.maxsize)
# Debounce state shared with predict_callback (module-level globals).
last_prediction = ""
same_count = 0
if not torch.cuda.is_available():
    print("torch says you have no cuda. Aborting")
    exit(1)
os.makedirs('test/dummy', exist_ok=True)
transform = common.get_transform()
net = common.create_net(True)
# NOTE: rebinds `device` from the serial path above to the torch device.
device = common.get_device()
def predict_callback(owner, count):
    """UI callback: classify the current audio frame and act on the result.

    Runs the network when the (center-cropped) frame is loud enough, debounces
    predictions across consecutive frames, updates the window title, and sends
    a movement command over serial once a prediction is confirmed.
    """
    global last_prediction, same_count
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    image_data = np.flip(owner.image_data,0).astype(int)
    # Only classify when the 8px-border-cropped frame has enough energy.
    if image_data[8:-8,8:-8].max() > 80:
        image = torch.from_numpy(image_data.copy()).to(device).float()
        inputs = image.view(1, -1)
        outputs = net(inputs)
        output = outputs.cpu().detach().numpy()[0]
        label_index = np.argmax(output)
        new_prediction = labels[label_index]
        # (Removed unused `filename` local left over from image-dump debugging.)
    else:
        new_prediction = "silence"
        # return
    if last_prediction == new_prediction:
        same_count = same_count + 1
    else:
        same_count = 0
        # uncertain new prediction
        ui.set_title("?")
    last_prediction = new_prediction
    # Require 2 consecutive identical frames (3 for silence) before acting.
    if (new_prediction != "silence" and same_count == 1) or (new_prediction == "silence" and same_count == 2):
        ui.set_title(f"prediction = {new_prediction}")
        print(new_prediction)
        comms.move(new_prediction)
    elif same_count == 0:
        # only interested in when we have two consecutive readings the same
        ui.set_title(f"")
def main():
    """Build the capture UI, locate the USB audio device, start the loop."""
    global labels, ui
    ui = ui_common(callback=predict_callback)
    usb_device_id = ui.find_usb_device()
    ui.start(usb_device_id)
# Script entry point: only start the UI when executed directly.
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
import os
from os import listdir
import glob
import ntpath
from flask import Flask, render_template, request, send_from_directory
from flask.ext.babel import Babel, gettext, ngettext
app = Flask(__name__)
#app.config.from_pyfile('mysettings.cfg')
babel = Babel(app)
import ConfigParser
def ConfigSectionMap(section):
    """Return all options of INI `section` as a dict; value None on read error."""
    dict1 = {}
    options = Config.options(section)
    for option in options:
        try:
            dict1[option] = Config.get(section, option)
            # FIX: removed the dead `if dict1[option] == -1` branch -
            # Config.get returns strings (never -1) and the branch called an
            # undefined DebugPrint, which would have raised NameError.
        except Exception:
            # FIX: was a bare 'except:', which would also hide
            # KeyboardInterrupt/SystemExit.
            print("exception on %s!" % option)
            dict1[option] = None
    return dict1
# Load settings.ini once at import time; all module settings below come from it.
Config = ConfigParser.ConfigParser()
Config.read("settings.ini")
_port = ConfigSectionMap("Server")['port']
_host = ConfigSectionMap("Server")['host']
# INI values are strings, so boolean flags are compared textually to "True".
_debug = ConfigSectionMap("Server")['debug']=="True"
_rtl = ConfigSectionMap("UI")['rtl']=="True"
_header_background = ConfigSectionMap("UI")['header-background']
_header_logo = ConfigSectionMap("UI")['header-logo']
_header_logo_href = ConfigSectionMap("UI")['header-logo-href']
_footer_url = ConfigSectionMap("Copyright")['url']
_footer_year = ConfigSectionMap("Copyright")['year']
app.config['BABEL_DEFAULT_LOCALE'] = ConfigSectionMap("UI")['lang']
# Re-evaluate Babel's locale selection after changing the default above.
from flask.ext.babel import refresh; refresh()
apppath = os.path.dirname(os.path.abspath(__file__))
library = apppath +'/static/library/'
@app.route("/")
def index():
    """Render the library root, or the sub-item given by ?item=..."""
    requested_item = request.args.get('item', '')
    return get_folder(requested_item)
def get_folder(subfolder=''):
    """Render a single-image page or a folder listing under the library root.

    If `subfolder` resolves to a file, build the sibling-image list for the
    item template; otherwise list the folder's entries with thumbnails.
    """
    item = library + subfolder
    if os.path.isfile(item):
        # Build a glob over the requested image's directory for .jpg siblings.
        path = ntpath.basename(subfolder)
        path = library+subfolder.replace(path, "")[1:] + '*.jpg'
        # NOTE(review): rpath is computed but never used below.
        rpath = '/static/library/'+subfolder.replace(path, "")[1:] + '*.jpg'
        others = []
        for g in glob.glob(path):
            if not g.endswith('.thumb.jpg'):
                item = {}
                fn = ntpath.basename(g)
                if fn==ntpath.basename(subfolder):
                    # Mark the image the user actually requested.
                    item['active'] = True
                item['name'] = fn
                item['url']='/'+g.replace(library, '')
                item['full']=g.replace(apppath, '')
                item['thumb']=item['full'].replace(".jpg", ".thumb.jpg")
                others.append(item)
        others = sorted(others, key=lambda item: item['name'])
        return render_template('item.html', item=subfolder, others=others)
    folder = item + '/'
    lib = []
    for f in listdir(folder):
        # Skip repo plumbing and generated thumbnails.
        if not (f=='README.md' or f=='.gitignore' or f=='create_thumbs.sh' or f.endswith('.thumb.jpg')):
            item = {}
            item['name'] = f
            item['rname'] = subfolder +'/' + f
            if os.path.isfile(folder+f):
                item['path'] = subfolder + '/' + f
                item['path'] = item['path'].replace('.jpg', '.thumb.jpg')
            if os.path.isdir(folder+f):
                # Use the first thumbnail in the directory as its cover image.
                for name in sorted(glob.glob(folder+f+'/*.thumb.jpg')):
                    item['thumb'] = ntpath.basename(name)
                    item['path'] = subfolder + f + '/' + item['thumb']
                    break
                if not 'path' in item:
                    # Fall back to a full-size jpg when no thumbnail exists.
                    for name in sorted(glob.glob(folder+f+'/*.jpg')):
                        item['thumb'] = ntpath.basename(name)
                        item['path'] = subfolder + f + '/' + item['thumb']
                        break
            lib.append(item)
    # sort entries alphabetically for a stable listing
    lib = sorted(lib, key=lambda item: item['name'])
    return render_template('index.html', folder=lib)
@app.context_processor
def inject_user():
    """Expose UI and footer configuration values to every template."""
    return {
        'rtl': _rtl,
        'hbg': _header_background,
        'hlogo': _header_logo,
        'hlogohref': _header_logo_href,
        'footer_url': _footer_url,
        'footer_year': _footer_year,
    }
@app.errorhandler(404)
def page_not_found(e):
    # Custom 404 page; the status code must be returned alongside the body.
    return render_template('404.html'), 404
@app.route('/favicon.ico')
@app.route('/humans.txt')
@app.route('/robots.txt')
@app.route('/sitemap.xml')
def static_from_root():
    """Serve well-known root-level files straight from the static folder."""
    # request.path[1:] strips the leading '/' to get the static file name.
    return send_from_directory(app.static_folder, request.path[1:])
if __name__ == "__main__":
    # Host/port/debug come from settings.ini (see ConfigSectionMap usage above).
    app.run(host=_host, port=int(_port), debug=_debug)
from datetime import datetime, timedelta
from django.db.models import Q
from rest_framework import viewsets, mixins, status
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from .models import Feedback
from .serializers import FeedbackSerializer, FeedbackCreateSerializer
from .permissions import IsSuperuser
from apps.transactions.models import Transaction, TransactionProduct
from apps.core.patch_only_mixin import PatchOnlyMixin
class FeedbackViewSet(mixins.CreateModelMixin, mixins.ListModelMixin,
                      PatchOnlyMixin, viewsets.GenericViewSet):
    """
    User feedback endpoint: POST / GET (list) / PATCH.
    GET (list) uses the token's is_superuser flag to scope results.
    type="1/2/3/4" maps to complaint / after-sale / purchase-request /
    inquiry (default 4). type and content are required, the rest optional;
    `solved` (whether resolved) is updated via PATCH:
    {
        "type": "4",
        "product_spec": "product_spec id",
        "transaction_product": "transaction_product id",
        "content": "feedback text",
        "solved": true
    }
    """
    queryset = Feedback.objects.all()
    serializer_class = FeedbackSerializer
    permission_classes = (IsAuthenticated, IsSuperuser)
    def get_queryset(self):
        """Regular users see their own feedback; superusers their merchant's."""
        queryset = Feedback.objects.all()
        if not self.request.user.is_superuser:
            queryset = queryset.filter(user=self.request.user)
        else:
            queryset = queryset.filter(merchant=self.request.user.merchant)
        return queryset
    def get_serializer_class(self):
        # Creation uses the slimmer create serializer; everything else the full one.
        if self.action == 'create':
            return FeedbackCreateSerializer
        else:
            return FeedbackSerializer
    def create(self, request):
        """Create feedback (POST, any authenticated user)."""
        create_serializer = FeedbackCreateSerializer(data=request.data)
        create_serializer.is_valid(raise_exception=True)
        # Enforce the after-sale window BEFORE saving, so rejected feedback is
        # never persisted (the original saved first and then returned 400,
        # leaving an orphan Feedback row in the database).
        transaction_product = create_serializer.validated_data.get(
            'transaction_product')
        if transaction_product:
            transaction = transaction_product.transaction
            # BUGFIX: the original comparison was inverted (`>= now`) and
            # rejected feedback filed WITHIN seven days of receipt; after-sale
            # must be refused only once MORE than seven days have passed.
            # NOTE(review): datetime.now() is naive - confirm
            # received_datetime is stored naive as well.
            if (transaction.status == Transaction.RECEIVE
                    and transaction.received_datetime + timedelta(days=7)
                    < datetime.now()):
                return Response({'detail': '确认收货后超过七天,无法进行售后。'},
                                status=status.HTTP_400_BAD_REQUEST)
        create_serializer.save(user=request.user,
                               merchant=request.user.merchant)
        feedback = Feedback.objects.get(
            pk=create_serializer.data.get('id'))
        feedback_serializer = FeedbackSerializer(feedback, many=False)
        return Response(feedback_serializer.data,
                        status=status.HTTP_201_CREATED)
    def list(self, request):
        """List feedback (GET), filterable by ?type= and ?search= (user/content).

        type="1/2/3/4" maps to complaint/after-sale/purchase-request/inquiry.
        """
        feedback_type = request.query_params.get('type')  # avoid shadowing builtin
        search = request.query_params.get('search')
        filter_condition = Q(merchant=request.user.merchant)
        if feedback_type:
            filter_condition = filter_condition & Q(type=feedback_type)
        if search:
            # BUGFIX: `&` binds tighter than `|`, so the original expression
            # `merchant & username | email` let the email clause escape the
            # merchant filter, leaking other merchants' feedback rows.
            filter_condition = filter_condition & (
                Q(user__username__icontains=search)
                | Q(email__icontains=search))
        queryset = Feedback.objects.filter(filter_condition)
        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = FeedbackSerializer(page, many=True)
            return self.get_paginated_response(serializer.data)
        serializer = FeedbackSerializer(queryset, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
|
# coding: utf-8
"""
Part of this code is based on a similar implementation present in FireWorks (https://pypi.python.org/pypi/FireWorks).
Work done by D. Waroquiers, A. Jain, and M. Kocher.
The main difference wrt the Fireworks implementation is that the QueueAdapter
objects provide a programmatic interface for setting important attributes
such as the number of MPI nodes, the number of OMP threads and the memory requirements.
This programmatic interface is used by the `TaskManager` for optimizing the parameters
of the run before submitting the job (Abinit provides the autoparal option that
allows one to get a list of parallel configuration and their expected efficiency).
"""
from __future__ import print_function, division, unicode_literals
import sys
import os
import abc
import string
import shlex
import copy
import getpass
import six
import json
from collections import namedtuple, OrderedDict, defaultdict
from subprocess import Popen, PIPE
from atomicfile import AtomicFile
from monty.string import is_string
from monty.collections import AttrDict
from monty.functools import lazy_property
from monty.io import FileLock
from pymatgen.core.units import Time, Memory
from .utils import Condition
from .launcher import ScriptEditor
import logging
logger = logging.getLogger(__name__)
__all__ = [
"MpiRunner",
"make_qadapter",
]
def slurm_parse_timestr(s):
    """
    A slurm time parser. Accepts a string in one the following forms:

        # "days-hours",
        # "days-hours:minutes",
        # "days-hours:minutes:seconds".
        # "minutes",
        # "minutes:seconds",
        # "hours:minutes:seconds",

    Returns:
        Time in seconds.

    Raises:
        `ValueError` if string is not valid.
    """
    days, hours, minutes, seconds = 0, 0, 0, 0
    if '-' in s:
        # "days-hours",
        # "days-hours:minutes",
        # "days-hours:minutes:seconds".
        days, s = s.split("-")
        days = int(days)
        if ':' not in s:
            hours = int(float(s))
        elif s.count(':') == 1:
            hours, minutes = map(int, s.split(':'))
        elif s.count(':') == 2:
            hours, minutes, seconds = map(int, s.split(':'))
        else:
            # FIX: message read "More that 2" (typo); now matches the wording
            # of the equivalent branch below.
            raise ValueError("More than 2 ':' in string!")
    else:
        # "minutes",
        # "minutes:seconds",
        # "hours:minutes:seconds",
        if ':' not in s:
            minutes = int(float(s))
        elif s.count(':') == 1:
            minutes, seconds = map(int, s.split(':'))
        elif s.count(':') == 2:
            hours, minutes, seconds = map(int, s.split(':'))
        else:
            raise ValueError("More than 2 ':' in string!")
    return Time((days*24 + hours)*3600 + minutes*60 + seconds, "s")
def time2slurm(timeval, unit="s"):
    """
    Convert a number representing a time value in the given unit (Default: seconds)
    to a string following the slurm convention: "days-hours:minutes:seconds".

    >>> assert time2slurm(61) == '0-0:1:1' and time2slurm(60*60+1) == '0-1:0:1'
    >>> assert time2slurm(0.5, unit="h") == '0-0:30:0'
    """
    total_secs = Time(timeval, unit).to("s")
    # Peel off each unit from the largest down, carrying the remainder.
    days, remainder = divmod(total_secs, 24 * 3600)
    hours, remainder = divmod(remainder, 3600)
    minutes, secs = divmod(remainder, 60)
    return "%d-%d:%d:%d" % (days, hours, minutes, secs)
def time2pbspro(timeval, unit="s"):
    """
    Convert a number representing a time value in the given unit (Default: seconds)
    to a string following the PbsPro convention: "hours:minutes:seconds".

    >>> assert time2pbspro(2, unit="d") == '48:0:0'
    """
    total_secs = Time(timeval, unit).to("s")
    hours, remainder = divmod(total_secs, 3600)
    minutes, secs = divmod(remainder, 60)
    return "%d:%d:%d" % (hours, minutes, secs)
def timelimit_parser(s):
    """Convert a float or a string into time in seconds."""
    try:
        # Plain numbers are interpreted as seconds.
        return Time(float(s), "s")
    except ValueError:
        # Otherwise assume a slurm-style "[days-]hh:mm:ss" time string.
        return slurm_parse_timestr(s)
def any2mb(s):
    """Convert string or number to memory in megabytes."""
    if is_string(s):
        # e.g. "4 Gb" -> integer megabytes via the Memory unit conversion.
        return int(Memory.from_string(s).to("Mb"))
    else:
        # Plain numbers are assumed to already be in megabytes.
        return int(s)
class MpiRunner(object):
    """
    This object provides an abstraction for the mpirunner provided
    by the different MPI libraries. Its main task is handling the
    different syntax and options supported by the different mpirunners.
    """
    def __init__(self, name, type=None, options=""):
        """
        Args:
            name: Name of the runner e.g. "mpirun". None if no runner is used.
            type: Type of the runner, used to handle vendor-specific syntax (default None).
            options: String with additional options passed to the runner.
        """
        self.name = name
        # Bug fix: the constructor used to ignore ``type`` and always store None.
        self.type = type
        self.options = options

    def string_to_run(self, executable, mpi_procs, stdin=None, stdout=None, stderr=None):
        """
        Build and return the shell command used to launch ``executable`` with
        ``mpi_procs`` MPI processes, with optional redirection of stdin/stdout/stderr.
        """
        stdin = "< " + stdin if stdin is not None else ""
        stdout = "> " + stdout if stdout is not None else ""
        stderr = "2> " + stderr if stderr is not None else ""

        if self.has_mpirun:
            if self.type is None:
                # TODO: better treatment of mpirun syntax.
                #se.add_line('$MPIRUN -n $MPI_PROCS $EXECUTABLE < $STDIN > $STDOUT 2> $STDERR')
                num_opt = "-n " + str(mpi_procs)
                cmd = " ".join([self.name, num_opt, executable, stdin, stdout, stderr])
            else:
                # Bug fix: the %s placeholder was never filled in.
                raise NotImplementedError("type %s is not supported!" % self.type)
        else:
            #assert mpi_procs == 1
            cmd = " ".join([executable, stdin, stdout, stderr])

        return cmd

    @property
    def has_mpirun(self):
        """True if we are running via mpirun, mpiexec ..."""
        return self.name is not None
class OmpEnv(AttrDict):
    """
    Dictionary with the OpenMP environment variables
    see https://computing.llnl.gov/tutorials/openMP/#EnvironmentVariables
    """
    # Names of the supported OpenMP environment variables.
    # Bug fix: the list used to contain OMP_STACKSIZE and OMP_PROC_BIND twice.
    _KEYS = [
        "OMP_SCHEDULE",
        "OMP_NUM_THREADS",
        "OMP_DYNAMIC",
        "OMP_PROC_BIND",
        "OMP_NESTED",
        "OMP_STACKSIZE",
        "OMP_WAIT_POLICY",
        "OMP_MAX_ACTIVE_LEVELS",
        "OMP_THREAD_LIMIT",
    ]

    @classmethod
    def as_ompenv(cls, obj):
        """Convert an object into a OmpEnv"""
        if isinstance(obj, cls): return obj
        if obj is None: return cls()
        return cls(**obj)

    def __init__(self, *args, **kwargs):
        """
        Constructor method inherited from dictionary. Note that all the
        values are converted to and stored as strings:

        >>> assert OmpEnv(OMP_NUM_THREADS=1).OMP_NUM_THREADS == "1"

        To create an instance from an INI file, use:
           OmpEnv.from_file(filename)

        Raises:
            ValueError: if a key is not a supported OpenMP variable name.
        """
        super(OmpEnv, self).__init__(*args, **kwargs)

        err_msg = ""
        for key, value in self.items():
            self[key] = str(value)
            if key not in self._KEYS:
                err_msg += "unknown option %s\n" % key

        if err_msg:
            raise ValueError(err_msg)

    def export_str(self):
        """Return a string with the bash statements needed to setup the OMP env."""
        return "\n".join("export %s=%s" % (k, v) for k, v in self.items())
class Hardware(object):
    """
    This object collects information on the hardware available in a given queue.

    Basic definitions:

        - A node refers to the physical box, i.e. cpu sockets with north/south switches
          connecting memory systems and extension cards, e.g. disks, nics, and accelerators.
        - A cpu socket is the connector to these systems and the cpu cores.
        - A cpu core is an independent computing unit with its own computing pipeline,
          logical units, and memory controller. Each cpu core services a number of cpu
          threads, each having an independent instruction stream but sharing the core's
          memory controller and other logical units.
    """
    def __init__(self, **kwargs):
        self.num_nodes = int(kwargs.pop("num_nodes"))
        self.sockets_per_node = int(kwargs.pop("sockets_per_node"))
        self.cores_per_socket = int(kwargs.pop("cores_per_socket"))

        # Convert memory to megabytes.
        mem_str = str(kwargs.pop("mem_per_node"))
        self.mem_per_node = int(Memory.from_string(mem_str).to("Mb"))

        invalid = (self.mem_per_node <= 0 or self.sockets_per_node <= 0
                   or self.cores_per_socket <= 0)
        if invalid:
            raise ValueError("invalid parameters: %s" % kwargs)

        if kwargs:
            raise ValueError("Found invalid keywords in the partition section:\n %s" % kwargs.keys())

    def __str__(self):
        """String representation."""
        return (" num_nodes: %d, sockets_per_node: %d, cores_per_socket: %d, mem_per_node %s," %
                (self.num_nodes, self.sockets_per_node, self.cores_per_socket, self.mem_per_node))

    @property
    def num_cores(self):
        """Total number of cores available"""
        return self.cores_per_socket * self.sockets_per_node * self.num_nodes

    @property
    def cores_per_node(self):
        """Number of cores per node."""
        return self.cores_per_socket * self.sockets_per_node

    @property
    def mem_per_core(self):
        """Memory per core, derived from the memory available on a single node."""
        return self.mem_per_node / self.cores_per_node

    def can_use_omp_threads(self, omp_threads):
        """True if omp_threads fit in a node."""
        return self.cores_per_node >= omp_threads

    def divmod_node(self, mpi_procs, omp_threads):
        """Use divmod to compute (num_nodes, rest_cores)"""
        return divmod(mpi_procs * omp_threads, self.cores_per_node)
class _ExcludeNodesFile(object):
    """
    This file contains the list of nodes to be excluded.
    Nodes are indexed by queue name.
    """
    DIRPATH = os.path.join(os.getenv("HOME"), ".abinit", "abipy")
    FILEPATH = os.path.join(DIRPATH, "exclude_nodes.json")

    def __init__(self):
        # Create an empty JSON file on first use.
        if not os.path.exists(self.FILEPATH):
            if not os.path.exists(self.DIRPATH): os.makedirs(self.DIRPATH)
            with FileLock(self.FILEPATH):
                with open(self.FILEPATH, "w") as fh:
                    json.dump({}, fh)

    def read_nodes(self, qname):
        """Return the list of nodes excluded for queue ``qname`` (empty list if none)."""
        # Bug fix: the file was opened in write mode ("w"), which truncates it
        # and makes json.load fail; read mode is required here.
        with open(self.FILEPATH, "r") as fh:
            return json.load(fh).get(qname, [])

    def add_nodes(self, qname, nodes):
        """Add ``nodes`` (a single node or a list of nodes) to the excluded list of ``qname``."""
        nodes = (nodes,) if not isinstance(nodes, (tuple, list)) else nodes
        with FileLock(self.FILEPATH):
            # NOTE(review): json.load on a file opened with mode "w+" likely sees an
            # empty file — verify AtomicFile semantics preserve the previous content.
            with AtomicFile(self.FILEPATH, mode="w+") as fh:
                d = json.load(fh)
                # Bug fix: the original indexed the dict with the literal string
                # "qname" instead of the value of the qname variable.
                if qname in d:
                    d[qname].extend(nodes)
                    d[qname] = list(set(d[qname]))
                else:
                    d[qname] = nodes
                json.dump(d, fh)
# Module-level singleton giving access to the file with the excluded nodes.
_EXCL_NODES_FILE = _ExcludeNodesFile()
class JobStatus(int):
    """
    This object is an integer representing the status of a :class:`QueueJob`.

    The values follow the Slurm conventions (see `man squeue`, JOB STATE CODES):
    jobs typically pass through PENDING, RUNNING, SUSPENDED, COMPLETING and
    COMPLETED, and may end in states such as FAILED (non-zero exit code),
    CANCELLED, TIMEOUT (time limit reached), PREEMPTED or NODE_FAIL
    (failure of one or more allocated nodes).
    """
    _STATUS_TABLE = OrderedDict([
        (-1, "UNKNOWN"),
        (0, "PENDING"),
        (1, "RUNNING"),
        (2, "RESIZING"),
        (3, "SUSPENDED"),
        (4, "COMPLETED"),
        (5, "CANCELLED"),
        (6, "FAILED"),
        (7, "TIMEOUT"),
        (8, "PREEMPTED"),
        (9, "NODEFAIL"),
    ])

    def __repr__(self):
        return "<%s: %s, at %s>" % (self.__class__.__name__, str(self), id(self))

    def __str__(self):
        """String representation."""
        return self._STATUS_TABLE[self]

    @classmethod
    def from_string(cls, s):
        """Return a :class`JobStatus` instance from its string representation."""
        for num, text in cls._STATUS_TABLE.items():
            if text == s:
                return cls(num)

        # Unknown string: warn and fall back to the UNKNOWN status.
        logger.warning("Got unknown status: %s" % s)
        return cls.from_string("UNKNOWN")
class QueueJob(object):
    """
    This object provides methods to contact the resource manager to get info on the status
    of the job and useful statistics. This is an abstract class.
    """
    # Used to handle other resource managers.
    S_UNKNOWN = JobStatus.from_string("UNKNOWN")
    # Slurm status
    S_PENDING = JobStatus.from_string("PENDING")
    S_RUNNING = JobStatus.from_string("RUNNING")
    S_RESIZING = JobStatus.from_string("RESIZING")
    S_SUSPENDED = JobStatus.from_string("SUSPENDED")
    S_COMPLETED = JobStatus.from_string("COMPLETED")
    S_CANCELLED = JobStatus.from_string("CANCELLED")
    S_FAILED = JobStatus.from_string("FAILED")
    S_TIMEOUT = JobStatus.from_string("TIMEOUT")
    S_PREEMPTED = JobStatus.from_string("PREEMPTED")
    S_NODEFAIL = JobStatus.from_string("NODEFAIL")

    def __init__(self, queue_id, qname, qout_path=None, qerr_path=None):
        """
        Args:
            queue_id: Identifier assigned to the job by the resource manager.
            qname: Name of the queue the job has been submitted to.
            qout_path: Path of the output file produced by the queue manager.
            qerr_path: Path of the error file produced by the queue manager.
        """
        self.qid, self.qname = queue_id, qname
        self.qout_path, self.qerr_path = qout_path, qerr_path

        # Initialize properties.
        self.status, self.exitcode, self.signal = None, None, None

    #def __str__(self):

    def __bool__(self):
        # A job is "true" if it has been assigned an identifier by the manager.
        return self.qid is not None

    __nonzero__ = __bool__

    @property
    def is_completed(self):
        """True if the job has terminated all processes on all nodes."""
        return self.status == self.S_COMPLETED

    @property
    def is_running(self):
        """True if the job currently has an allocation."""
        return self.status == self.S_RUNNING

    @property
    def is_failed(self):
        """True if the job terminated with a failure condition."""
        return self.status == self.S_FAILED

    @property
    def timeout(self):
        """True if the job terminated upon reaching its time limit."""
        return self.status == self.S_TIMEOUT

    @property
    def has_node_failures(self):
        """True if the job terminated due to the failure of one or more nodes."""
        return self.status == self.S_NODEFAIL

    @property
    def unknown_status(self):
        """True if the status of the job could not be determined."""
        return self.status == self.S_UNKNOWN

    def set_status_exitcode_signal(self, status, exitcode, signal):
        """Update the status, exit code and signal of the job."""
        self.status, self.exitcode, self.signal = status, exitcode, signal

    def likely_code_error(self):
        """
        Return the name of the received signal if it usually indicates a bug in
        the application code (e.g. SIGFPE, floating point exception), else False.
        See http://man7.org/linux/man-pages/man7/signal.7.html for the meaning of
        the standard POSIX signals (SIGSEGV, SIGILL, SIGBUS, SIGABRT, ...).
        """
        for sig_name in ("SIGFPE",):
            if self.received_signal(sig_name):
                return sig_name

        return False

    def received_signal(self, sig_name):
        """True if the job was terminated by the signal named ``sig_name``."""
        if self.signal is None:
            return False

        # Get the numeric value from signal and compare it with self.signal
        import signal
        try:
            return self.signal == getattr(signal, sig_name)
        except AttributeError:
            # invalid sig_name or sig_name not available on this OS.
            return False

    def estimated_start_time(self):
        """Return date with estimated start time. None if it cannot be detected"""
        return None

    def get_info(self, **kwargs):
        """Return info on the job. None if not available."""
        return None

    def get_nodes(self, **kwargs):
        """Return the nodes allocated to the job. None if not available."""
        return None

    def get_stats(self, **kwargs):
        """Return statistics on the job. None if not available."""
        return None
class SlurmJob(QueueJob):
    """Handler for Slurm jobs."""

    def estimated_start_time(self):
        """Return the estimated start time reported by ``squeue --start``, or None."""
        #squeue  --start -j  116791
        #  JOBID PARTITION     NAME     USER  ST           START_TIME  NODES NODELIST(REASON)
        # 116791      defq gs6q2wop cyildiri  PD  2014-11-04T09:27:15     16 (QOSResourceLimit)
        # Bug fix: the command used to be built as ("squeue--start", "--job %d"),
        # a tuple with the first two tokens accidentally joined by implicit
        # string-literal concatenation; shlex.split would then crash on the tuple.
        cmd = "squeue --start --job %d" % self.qid
        process = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
        process.wait()
        if process.returncode != 0: return None

        lines = process.stdout.readlines()
        if len(lines) <= 2: return None

        from datetime import datetime
        for line in lines:
            tokens = line.split()
            if int(tokens[0]) == self.qid:
                date_string = tokens[5]
                if date_string == "N/A": return None
                return datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S")

        return None

    def get_info(self, **kwargs):
        """
        Run ``scontrol`` and update (status, exitcode, signal) of the job.
        Return an AttrDict with exitcode, signal and status, or None if scontrol failed.
        """
        # See https://computing.llnl.gov/linux/slurm/sacct.html
        #If SLURM job ids are reset, some job numbers will
        #probably appear more than once refering to different jobs.
        #Without this option only the most recent jobs will be displayed.

        #state Displays the job status, or state.
        #Output can be RUNNING, RESIZING, SUSPENDED, COMPLETED, CANCELLED, FAILED, TIMEOUT,
        #PREEMPTED or NODE_FAIL. If more information is available on the job state than will fit
        #into the current field width (for example, the uid that CANCELLED a job) the state will be followed by a "+".

        #sacct --job 112367 --format=jobid,exitcode,state --allocations --parsable2
        #JobID|ExitCode|State
        #112367|0:0|RUNNING
        #scontrol show job 800197 --oneliner

        #cmd = "sacct --job %i --format=jobid,exitcode,state --allocations --parsable2" % self.qid
        cmd = "scontrol show job %i --oneliner" % self.qid
        process = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
        process.wait()

        if process.returncode != 0:
            return None

        # NOTE(review): without universal_newlines/text mode, stdout yields bytes
        # on Python 3 — verify the intended Python version for this module.
        line = process.stdout.read()
        tokens = line.split()

        info = AttrDict()
        for line in tokens:
            # NOTE(review): this assumes each token contains exactly one "=";
            # values containing "=" would raise ValueError — confirm scontrol output.
            k, v = line.split("=")
            info[k] = v

        qid = int(info.JobId)
        assert qid == self.qid
        exitcode = info.ExitCode
        status = info.JobState

        if ":" in exitcode:
            exitcode, signal = map(int, exitcode.split(":"))
        else:
            exitcode, signal = int(exitcode), None

        # Strip the trailing "+" appended when the state string is truncated.
        i = status.find("+")
        if i != -1: status = status[:i]

        self.set_status_exitcode_signal(JobStatus.from_string(status), exitcode, signal)
        return AttrDict(exitcode=exitcode, signal=signal, status=status)

    def get_stats(self, **kwargs):
        """Run ``sacct`` and return a dict with the statistics reported for this job."""
        cmd = "sacct --long --job %s --parsable2" % self.qid
        process = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
        process.wait()
        lines = process.stdout.readlines()
        keys = lines[0].strip().split("|")
        values = lines[1].strip().split("|")
        return dict(zip(keys, values))
class PbsProJob(QueueJob):
    """Handler for PbsPro Jobs"""
    # Mapping PbsPro --> Slurm. From `man qstat`
    #
    # S  The job's state:
    #       B  Array job has at least one subjob running.
    #       E  Job is exiting after having run.
    #       F  Job is finished.
    #       H  Job is held.
    #       M  Job was moved to another server.
    #       Q  Job is queued.
    #       R  Job is running.
    #       S  Job is suspended.
    #       T  Job is being moved to new location.
    #       U  Cycle-harvesting job is suspended due to keyboard activity.
    #       W  Job is waiting for its submitter-assigned start time to be reached.
    #       X  Subjob has completed execution or has been deleted.
    # Bug fix: defaultdict calls its default_factory with NO arguments, hence
    # the lambda must not take a parameter (the old `lambda x:` raised TypeError
    # on every missing key).
    PBSSTAT_TO_SLURM = defaultdict(lambda: QueueJob.S_UNKNOWN, [
        ("E", QueueJob.S_FAILED),
        ("F", QueueJob.S_COMPLETED),
        ("Q", QueueJob.S_PENDING),
        ("R", QueueJob.S_RUNNING),
        ("S", QueueJob.S_SUSPENDED),
    ])

    def estimated_start_time(self):
        """Return the estimated start time reported by ``qstat -T``, or None."""
        # qstat -T - Shows the estimated start time for all jobs in the queue.
        #                                                                           Est
        #                                                            Req'd  Req'd   Start
        #Job ID          Username Queue    Jobname    SessID NDS TSK Memory Time  S Time
        #--------------- -------- -------- ---------- ------ --- --- ------ ----- - -----
        #5669001.frontal username large    gs.Pt         --   96  96    --  03:00 Q    --
        # Bug fix: "qstat" and "-T" used to be accidentally concatenated into a
        # single argument "qstat-T" by implicit string-literal joining.
        process = Popen(["qstat", "-T", str(self.qid)], stdout=PIPE, stderr=PIPE)
        process.wait()
        if process.returncode != 0: return None

        line = process.stdout.readlines()[-1]
        sdate = line.split()[-1]
        if sdate in ("--", "?"): return None

        # TODO One should convert to datetime
        return sdate

    def get_info(self, **kwargs):
        """Run ``qstat`` and update the status of the job. Return None on failure."""
        #$> qstat 5666289
        #frontal1:
        #                                                            Req'd  Req'd   Elap
        #Job ID          Username Queue    Jobname    SessID NDS TSK Memory Time  S Time
        #--------------- -------- -------- ---------- ------ --- --- ------ ----- - -----
        #5666289.frontal username main_ivy MorfeoTChk  57546   1   4    --  08:00 R 00:17
        cmd = "qstat %d" % self.qid
        process = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
        # Bug fix: returncode is None until the child has terminated, so the
        # original check `returncode != 0` was always true and get_info always
        # returned None. We must wait() before inspecting the exit status.
        process.wait()

        if process.returncode != 0:
            #print(process.stderr.readlines())
            return None

        out = process.stdout.readlines()[-1]
        status = self.PBSSTAT_TO_SLURM[out.split()[9]]

        # Exit code and signal are not available.
        self.set_status_exitcode_signal(status, None, None)
class SgeJob(QueueJob):
    """Handler for Sun Grid Engine jobs (not supported yet)."""
def all_subclasses(cls):
    """
    Given a class `cls`, this recursive function returns a list with
    all subclasses, subclasses of subclasses, and so on.
    """
    direct = cls.__subclasses__()
    found = list(direct)
    for sub in direct:
        found.extend(all_subclasses(sub))
    return found
def show_qparams(qtype, stream=sys.stdout):
    """Print to the given stream the template of the :class:`QueueAdapter` of type `qtype`."""
    match = next((c for c in all_subclasses(QueueAdapter) if c.QTYPE == qtype), None)
    if match is None:
        raise ValueError("Cannot find class associated to qtype %s" % qtype)
    return stream.write(match.QTEMPLATE)
def all_qtypes():
    """List of all qtypes supported."""
    qtypes = []
    for cls in all_subclasses(QueueAdapter):
        qtypes.append(cls.QTYPE)
    return qtypes
def make_qadapter(**kwargs):
    """
    Return the concrete :class:`QueueAdapter` class from a string.
    Note that one can register a customized version with:

    .. example::

        from qadapters import SlurmAdapter

        class MyAdapter(SlurmAdapter):
            QTYPE = "myslurm"
            # Add your customized code here

        # Register your class.
        SlurmAdapter.register(MyAdapter)

        make_qadapter(qtype="myslurm", **kwargs)

    .. warning::

        MyAdapter should be pickleable, hence one should declare it
        at the module level so that pickle can import it at run-time.
    """
    # Map each registered qtype to the corresponding QueueAdapter subclass.
    qtype2class = {cls.QTYPE: cls for cls in all_subclasses(QueueAdapter)}
    qtype = kwargs["queue"].pop("qtype")
    return qtype2class[qtype](**kwargs)
class QueueAdapterError(Exception):
    """Base Error class for exceptions raised by QueueAdapter."""
class MaxNumLaunchesError(QueueAdapterError):
    """Raised by `submit_to_queue` when a job has been submitted more than `max_num_launches` times."""
class QueueAdapter(six.with_metaclass(abc.ABCMeta, object)):
"""
The `QueueAdapter` is responsible for all interactions with a specific queue management system.
This includes handling all details of queue script format as well as queue submission and management.
This is the **abstract** base class defining the methods that must be implemented by the concrete classes.
Concrete classes should extend this class with implementations that work on specific queue systems.
"""
Error = QueueAdapterError
MaxNumLaunchesError = MaxNumLaunchesError
Job = QueueJob
    @classmethod
    def autodoc(cls):
        """Return a string documenting the sections/options of the YAML configuration file."""
        return """
# dictionary with info on the hardware available on this particular queue.
hardware:
    num_nodes: # Number of nodes available on this queue. Mandatory
    sockets_per_node: # Mandatory.
    cores_per_socket: # Mandatory.
# dictionary with the options used to prepare the enviroment before submitting the job
job:
    setup: # List of commands (str) executed before running (default empty)
    omp_env: # Dictionary with OpenMP env variables (default empty i.e. no OpenMP)
    modules: # List of modules to be imported (default empty)
    shell_env: # Dictionary with shell env variables.
    mpi_runner: # MPI runner i.e. mpirun, mpiexec, Default is None i.e. no mpirunner
    pre_run: # List of commands executed before the run (default: empty)
    post_run: # List of commands executed after the run (default: empty)
# dictionary with the name of the queue and optional parameters
# used to build/customize the header of the submission script.
queue:
    qname: # Name of the queue (mandatory)
    qparams: # Dictionary with values used to generate the header of the job script
        # See pymatgen.io.abinitio.qadapters.py for the list of supported values.
# dictionary with the constraints that must be fulfilled in order to run on this queue.
limits:
    min_cores: # Minimum number of cores (default 1)
    max_cores: # Maximum number of cores (mandatory)
    min_mem_per_proc: # Minimum memory per MPI process in Mb, units can be specified e.g. 1.4 Gb
        # (default hardware.mem_per_core)
    max_mem_per_proc: # Maximum memory per MPI process in Mb, units can be specified e.g. `1.4Gb`
        # (default hardware.mem_per_node)
    condition: # MongoDB-like condition (default empty, i.e. not used)
"""
def __init__(self, **kwargs):
"""
Args:
qname: Name of the queue.
qparams: Dictionary with the parameters used in the template.
setup: String or list of commands to execute during the initial setup.
modules: String or list of modules to load before running the application.
shell_env: Dictionary with the environment variables to export before running the application.
omp_env: Dictionary with the OpenMP variables.
pre_run: String or list of commands to execute before launching the calculation.
post_run: String or list of commands to execute once the calculation is completed.
mpi_runner: Path to the MPI runner or :class:`MpiRunner` instance. None if not used
max_num_launches: Maximum number of submissions that can be done. Defaults to 10
qverbatim:
min_cores, max_cores: Minimum and maximum number of cores that can be used
min_mem_per_proc=Minimun memory per process in megabytes.
max_mem_per_proc=Maximum memory per process in megabytes.
timelimit: Time limit in seconds
priority: Priority level, integer number > 0
allocate_nodes: True if we must allocate entire nodes"
condition: Condition object (dictionary)
.. note::
priority is a non-negative integer used to order the qadapters. The :class:`TaskManager` will
try to run jobs on the qadapter with the highest priority if possible
"""
# TODO
#task_classes
# Make defensive copies so that we can change the values at runtime.
kwargs = copy.deepcopy(kwargs)
self.priority = int(kwargs.pop("priority"))
self.hw = Hardware(**kwargs.pop("hardware"))
self._parse_queue(kwargs.pop("queue"))
self._parse_limits(kwargs.pop("limits"))
self._parse_job(kwargs.pop("job"))
if kwargs:
raise ValueError("Found unknown keywords:\n%s" % kwargs.keys())
self.validate_qparams()
# List of dictionaries with the parameters used to submit jobs
# The launcher will use this information to increase the resources
self.launches, self.max_num_launches = [], kwargs.pop("max_num_launches", 10)
# Initialize some values from the info reported in the partition.
self.set_mpi_procs(self.min_cores)
self.set_mem_per_proc(self.min_mem_per_proc)
# Final consistency check.
self.validate_qparams()
def validate_qparams(self):
"""
Check if the keys specified by the user in qparams are supported.
Raise:
`ValueError` if errors.
"""
# No validation for ShellAdapter.
if isinstance(self, ShellAdapter): return
# Parse the template so that we know the list of supported options.
err_msg = ""
for param in self.qparams:
if param not in self.supported_qparams:
err_msg += "Unsupported QUEUE parameter name %s\n" % param
err_msg += "Supported are: \n"
for param_sup in self.supported_qparams:
err_msg += " %s \n" % param_sup
if err_msg:
raise ValueError(err_msg)
    def _parse_limits(self, d):
        """Parse the ``limits`` section of the configuration dict ``d``, popping the values found."""
        self.set_timelimit(timelimit_parser(d.pop("timelimit")))
        self.min_cores = int(d.pop("min_cores", 1))
        self.max_cores = int(d.pop("max_cores"))
        # FIXME: Needed because autoparal 1 with paral_kgb 1 is not able to estimate memory
        self.min_mem_per_proc = any2mb(d.pop("min_mem_per_proc", self.hw.mem_per_core))
        self.max_mem_per_proc = any2mb(d.pop("max_mem_per_proc", self.hw.mem_per_node))
        #self.allocate_nodes = bool(d.pop("allocate_nodes", False))
        self.condition = Condition(d.pop("condition", {}))
        if d:
            raise ValueError("Found unknown keyword(s) in limits section:\n %s" % d.keys())
def _parse_job(self, d):
setup = d.pop("setup", None)
if is_string(setup): setup = [setup]
self.setup = setup[:] if setup is not None else []
omp_env = d.pop("omp_env", None)
self.omp_env = omp_env.copy() if omp_env is not None else {}
modules = d.pop("modules", None)
if is_string(modules): modules = [modules]
self.modules = modules[:] if modules is not None else []
shell_env = d.pop("shell_env", None)
self.shell_env = shell_env.copy() if shell_env is not None else {}
self.mpi_runner = d.pop("mpi_runner", None)
if not isinstance(self.mpi_runner, MpiRunner):
self.mpi_runner = MpiRunner(self.mpi_runner)
pre_run = d.pop("pre_run", None)
if is_string(pre_run): pre_run = [pre_run]
self.pre_run = pre_run[:] if pre_run is not None else []
post_run = d.pop("post_run", None)
if is_string(post_run): post_run = [post_run]
self.post_run = post_run[:] if post_run is not None else []
if d:
raise ValueError("Found unknown keyword(s) in job section:\n %s" % d.keys())
def _parse_queue(self, d):
# Init params
qparams = d.pop("qparams", None)
self._qparams = copy.deepcopy(qparams) if qparams is not None else {}
self.set_qname(d.pop("qname"))
if d:
raise ValueError("Found unknown keyword(s) in queue section:\n %s" % d.keys())
def __str__(self):
lines = ["%s:%s" % (self.__class__.__name__, self.qname)]
app = lines.append
app("Hardware:\n" + str(self.hw))
#lines.extend(["qparams:\n", str(self.qparams)])
if self.has_omp: app(str(self.omp_env))
return "\n".join(lines)
    @property
    def qparams(self):
        """Dictionary with the parameters used to construct the header of the submission script."""
        return self._qparams
@lazy_property
def supported_qparams(self):
"""
Dictionary with the supported parameters that can be passed to the
queue manager (obtained by parsing QTEMPLATE).
"""
import re
return re.findall("\$\$\{(\w+)\}", self.QTEMPLATE)
    @property
    def has_mpi(self):
        """True if the job uses MPI (i.e. an MPI runner has been specified)."""
        return bool(self.mpi_runner)

    @property
    def has_omp(self):
        """True if the job uses OpenMP threads (i.e. omp_env has been set and is non-empty)."""
        return hasattr(self, "omp_env") and bool(getattr(self, "omp_env"))

    @property
    def num_cores(self):
        """Total number of cores employed: MPI processes times OpenMP threads."""
        return self.mpi_procs * self.omp_threads

    @property
    def omp_threads(self):
        """Number of OpenMP threads (1 if OpenMP is not used)."""
        if self.has_omp:
            return self.omp_env["OMP_NUM_THREADS"]
        else:
            return 1

    @property
    def pure_mpi(self):
        """True if only MPI is used (no OpenMP)."""
        return self.has_mpi and not self.has_omp

    @property
    def pure_omp(self):
        """True if only OpenMP is used (no MPI)."""
        return self.has_omp and not self.has_mpi

    @property
    def hybrid_mpi_omp(self):
        """True if we are running in hybrid MPI+OpenMP mode."""
        return self.has_omp and self.has_mpi

    @property
    def run_info(self):
        """String with info on the run."""
        return "MPI: %d, OMP: %d" % (self.mpi_procs, self.omp_threads)
    def deepcopy(self):
        """Deep copy of the object."""
        return copy.deepcopy(self)

    def record_launch(self, queue_id): # retcode):
        """Record the parameters of this submission and return the number of launches so far."""
        self.launches.append(
            AttrDict(queue_id=queue_id, mpi_procs=self.mpi_procs, omp_threads=self.omp_threads,
            mem_per_proc=self.mem_per_proc, timelimit=self.timelimit))
        return len(self.launches)

    def remove_launch(self, index):
        """Remove launch with the given index."""
        self.launches.pop(index)

    @property
    def num_launches(self):
        """Number of submissions tried with this adapter so far."""
        return len(self.launches)

    @property
    def last_launch(self):
        """Return the last launch, or None if no submission has been recorded yet."""
        if len(self.launches) > 0:
            return self.launches[-1]
        else:
            return None
def validate(self):
"""Validate the parameters of the run. Raises self.Error if invalid parameters."""
errors = []
app = errors.append
if not self.max_cores >= self.mpi_procs * self.omp_threads >= self.min_cores:
app("self.max_cores >= mpi_procs * omp_threads >= self.min_cores not satisfied")
if self.omp_threads > self.hw.cores_per_node:
app("omp_threads > hw.cores_per_node")
if self.mem_per_proc > self.hw.mem_per_node:
app("mem_mb >= self.hw.mem_per_node")
if not self.max_mem_per_proc >= self.mem_per_proc >= self.min_mem_per_proc:
app("self.max_mem_per_proc >= mem_mb >= self.min_mem_per_proc not satisfied")
if self.priority <= 0:
app("priority must be > 0")
if not (1 <= self.min_cores <= self.hw.num_cores >= self.max_cores):
app("1 <= min_cores <= hardware num_cores >= max_cores not satisfied")
if errors:
raise self.Error(str(self) + "\n".join(errors))
    def set_omp_threads(self, omp_threads):
        """Set the number of OpenMP threads."""
        self.omp_env["OMP_NUM_THREADS"] = omp_threads

    @property
    def mpi_procs(self):
        """Number of CPUs used for MPI."""
        return self._mpi_procs

    def set_mpi_procs(self, mpi_procs):
        """Set the number of MPI processes to mpi_procs"""
        self._mpi_procs = mpi_procs

    @property
    def qname(self):
        """The name of the queue."""
        return self._qname

    def set_qname(self, qname):
        """Set the name of the queue."""
        self._qname = qname

    @property
    def timelimit(self):
        """Returns the walltime in seconds."""
        return self._timelimit

    def set_timelimit(self, timelimit):
        """Set the walltime in seconds."""
        self._timelimit = timelimit

    @property
    def mem_per_proc(self):
        """The memory per MPI process in megabytes."""
        return self._mem_per_proc

    def set_mem_per_proc(self, mem_mb):
        """Set the memory per MPI process in megabytes."""
        # Hack needed because abinit is still not able to estimate memory:
        # non-positive values are replaced by the minimum memory per process.
        if mem_mb <= 0:
            mem_mb = self.min_mem_per_proc
        self._mem_per_proc = mem_mb

    @property
    def total_mem(self):
        """Total memory required by the job in megabytes (mem_per_proc * mpi_procs)."""
        return Memory(self.mem_per_proc * self.mpi_procs, "Mb")
    @abc.abstractmethod
    def cancel(self, job_id):
        """
        Cancel the job (must be implemented by the concrete subclass).

        Args:
            job_id: Job identifier.

        Returns:
            Exit status.
        """
def can_run_pconf(self, pconf):
"""True if the qadapter in principle is able to run the :class:`ParalConf` pconf"""
if not self.max_cores >= pconf.num_cores >= self.min_cores: return False
if not self.hw.can_use_omp_threads(self.omp_threads): return False
if pconf.mem_per_proc > self.hw.mem_per_node: return False
return self.condition(pconf)
def distribute(self, mpi_procs, omp_threads, mem_per_proc):
"""
Returns (num_nodes, mpi_per_node)
Aggressive: When Open MPI thinks that it is in an exactly- or under-subscribed mode
(i.e., the number of running processes is equal to or less than the number of available processors),
MPI processes will automatically run in aggressive mode, meaning that they will never voluntarily give
up the processor to other processes. With some network transports, this means that Open MPI will spin
in tight loops attempting to make message passing progress, effectively causing other processes to not get
any CPU cycles (and therefore never make any progress)
"""
class Distrib(namedtuple("Distrib", "num_nodes mpi_per_node exact")):
pass
#@property
#def mem_per_node
# return self.mpi_per_node * mem_per_proc
#def set_nodes(self, nodes):
hw = self.hw
# TODO: Add check on user-memory
if mem_per_proc <= 0:
logger.warning("mem_per_proc <= 0")
mem_per_proc = hw.mem_per_core
if mem_per_proc > hw.mem_per_node:
raise self.Error(
"mem_per_proc > mem_per_node.\n Cannot distribute mpi_procs %d, omp_threads %d, mem_per_proc %s" %
(mpi_procs, omp_threads, mem_per_proc))
# Try to use all then cores in the node.
num_nodes, rest_cores = hw.divmod_node(mpi_procs, omp_threads)
if num_nodes == 0 and mpi_procs * mem_per_proc <= hw.mem_per_node:
# One node is enough
return Distrib(num_nodes=1, mpi_per_node=mpi_procs, exact=True)
if num_nodes == 0: num_nodes = 2
mpi_per_node = mpi_procs // num_nodes
if mpi_per_node * mem_per_proc <= hw.mem_per_node and rest_cores == 0:
# Commensurate with nodes.
return Distrib(num_nodes=num_nodes, mpi_per_node=mpi_per_node, exact=True)
#if mode == "block", "cyclic"
# Try first to pack MPI processors in a node as much as possible
mpi_per_node = int(hw.mem_per_node / mem_per_proc)
assert mpi_per_node != 0
num_nodes = (mpi_procs * omp_threads) // mpi_per_node
print("exact --> false", num_nodes, mpi_per_node)
if mpi_per_node * omp_threads <= hw.cores_per_node and mem_per_proc <= hw.mem_per_node:
return Distrib(num_nodes=num_nodes, mpi_per_node=mpi_per_node, exact=False)
if (mpi_procs * omp_threads) % mpi_per_node != 0:
# Have to reduce the number of MPI procs per node
for mpi_per_node in reversed(range(1, mpi_per_node)):
if mpi_per_node > hw.cores_per_node: continue
num_nodes = (mpi_procs * omp_threads) // mpi_per_node
if (mpi_procs * omp_threads) % mpi_per_node == 0 and mpi_per_node * mem_per_proc <= hw.mem_per_node:
return Distrib(num_nodes=num_nodes, mpi_per_node=mpi_per_node, exact=False)
else:
raise self.Error("Cannot distribute mpi_procs %d, omp_threads %d, mem_per_proc %s" %
(mpi_procs, omp_threads, mem_per_proc))
def optimize_params(self):
"""
This method is called in get_subs_dict. Return a dict with parameters to be added to qparams
Subclasses may provide a specialized version.
"""
logger.debug("optimize_params of baseclass --> no optimization available!!!")
return {}
def get_subs_dict(self):
"""
Return substitution dict for replacements into the template
Subclasses may want to customize this method.
"""
#d = self.qparams.copy()
d = self.qparams
d.update(self.optimize_params())
# clean null values
subs_dict = {k: v for k, v in d.items() if v is not None}
#print("subs_dict:", subs_dict)
return subs_dict
def _make_qheader(self, job_name, qout_path, qerr_path):
"""Return a string with the options that are passed to the resource manager."""
# get substitution dict for replacements into the template
subs_dict = self.get_subs_dict()
# Set job_name and the names for the stderr and stdout of the
# queue manager (note the use of the extensions .qout and .qerr
# so that we can easily locate this file.
subs_dict['job_name'] = job_name.replace('/', '_')
subs_dict['_qout_path'] = qout_path
subs_dict['_qerr_path'] = qerr_path
qtemplate = QScriptTemplate(self.QTEMPLATE)
# might contain unused parameters as leftover $$.
unclean_template = qtemplate.safe_substitute(subs_dict)
# Remove lines with leftover $$.
clean_template = []
for line in unclean_template.split('\n'):
if '$$' not in line:
clean_template.append(line)
return '\n'.join(clean_template)
def get_script_str(self, job_name, launch_dir, executable, qout_path, qerr_path,
stdin=None, stdout=None, stderr=None):
"""
Returns a (multi-line) String representing the queue script, e.g. PBS script.
Uses the template_file along with internal parameters to create the script.
Args:
job_name: Name of the job.
launch_dir: (str) The directory the job will be launched in.
executable: String with the name of the executable to be executed.
qout_path Path of the Queue manager output file.
qerr_path: Path of the Queue manager error file.
"""
# PbsPro does not accept job_names longer than 15 chars.
if len(job_name) > 14 and isinstance(self, PbsProAdapter):
job_name = job_name[:14]
# Construct the header for the Queue Manager.
qheader = self._make_qheader(job_name, qout_path, qerr_path)
# Add the bash section.
se = ScriptEditor()
if self.setup:
se.add_comment("Setup section")
se.add_lines(self.setup)
se.add_emptyline()
if self.modules:
se.add_comment("Load Modules")
se.add_line("module purge")
se.load_modules(self.modules)
se.add_emptyline()
if self.has_omp:
se.add_comment("OpenMp Environment")
se.declare_vars(self.omp_env)
se.add_emptyline()
if self.shell_env:
se.add_comment("Shell Environment")
se.declare_vars(self.shell_env)
se.add_emptyline()
# Cd to launch_dir
se.add_line("cd " + os.path.abspath(launch_dir))
if self.pre_run:
se.add_comment("Commands before execution")
se.add_lines(self.pre_run)
se.add_emptyline()
# Construct the string to run the executable with MPI and mpi_procs.
line = self.mpi_runner.string_to_run(executable, self.mpi_procs,
stdin=stdin, stdout=stdout, stderr=stderr)
se.add_line(line)
if self.post_run:
se.add_emptyline()
se.add_comment("Commands after execution")
se.add_lines(self.post_run)
return qheader + se.get_script_str() + "\n"
def submit_to_queue(self, script_file):
"""
Public API: wraps the concrete implementation _submit_to_queue
Raises:
`self.MaxNumLaunchesError` if we have already tried to submit the job max_num_launches
`self.Error` if generic error
"""
if not os.path.exists(script_file):
raise self.Error('Cannot find script file located at: {}'.format(script_file))
if self.num_launches == self.max_num_launches:
raise self.MaxNumLaunchesError("num_launches %s == max_num_launches %s" % (self.num_launches, self.max_num_launches))
# Call the concrete implementation.
queue_id, process = self._submit_to_queue(script_file)
self.record_launch(queue_id)
if queue_id is None:
submit_err_file = script_file + ".err"
err = str(process.stderr.read())
# Dump the error and raise
with open(submit_err_file, mode='w') as f:
f.write("sbatch submit process stderr:\n" + err)
f.write("qparams:\n" + str(self.qparams))
try:
args = process.args
except AttributeError:
args = ["Unknown",]
raise self.Error("Error in job submission with %s. file %s and args %s\n" %
(self.__class__.__name__, script_file, args) +
"The error response reads:\n %s" % err)
# Here we create a concrete instance of QueueJob
return self.Job(queue_id, self.qname), process
@abc.abstractmethod
def _submit_to_queue(self, script_file):
"""
Submits the job to the queue, probably using subprocess or shutil
This method must be provided by the concrete classes and will be called by submit_to_queue
Args:
script_file: (str) name of the script file to use (String)
Returns:
queue_id, process
"""
def get_njobs_in_queue(self, username=None):
"""
returns the number of jobs in the queue, probably using subprocess or shutil to
call a command like 'qstat'. returns None when the number of jobs cannot be determined.
Args:
username: (str) the username of the jobs to count (default is to autodetect)
"""
if username is None: username = getpass.getuser()
njobs, process = self._get_njobs_in_queue(username=username)
if process is not None and process.returncode != 0:
# there's a problem talking to squeue server?
err_msg = ('Error trying to get the number of jobs in the queue' +
'The error response reads:\n {}'.format(process.stderr.read()))
logger.critical(err_msg)
if not isinstance(self, ShellAdapter):
logger.info('The number of jobs currently in the queue is: {}'.format(njobs))
return njobs
@abc.abstractmethod
def _get_njobs_in_queue(self, username):
"""
Concrete Subclasses must implement this method. Return (njobs, process)
"""
    # Methods to fix problems
    def add_exclude_nodes(self, nodes):
        """Record *nodes* as excluded for this queue name in the module-level
        _EXCL_NODES_FILE registry; returns whatever add_nodes returns."""
        return _EXCL_NODES_FILE.add_nodes(self.qname, nodes)
    def get_exclude_nodes(self):
        """Return the nodes previously registered as excluded for this queue name."""
        return _EXCL_NODES_FILE.read_nodes(self.qname)
    @abc.abstractmethod
    def exclude_nodes(self, nodes):
        """
        Method to exclude nodes in the calculation.
        Concrete implementations return True on success, False otherwise.
        """
def more_mem_per_proc(self, factor=1):
"""
Method to increase the amount of memory asked for, by factor.
Return: new memory if success, 0 if memory cannot be increased.
"""
base_increase = 2000
old_mem = self.mem_per_proc
new_mem = old_mem + factor*base_increase
if new_mem < self.hw.mem_per_node:
self.set_mem_per_proc(new_mem)
return new_mem
logger.warning('could not increase mem_per_proc further')
return 0
def more_mpi_procs(self, factor=1):
"""
Method to increase the number of MPI procs.
Return: new number of processors if success, 0 if processors cannot be increased.
"""
base_increase = 12
new_cpus = self.mpi_procs + factor * base_increase
if new_cpus * self.omp_threads < self.max_cores:
self.set_mpi_procs(new_cpus)
return new_cpus
logger.warning('more_mpi_procs reached the limit')
return 0
####################
# Concrete classes #
####################
class ShellAdapter(QueueAdapter):
    """Simple adapter that "submits" runs by executing them through the shell."""
    QTYPE = "shell"

    QTEMPLATE = """\
#!/bin/bash
$${qverbatim}
"""

    def cancel(self, job_id):
        # Kill the process with SIGKILL; return the exit status of `kill`.
        return os.system("kill -9 %d" % job_id)

    def _submit_to_queue(self, script_file):
        # Run the script with bash; the "queue id" is simply the process pid.
        proc = Popen(("/bin/bash", script_file), stderr=PIPE)
        return proc.pid, proc

    def _get_njobs_in_queue(self, username):
        # A plain shell has no queue: the count is unknown.
        return None, None

    def exclude_nodes(self, nodes):
        # Not meaningful for shell execution.
        return False
class SlurmAdapter(QueueAdapter):
    """Adapter for SLURM."""
    QTYPE = "slurm"

    Job = SlurmJob

    QTEMPLATE = """\
#!/bin/bash
#SBATCH --partition=$${partition}
#SBATCH --job-name=$${job_name}
#SBATCH --nodes=$${nodes}
#SBATCH --total_tasks=$${total_tasks}
#SBATCH --ntasks=$${ntasks}
#SBATCH --ntasks-per-node=$${ntasks_per_node}
#SBATCH --cpus-per-task=$${cpus_per_task}
#SBATCH --mem=$${mem}
#SBATCH --mem-per-cpu=$${mem_per_cpu}
#SBATCH --hint=$${hint}
#SBATCH --time=$${time}
#SBATCH --exclude=$${exclude_nodes}
#SBATCH --account=$${account}
#SBATCH --mail-user=$${mail_user}
#SBATCH --mail-type=$${mail_type}
#SBATCH --constraint=$${constraint}
#SBATCH --gres=$${gres}
#SBATCH --requeue=$${requeue}
#SBATCH --nodelist=$${nodelist}
#SBATCH --propagate=$${propagate}
#SBATCH --licenses=$${licenses}
#SBATCH --output=$${_qout_path}
#SBATCH --error=$${_qerr_path}
$${qverbatim}
"""

    def set_qname(self, qname):
        """Set the queue name and mirror it into the Slurm `partition` option."""
        super(SlurmAdapter, self).set_qname(qname)
        self.qparams["partition"] = qname

    def set_mpi_procs(self, mpi_procs):
        """Set the number of CPUs used for MPI."""
        super(SlurmAdapter, self).set_mpi_procs(mpi_procs)
        self.qparams["ntasks"] = mpi_procs

    def set_omp_threads(self, omp_threads):
        """Set the number of OpenMP threads (Slurm `cpus-per-task`)."""
        super(SlurmAdapter, self).set_omp_threads(omp_threads)
        self.qparams["cpus_per_task"] = omp_threads

    def set_mem_per_proc(self, mem_mb):
        """Set the memory per process in megabytes"""
        super(SlurmAdapter, self).set_mem_per_proc(mem_mb)
        self.qparams["mem_per_cpu"] = int(mem_mb)
        # Remove mem if it's defined: mem and mem_per_cpu are alternatives.
        self.qparams.pop("mem", None)

    def set_timelimit(self, timelimit):
        """Set the walltime, converted to the format expected by Slurm."""
        super(SlurmAdapter, self).set_timelimit(timelimit)
        self.qparams["time"] = time2slurm(timelimit)

    def cancel(self, job_id):
        """Cancel the job via scancel; returns the exit status of the command."""
        return os.system("scancel %d" % job_id)

    def optimize_params(self):
        # No Slurm-specific optimization: the process/node distribution is
        # delegated entirely to the scheduler via the qparams already set.
        return {}

    def _submit_to_queue(self, script_file):
        """Submit a job script to the queue."""
        process = Popen(['sbatch', script_file], stdout=PIPE, stderr=PIPE)
        process.wait()
        # grab the returncode. SLURM returns 0 if the job was successful
        queue_id = None
        if process.returncode == 0:
            try:
                # stdout reads "Submitted batch job <id>": the 4th field is the id.
                queue_id = int(process.stdout.read().split()[3])
                logger.info('Job submission was successful and queue_id is {}'.format(queue_id))
            except Exception:
                # probably error parsing job code
                logger.critical('Could not parse job id following slurm...')
        return queue_id, process

    def exclude_nodes(self, nodes):
        """
        Append *nodes* to the `exclude_nodes` entry of qparams (#SBATCH --exclude).

        Returns True on success, False if the nodes could not be registered.
        """
        try:
            # BUG FIX: the original stored nodes[0] only when the key was
            # missing, silently dropping the first node on every later call.
            if 'exclude_nodes' not in self.qparams:
                self.qparams['exclude_nodes'] = 'node' + nodes[0]
            else:
                self.qparams['exclude_nodes'] += ',node' + nodes[0]
            print('excluded node %s' % nodes[0])
            for node in nodes[1:]:
                self.qparams['exclude_nodes'] += ',node' + node
                print('excluded node %s' % node)
            return True
        except (KeyError, IndexError):
            return False

    def _get_njobs_in_queue(self, username):
        # NOTE(review): '-o "%u"' passes the quotes literally to squeue; the
        # substring test below still matches the username — confirm before changing.
        process = Popen(['squeue', '-o "%u"', '-u', username], stdout=PIPE, stderr=PIPE)
        process.wait()
        njobs = None
        if process.returncode == 0:
            # Count the output lines that contain the username.
            outs = process.stdout.readlines()
            njobs = len([line.split() for line in outs if username in line])
        return njobs, process
class PbsProAdapter(QueueAdapter):
    """Adapter for PbsPro"""
    QTYPE = "pbspro"
# Alternative select lines kept for reference:
#PBS -l select=$${select}:ncpus=$${ncpus}:vmem=$${vmem}mb:mpiprocs=$${mpiprocs}:ompthreads=$${ompthreads}
#PBS -l select=$${select}:ncpus=1:vmem=$${vmem}mb:mpiprocs=1:ompthreads=$${ompthreads}
####PBS -l select=$${select}:ncpus=$${ncpus}:vmem=$${vmem}mb:mpiprocs=$${mpiprocs}:ompthreads=$${ompthreads}
####PBS -l pvmem=$${pvmem}mb
    # Concrete QueueJob class used by submit_to_queue.
    Job = PbsProJob
    QTEMPLATE = """\
#!/bin/bash
#PBS -q $${queue}
#PBS -N $${job_name}
#PBS -A $${account}
#PBS -l select=$${select}
#PBS -l walltime=$${walltime}
#PBS -l model=$${model}
#PBS -l place=$${place}
#PBS -W group_list=$${group_list}
#PBS -M $${mail_user}
#PBS -m $${mail_type}
#PBS -o $${_qout_path}
#PBS -e $${_qerr_path}
$${qverbatim}
"""
    def set_qname(self, qname):
        """Set the queue name and mirror it into the PBS `queue` option."""
        super(PbsProAdapter, self).set_qname(qname)
        self.qparams["queue"] = qname
    def set_timelimit(self, timelimit):
        """Set the walltime, converted to the [hours:minutes:]seconds format."""
        super(PbsProAdapter, self).set_timelimit(timelimit)
        self.qparams["walltime"] = time2pbspro(timelimit)
    def set_mem_per_proc(self, mem_mb):
        """Set the memory per process in megabytes"""
        super(PbsProAdapter, self).set_mem_per_proc(mem_mb)
        # Memory is requested through the select statement (see get_select),
        # so no pvmem/vmem qparams are set here.
        #self.qparams["pvmem"] = int(mem_mb)
        #self.qparams["vmem"] = int(mem_mb)
    def cancel(self, job_id):
        """Cancel the job via qdel; returns the exit status of the command."""
        return os.system("qdel %d" % job_id)
    def optimize_params(self):
        """Return the computed `select` directive to be merged into qparams."""
        return {"select": self.get_select()}
    def get_select(self, ret_dict=False):
        """
        Select is not the most intuitive command. For more info see:

            * http://www.cardiff.ac.uk/arcca/services/equipment/User-Guide/pbs.html
            * https://portal.ivec.org/docs/Supercomputers/PBS_Pro

        If ret_dict is True, return (select_string, select_params) instead of
        just the select string.
        """
        hw, mem_per_proc = self.hw, int(self.mem_per_proc)
        #dist = self.distribute(self.mpi_procs, self.omp_threads, mem_per_proc)
        # NOTE: the triple-quoted block below is dead code (a string literal,
        # never used) kept from an earlier, more elaborate distribution scheme.
        """
        if self.pure_mpi:
            num_nodes, rest_cores = hw.divmod_node(self.mpi_procs, self.omp_threads)
            if num_nodes == 0:
                logger.info("IN_CORE PURE MPI: %s" % self.run_info)
                chunks = 1
                ncpus = rest_cores
                mpiprocs = rest_cores
                vmem = mem_per_proc * ncpus
                ompthreads = 1
            elif rest_cores == 0:
                # Can allocate entire nodes because self.mpi_procs is divisible by cores_per_node.
                logger.info("PURE MPI run commensurate with cores_per_node %s" % self.run_info)
                chunks = num_nodes
                ncpus = hw.cores_per_node
                mpiprocs = hw.cores_per_node
                vmem = ncpus * mem_per_proc
                ompthreads = 1
            else:
                logger.info("OUT-OF-CORE PURE MPI (not commensurate with cores_per_node): %s" % self.run_info)
                chunks = self.mpi_procs
                ncpus = 1
                mpiprocs = 1
                vmem = mem_per_proc
                ompthreads = 1
        elif self.pure_omp:
            # Pure OMP run.
            logger.info("PURE OPENMP run: %s" % self.run_info)
            assert hw.can_use_omp_threads(self.omp_threads)
            chunks = 1
            ncpus = self.omp_threads
            mpiprocs = 1
            vmem = mem_per_proc
            ompthreads = self.omp_threads
        elif self.hybrid_mpi_omp:
            assert hw.can_use_omp_threads(self.omp_threads)
            num_nodes, rest_cores = hw.divmod_node(self.mpi_procs, self.omp_threads)
            #print(num_nodes, rest_cores)
            # TODO: test this
            if rest_cores == 0 or num_nodes == 0:
                logger.info("HYBRID MPI-OPENMP run, perfectly divisible among nodes: %s" % self.run_info)
                chunks = max(num_nodes, 1)
                mpiprocs = self.mpi_procs // chunks
                chunks = chunks
                ncpus = mpiprocs * self.omp_threads
                mpiprocs = mpiprocs
                vmem = mpiprocs * mem_per_proc
                ompthreads = self.omp_threads
            else:
                logger.info("HYBRID MPI-OPENMP, NOT commensurate with nodes: %s" % self.run_info)
                chunks=self.mpi_procs
                ncpus=self.omp_threads
                mpiprocs=1
                vmem= mem_per_proc
                ompthreads=self.omp_threads
        else:
            raise RuntimeError("You should not be here")
        """
        # Live logic: one chunk per MPI process; with OpenMP each chunk gets
        # omp_threads cpus, otherwise a single cpu.
        if not self.has_omp:
            chunks, ncpus, vmem, mpiprocs = self.mpi_procs, 1, self.mem_per_proc, 1
            select_params = AttrDict(chunks=chunks, ncpus=ncpus, mpiprocs=mpiprocs, vmem=int(vmem))
            s = "{chunks}:ncpus={ncpus}:vmem={vmem}mb:mpiprocs={mpiprocs}".format(**select_params)
        else:
            chunks, ncpus, vmem, mpiprocs, ompthreads = self.mpi_procs, self.omp_threads, self.mem_per_proc, 1, self.omp_threads
            select_params = AttrDict(chunks=chunks, ncpus=ncpus, mpiprocs=mpiprocs, vmem=int(vmem), ompthreads=ompthreads)
            s = "{chunks}:ncpus={ncpus}:vmem={vmem}mb:mpiprocs={mpiprocs}:ompthreads={ompthreads}".format(**select_params)
        if ret_dict:
            return s, select_params
        return s
    def _submit_to_queue(self, script_file):
        """Submit a job script to the queue."""
        process = Popen(['qsub', script_file], stdout=PIPE, stderr=PIPE)
        process.wait()
        # grab the return code. PBS returns 0 if the job was successful
        queue_id = None
        if process.returncode == 0:
            try:
                # output should of the form '2561553.sdb' or '352353.jessup' - just grab the first part for job id
                queue_id = int(process.stdout.read().split('.')[0])
            except:
                # probably error parsing job code
                logger.critical("Could not parse job id following qsub...")
        return queue_id, process
    def _get_njobs_in_queue(self, username):
        """Count this user's jobs by parsing the output of `qstat -a -u`."""
        process = Popen(['qstat', '-a', '-u', username], stdout=PIPE, stderr=PIPE)
        process.wait()
        njobs = None
        if process.returncode == 0:
            # parse the result
            # lines should have this form
            # '1339044.sdb username queuename 2012-02-29-16-43 20460 -- -- -- 00:20 C 00:09'
            # count lines that include the username in it
            # TODO: only count running or queued jobs. or rather, *don't* count jobs that are 'C'.
            outs = process.stdout.read().split('\n')
            njobs = len([line.split() for line in outs if username in line])
        return njobs, process
    def exclude_nodes(self, nodes):
        # Not supported for PbsPro yet; callers get False (nothing excluded).
        logger.warning('exluding nodes, not implemented yet in pbs')
        return False
class TorqueAdapter(PbsProAdapter):
    """Adapter for Torque."""
    QTYPE = "torque"

    QTEMPLATE = """\
#!/bin/bash
#PBS -q $${queue}
#PBS -N $${job_name}
#PBS -A $${account}
#PBS -l pmem=$${pmem}mb
####PBS -l mppwidth=$${mppwidth}
#PBS -l nodes=$${nodes}:ppn=$${ppn}
#PBS -l walltime=$${walltime}
#PBS -l model=$${model}
#PBS -l place=$${place}
#PBS -W group_list=$${group_list}
#PBS -M $${mail_user}
#PBS -m $${mail_type}
# Submission environment
#PBS -V
#PBS -o $${_qout_path}
#PBS -e $${_qerr_path}
$${qverbatim}
"""

    def set_mem_per_proc(self, mem_mb):
        """Set the memory per process in megabytes"""
        QueueAdapter.set_mem_per_proc(self, mem_mb)
        self.qparams["pmem"] = mem_mb
        self.qparams["mem"] = mem_mb

    @property
    def mpi_procs(self):
        """Number of MPI processes (nodes x processors-per-node)."""
        return self.qparams.get("nodes", 1)*self.qparams.get("ppn", 1)

    def set_mpi_procs(self, mpi_procs):
        """Set the number of CPUs used for MPI."""
        # BUG FIX: the unbound base-class call was missing `self`
        # (QueueAdapter.set_mpi_procs(mpi_procs)) and raised TypeError.
        QueueAdapter.set_mpi_procs(self, mpi_procs)
        # Torque expresses the allocation as a single node with ppn processors.
        self.qparams["nodes"] = 1
        self.qparams["ppn"] = mpi_procs
class SGEAdapter(QueueAdapter):
    """
    Adapter for Sun Grid Engine (SGE) task submission software.
    See also:
    * https://www.wiki.ed.ac.uk/display/EaStCHEMresearchwiki/How+to+write+a+SGE+job+submission+script
    * http://www.uibk.ac.at/zid/systeme/hpc-systeme/common/tutorials/sge-howto.html
    """
    QTYPE = "sge"
    # Concrete QueueJob class used by submit_to_queue.
    Job = SgeJob
    QTEMPLATE = """\
#!/bin/bash
#$ -account_name $${account_name}
#$ -N $${job_name}
#$ -q $${queue_name}
#$ -pe $${parallel_environment} $${ncpus}
#$ -l h_rt=$${walltime}
# request a per slot memory limit of size bytes.
##$ -l h_vmem=$${mem_per_slot}
##$ -l mf=$${mem_per_slot}
###$ -j no
#$ -M $${mail_user}
#$ -m $${mail_type}
# Submission environment
##$ -S /bin/bash
###$ -cwd # Change to current working directory
###$ -V # Export environment variables into script
#$ -e $${_qerr_path}
#$ -o $${_qout_path}
$${qverbatim}
"""
    def set_qname(self, qname):
        """Set the queue name and mirror it into the SGE `queue_name` option."""
        super(SGEAdapter, self).set_qname(qname)
        self.qparams["queue_name"] = qname
    def set_mpi_procs(self, mpi_procs):
        """Set the number of CPUs used for MPI."""
        super(SGEAdapter, self).set_mpi_procs(mpi_procs)
        self.qparams["ncpus"] = mpi_procs
    def set_omp_threads(self, omp_threads):
        """OpenMP threads are not supported with SGE: only a warning is logged."""
        super(SGEAdapter, self).set_omp_threads(omp_threads)
        logger.warning("Cannot use omp_threads with SGE")
    def set_mem_per_proc(self, mem_mb):
        """Set the memory per process in megabytes"""
        super(SGEAdapter, self).set_mem_per_proc(mem_mb)
        # SGE wants the per-slot memory as a string with an explicit unit.
        self.qparams["mem_per_slot"] = str(int(mem_mb)) + "M"
    def set_timelimit(self, timelimit):
        """Set the walltime (h_rt)."""
        super(SGEAdapter, self).set_timelimit(timelimit)
        # Same convention as pbspro e.g. [hours:minutes:]seconds
        self.qparams["walltime"] = time2pbspro(timelimit)
    def cancel(self, job_id):
        """Cancel the job via qdel; returns the exit status of the command."""
        return os.system("qdel %d" % job_id)
    def _submit_to_queue(self, script_file):
        """Submit a job script to the queue."""
        process = Popen(['qsub', script_file], stdout=PIPE, stderr=PIPE)
        process.wait()
        # grab the returncode. SGE returns 0 if the job was successful
        queue_id = None
        if process.returncode == 0:
            try:
                # output should of the form
                # Your job 1659048 ("NAME_OF_JOB") has been submitted
                queue_id = int(process.stdout.read().split(' ')[2])
            except:
                # probably error parsing job code
                logger.critical("Could not parse job id following qsub...")
        return queue_id, process
    def exclude_nodes(self, nodes):
        """Method to exclude nodes in the calculation"""
        # Not supported for SGE yet; callers get False (nothing excluded).
        logger.warning('exluding nodes, not implemented yet in SGE')
        return False
    def _get_njobs_in_queue(self, username):
        """Count this user's jobs by parsing the output of `qstat -u`."""
        process = Popen(['qstat', '-u', username], stdout=PIPE, stderr=PIPE)
        process.wait()
        njobs = None
        if process.returncode == 0:
            # parse the result
            # lines should contain username
            # count lines that include the username in it
            # TODO: only count running or queued jobs. or rather, *don't* count jobs that are 'C'.
            outs = process.stdout.readlines()
            njobs = len([line.split() for line in outs if username in line])
        return njobs, process
class MOABAdapter(QueueAdapter):
    """Adapter for MOAB. See https://computing.llnl.gov/tutorials/moab/"""
    QTYPE = "moab"

    QTEMPLATE = """\
#!/bin/bash
#MSUB -a $${eligible_date}
#MSUB -A $${account}
#MSUB -c $${checkpoint_interval}
#MSUB -l feature=$${feature}
#MSUB -l gres=$${gres}
#MSUB -l nodes=$${nodes}
#MSUB -l partition=$${partition}
#MSUB -l procs=$${procs}
#MSUB -l ttc=$${ttc}
#MSUB -l walltime=$${walltime}
#MSUB -l $${resources}
#MSUB -p $${priority}
#MSUB -q $${queue}
#MSUB -S $${shell}
#MSUB -N $${job_name}
#MSUB -v $${variable_list}
#MSUB -o $${_qout_path}
#MSUB -e $${_qerr_path}
$${qverbatim}
"""

    def set_mpi_procs(self, mpi_procs):
        """Set the number of CPUs used for MPI."""
        super(MOABAdapter, self).set_mpi_procs(mpi_procs)
        self.qparams["procs"] = mpi_procs

    def set_timelimit(self, timelimit):
        """Set the walltime, converted to the HH:MM:SS format."""
        super(MOABAdapter, self).set_timelimit(timelimit)
        self.qparams["walltime"] = time2slurm(timelimit)

    def set_mem_per_proc(self, mem_mb):
        """Record the memory per process; no MOAB qparam is set yet."""
        super(MOABAdapter, self).set_mem_per_proc(mem_mb)
        #TODO
        #raise NotImplementedError("set_mem_per_cpu")

    def exclude_nodes(self, nodes):
        # Not supported for MOAB yet; callers get False (nothing excluded).
        logger.warning('exluding nodes, not implemented yet in MOAB')
        return False

    def cancel(self, job_id):
        """Cancel the job via canceljob; returns the exit status of the command."""
        return os.system("canceljob %d" % job_id)

    def _submit_to_queue(self, script_file):
        """Submit a job script to the queue."""
        process = Popen(['msub', script_file], stdout=PIPE, stderr=PIPE)
        process.wait()
        queue_id = None
        if process.returncode == 0:
            # grab the returncode. MOAB returns 0 if the job was successful
            try:
                # output should be the queue_id
                queue_id = int(process.stdout.read().split()[0])
            except Exception:
                # probably error parsing job code
                logger.critical('Could not parse job id following msub...')
        return queue_id, process

    def _get_njobs_in_queue(self, username):
        """Count this user's jobs by parsing the summary line of `showq -s -u`."""
        # BUG FIX: '-s -u' was passed as a SINGLE argv element, so showq
        # received the literal argument "-s -u" instead of the two flags.
        process = Popen(['showq', '-s', '-u', username], stdout=PIPE, stderr=PIPE)
        process.wait()
        njobs = None
        if process.returncode == 0:
            # parse the result
            # lines should have this form:
            ##
            ## active jobs: N  eligible jobs: M  blocked jobs: P
            ##
            ## Total job: 1
            ##
            # Split the output string and return the last element.
            out = process.stdout.readlines()[-1]
            njobs = int(out.split()[-1])
        return njobs, process
class QScriptTemplate(string.Template):
    # Queue templates write placeholders as $${name}; using '$$' as the
    # delimiter lets single '$' (common in shell scripts) pass through untouched.
    delimiter = '$$'
|
""" Some useful array utilities. """
from six.moves import range
import numpy as np
from itertools import product
#def to_slice(idxs):
#"""Convert an index array or list to a slice if possible. Otherwise,
#return the index array or list.
#"""
#if isinstance(idxs, slice):
#return idxs
#elif isinstance(idxs, ndarray) or isinstance(idxs, list):
#if len(idxs) == 1:
#return slice(idxs[0], idxs[0]+1)
#elif len(idxs) == 0:
#return slice(0,0)
#if isinstance(idxs, ndarray):
#imin = idxs.min()
#imax = idxs.max()
#else:
#imin = min(idxs)
#imax = max(idxs)
#stride = idxs[1]-idxs[0]
#if stride == 0:
#return idxs
#for i in range(len(idxs)):
#if i and idxs[i] - idxs[i-1] != stride:
#return idxs
#if stride < 0:
### negative strides cause some failures, so just do positive for now
##return slice(imax+1, imin, stride)
#return idxs
#else:
#return slice(imin, imax+1, stride)
#elif isinstance(idxs, int_types):
#return slice(idxs, idxs+1)
#else:
#raise RuntimeError("can't convert indices of type '%s' to a slice" %
#str(type(idxs)))
def array_idx_iter(shape):
    """
    Return an iterator over the indices into a n-dimensional array.

    Args
    ----
    shape : tuple
        shape of the array.
    """
    axis_ranges = (range(extent) for extent in shape)
    for index_tuple in product(*axis_ranges):
        yield index_tuple
def evenly_distrib_idxs(num_divisions, arr_size):
    """
    Given a number of divisions and the size of an array, chop the array up
    into pieces according to number of divisions, keeping the distribution
    of entries as even as possible.

    Args
    ----
    num_divisions : int
        Number of parts to divide the array into.
    arr_size : int
        Number of entries in the array.

    Returns
    -------
    tuple
        a tuple of (sizes, offsets), where sizes and offsets contain values for all
        divisions.
    """
    # BUG FIX: use floor division. Under Python 3 the original `/` produced a
    # float `base`, silently turning `sizes` into a float array.
    base = arr_size // num_divisions
    leftover = arr_size % num_divisions
    sizes = np.ones(num_divisions, dtype="int") * base
    # evenly distribute the remainder across the first `leftover` divisions,
    # instead of giving the whole remainder to one proc
    sizes[:leftover] += 1
    # offsets are the running sum of the sizes, starting at 0
    offsets = np.zeros(num_divisions, dtype="int")
    offsets[1:] = np.cumsum(sizes)[:-1]
    return sizes, offsets
|
"""
Implement a subset of the APL programming language.
Supports the monadic/dyadic functions +-×÷⌈⌊⊢⊣⍳<≤=≥>≠~⊂⍴∧∨⍲⍱⊥⊤ ;
Supports (negative) integers/floats/complex numbers and vectors of those ;
Supports the monadic operators ⍨ and ¨ ;
Supports the dyadic operators ∘ (only functions as operands), ⍤ and ⍥ ;
Supports parenthesized expressions ;
Supports multiple expressions separated by ⋄ ;
Supports comments with ⍝ ;
This is the grammar supported:
program ::= EOF statement_list
statement_list ::= (statement "⋄")* statement
statement ::= ( ID "←" | vector function | function )* vector
function ::= function mop | function dop f | f
dop ::= "∘" | "⍤" | "⍥"
mop ::= "⍨" | "¨"
f ::= "+" | "-" | "×" | "÷" | "⌈" | "⌊" |
| "⊢" | "⊣" | "⍳" | "<" | "≤" | "=" |
| "≥" | ">" | "≠" | "~" | "⊂" | "⍴" |
| "∧" | "∨" | "⍲" | "⍱" | "⊥" | "⊤" | LPARENS function RPARENS
vector ::= vector* ( scalar | ( LPARENS statement RPARENS ) )
scalar ::= INTEGER | FLOAT | COMPLEX | ID
"""
# pylint: disable=invalid-name
import argparse
import traceback
from typing import List
import doperators
import functions
import moperators
from arraymodel import APLArray
class Token:
    """Represents a token parsed from the source code.

    Also serves as the namespace for all token-type constants and the
    character-to-token-type mapping used by the Tokenizer.
    """
    # "Data types"
    INTEGER = "INTEGER"
    FLOAT = "FLOAT"
    COMPLEX = "COMPLEX"
    ID = "ID"
    # Functions
    PLUS = "PLUS"
    MINUS = "MINUS"
    TIMES = "TIMES"
    DIVIDE = "DIVIDE"
    CEILING = "CEILING"
    FLOOR = "FLOOR"
    RIGHT_TACK = "RIGHT_TACK"
    LEFT_TACK = "LEFT_TACK"
    IOTA = "IOTA"
    LESS = "LESS"
    LESSEQ = "LESSEQ"
    EQ = "EQ"
    GREATEREQ = "GREATEREQ"
    GREATER = "GREATER"
    NEQ = "NEQ"
    WITHOUT = "WITHOUT"
    LSHOE = "LSHOE"
    RHO = "RHO"
    # NOTE(review): the trailing underscore in these two values looks
    # deliberate (presumably to avoid clashing with other names) — confirm.
    AND = "AND_"
    OR = "OR_"
    NAND = "NAND"
    NOR = "NOR"
    DECODE = "DECODE"
    ENCODE = "ENCODE"
    # Operators
    COMMUTE = "COMMUTE"
    DIAERESIS = "DIAERESIS"
    JOT = "JOT"
    ATOP = "ATOP"
    OVER = "OVER"
    # Misc
    DIAMOND = "DIAMOND"
    NEGATE = "NEGATE"
    ASSIGNMENT = "ASSIGNMENT"
    LPARENS = "LPARENS"
    RPARENS = "RPARENS"
    EOF = "EOF"
    # Helpful lists of token types.
    FUNCTIONS = [
        PLUS, MINUS, TIMES, DIVIDE, FLOOR, CEILING, RIGHT_TACK, LEFT_TACK, IOTA,
        LESS, LESSEQ, EQ, GREATEREQ, GREATER, NEQ, WITHOUT, LSHOE, RHO, AND, OR,
        NAND, NOR, DECODE, ENCODE,
    ]
    MONADIC_OPS = [COMMUTE, DIAERESIS]
    DYADIC_OPS = [JOT, ATOP, OVER]
    # Tokens that could be inside an array.
    ARRAY_TOKENS = [INTEGER, FLOAT, COMPLEX, ID]
    # What You See Is What You Get characters that correspond to tokens.
    # The mapping from characters to token types.
    WYSIWYG_MAPPING = {
        "+": PLUS,
        "-": MINUS,
        "×": TIMES,
        "÷": DIVIDE,
        "⌈": CEILING,
        "⌊": FLOOR,
        "⊢": RIGHT_TACK,
        "⊣": LEFT_TACK,
        "⍳": IOTA,
        "<": LESS,
        "≤": LESSEQ,
        "=": EQ,
        "≥": GREATEREQ,
        ">": GREATER,
        "≠": NEQ,
        "~": WITHOUT,
        "⊂": LSHOE,
        "⍴": RHO,
        "∧": AND,
        "∨": OR,
        "⍲": NAND,
        "⍱": NOR,
        "⊥": DECODE,
        "⊤": ENCODE,
        "⍨": COMMUTE,
        "¨": DIAERESIS,
        "∘": JOT,
        "⍤": ATOP,
        "⍥": OVER,
        "←": ASSIGNMENT,
        "(": LPARENS,
        ")": RPARENS,
        "⋄": DIAMOND,
        "\n": DIAMOND,
    }
    # Characters allowed in identifiers.
    ID_CHARS = "_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
    def __init__(self, type_, value):
        # type_: one of the token-type constants above; value: the parsed value.
        self.type = type_
        self.value = value
    def __str__(self):
        return f"Token({self.type}, {self.value})"
    def __repr__(self):
        return self.__str__()
    def __eq__(self, other):
        # Two tokens are equal iff both type and value match.
        return (
            isinstance(other, Token)
            and (self.type, self.value) == (other.type, other.value)
        )
class Tokenizer:
    """Class that tokenizes source code into tokens."""

    def __init__(self, code):
        self.code = code
        self.pos = 0
        # BUG FIX: guard against empty input — the original unconditional
        # indexing raised IndexError for "".
        self.current_char = self.code[self.pos] if self.code else None

    def error(self, message):
        """Raises a Tokenizer error."""
        raise Exception(f"TokenizerError: {message}")

    def advance(self):
        """Advances the cursor position and sets the current character."""
        self.pos += 1
        self.current_char = None if self.pos >= len(self.code) else self.code[self.pos]

    def skip_whitespace(self):
        """Skips all the whitespace in the source code."""
        while self.current_char and self.current_char in " \t":
            self.advance()

    def skip_comment(self):
        """Skips commented code (from ⍝ up to, but not including, the newline)."""
        if not self.current_char == "⍝":
            return
        while self.current_char and self.current_char != "\n":
            self.advance()

    def get_integer(self):
        """Parses an integer from the source code; "0" if no digits are present."""
        start_idx = self.pos
        while self.current_char and self.current_char.isdigit():
            self.advance()
        return self.code[start_idx:self.pos] or "0"

    def get_real_number(self):
        """Parses a real number, handling the high-minus (¯) negation sign."""
        # Check for a negation of the number.
        if self.current_char == "¯":
            self.advance()
            int_ = "-" + self.get_integer()
        else:
            int_ = self.get_integer()
        # Check if we have a decimal number here.
        if self.current_char == ".":
            self.advance()
            dec_ = self.get_integer()
        else:
            dec_ = "0"
        # A zero decimal part means the value is really an int (e.g. "3." -> 3).
        if int(dec_):
            return float(f"{int_}.{dec_}")
        else:
            return int(int_)

    def get_number_token(self):
        """Parses a number token (INTEGER, FLOAT or COMPLEX with a J separator)."""
        real = self.get_real_number()
        if self.current_char == "J":
            self.advance()
            im = self.get_real_number()
        else:
            im = 0
        if im:
            tok = Token(Token.COMPLEX, complex(real, im))
        elif isinstance(real, int):
            tok = Token(Token.INTEGER, real)
        elif isinstance(real, float):
            tok = Token(Token.FLOAT, real)
        else:
            self.error("Cannot recognize type of number.")
        return tok

    def get_id_token(self):
        """Retrieves an identifier token."""
        start = self.pos
        while self.current_char and self.current_char in Token.ID_CHARS:
            self.advance()
        return Token(Token.ID, self.code[start:self.pos])

    def get_wysiwyg_token(self):
        """Retrieves a WYSIWYG token (one character maps directly to one token)."""
        char = self.current_char
        self.advance()
        try:
            return Token(Token.WYSIWYG_MAPPING[char], char)
        except KeyError:
            self.error("Could not parse WYSIWYG token.")

    def get_next_token(self):
        """Finds the next token in the source code."""
        self.skip_whitespace()
        self.skip_comment()
        if not self.current_char:
            return Token(Token.EOF, None)
        if self.current_char in "¯.0123456789":
            return self.get_number_token()
        if self.current_char in Token.ID_CHARS:
            return self.get_id_token()
        if self.current_char in Token.WYSIWYG_MAPPING:
            return self.get_wysiwyg_token()
        self.error("Could not parse the next token...")

    def tokenize(self):
        """Returns the whole token list, with the EOF token moved to the front
        (the parser consumes the token stream from right to left)."""
        tokens = [self.get_next_token()]
        while tokens[-1].type != Token.EOF:
            tokens.append(self.get_next_token())
        # Move the EOF token to the beginning of the list.
        return [tokens[-1]] + tokens[:-1]
class ASTNode:
"""Stub class to be inherited by the different types of AST nodes.
The AST Nodes are used by the Parser instances to build an
Abstract Syntax Tree out of the APL programs.
These ASTs can then be traversed to interpret an APL program.
"""
def __repr__(self):
return self.__str__()
class S(ASTNode):
    """Leaf node for a simple scalar like 3 or ¯4.2"""
    def __init__(self, token: Token):
        self.token = token
        self.value = token.value
    def __str__(self):
        return "S({})".format(self.value)
class V(ASTNode):
    """Node for a stranded vector of simple scalars, like 3 ¯4 5.6"""
    def __init__(self, children: List[ASTNode]):
        self.children = children
    def __str__(self):
        return "V({})".format(self.children)
class MOp(ASTNode):
    """Node for a monadic operator (e.g. ⍨) applied to a single operand."""
    def __init__(self, token: Token, child: ASTNode):
        self.token = token
        self.operator = token.value
        self.child = child
    def __str__(self):
        return "MOp({} {})".format(self.operator, self.child)
class DOp(ASTNode):
    """Node for a dyadic operator (e.g. ∘) with left and right operands."""
    def __init__(self, token: Token, left: ASTNode, right: ASTNode):
        self.token = token
        self.operator = token.value
        self.left = left
        self.right = right
    def __str__(self):
        # NOTE: the historical spelling "DOP" (all caps) is kept on purpose.
        return "DOP({} {} {})".format(self.left, self.operator, self.right)
class F(ASTNode):
    """AST node for a primitive (built-in) function such as ``+`` or ``⌈``."""

    def __init__(self, token: Token):
        self.token = token
        # The function glyph carried by the token.
        self.function = token.value

    def __str__(self):
        return "F({})".format(self.function)
class Monad(ASTNode):
    """AST node for a monadic (one-argument) function application."""

    def __init__(self, function: ASTNode, omega: ASTNode):
        self.function = function
        # ``omega`` is APL's conventional name for the right/only argument.
        self.omega = omega

    def __str__(self):
        return "Monad({} {})".format(self.function, self.omega)
class Dyad(ASTNode):
    """AST node for a dyadic (two-argument) function application."""

    def __init__(self, function: ASTNode, alpha: ASTNode, omega: ASTNode):
        self.function = function
        # ``alpha``/``omega`` are APL's names for the left/right arguments.
        self.alpha = alpha
        self.omega = omega

    def __str__(self):
        return "Dyad({} {} {})".format(self.function, self.alpha, self.omega)
class Assignment(ASTNode):
    """AST node for an assignment expression ``name ← value``."""

    def __init__(self, varname: ASTNode, value: ASTNode):
        # ``varname`` is a Var node; ``value`` is the assigned expression.
        self.varname = varname
        self.value = value

    def __str__(self):
        return "Assignment({} ← {})".format(self.varname.token.value, self.value)
class Var(ASTNode):
    """AST node for a reference to a named variable."""

    def __init__(self, token: Token):
        self.token = token
        # The variable's identifier string.
        self.name = token.value

    def __str__(self):
        return "Var({})".format(self.token.value)
class Statements(ASTNode):
    """AST node holding a series of consecutive (diamond-separated) statements."""

    def __init__(self):
        # Filled in by the parser once every statement has been parsed.
        self.children = []

    def __str__(self):
        return str(self.children)
class Parser:
    """Implements a parser for a subset of the APL language.

    The grammar parsed is available at the module-level docstring.

    APL evaluates expressions right-to-left, so this parser consumes the
    token list from the END towards the beginning: ``pos`` starts at the
    last token and ``eat`` moves it left. The tokenizer helps by placing
    the EOF token at index 0.
    """
    def __init__(self, tokenizer, debug=False):
        self.tokens = tokenizer.tokenize()
        # Start at the rightmost token (right-to-left consumption).
        self.pos = len(self.tokens) - 1
        self.token_at = self.tokens[self.pos]
        self.debug_on = debug
    def debug(self, message):
        """If the debugging option is on, print a message."""
        if self.debug_on:
            print(f"PD @ {message}")
    def error(self, message):
        """Throws a Parser-specific error message."""
        raise Exception(f"Parser: {message}")
    def eat(self, token_type):
        """Checks if the current token matches the expected token type."""
        if self.token_at.type != token_type:
            self.error(f"Expected {token_type} and got {self.token_at.type}.")
        else:
            # Consume by moving left; token_at becomes None past the start.
            self.pos -= 1
            self.token_at = None if self.pos < 0 else self.tokens[self.pos]
    def peek(self):
        """Returns the next token type without consuming it."""
        peek_at = self.pos - 1
        return None if peek_at < 0 else self.tokens[peek_at].type
    def peek_beyond_parens(self):
        """Returns the next token type that is not a right parenthesis."""
        peek_at = self.pos - 1
        while peek_at >= 0 and self.tokens[peek_at].type == Token.RPARENS:
            peek_at -= 1
        return None if peek_at < 0 else self.tokens[peek_at].type
    def parse_program(self):
        """Parses a full program."""
        self.debug(f"Parsing program from {self.tokens}")
        statement_list = self.parse_statement_list()
        # EOF sits at index 0, so it is the last token consumed.
        self.eat(Token.EOF)
        return statement_list
    def parse_statement_list(self):
        """Parses a list of statements.

        Because tokens are consumed right-to-left, ``root.children`` holds
        the statements in REVERSE source order (rightmost first).
        """
        self.debug(f"Parsing a statement list from {self.tokens}")
        root = Statements()
        statements = [self.parse_statement()]
        while self.token_at.type == Token.DIAMOND:
            self.eat(Token.DIAMOND)
            statements.append(self.parse_statement())
        root.children = statements
        return root
    def parse_statement(self):
        """Parses a statement.

        Builds the tree from right to left: each assignment arrow, function,
        or operator found to the LEFT wraps the statement parsed so far.
        """
        self.debug(f"Parsing statement from {self.tokens[:self.pos+1]}")
        relevant_types = [Token.ASSIGNMENT, Token.RPARENS] + Token.FUNCTIONS + Token.MONADIC_OPS
        statement = self.parse_vector()
        while self.token_at.type in relevant_types:
            if self.token_at.type == Token.ASSIGNMENT:
                # After eating ←, the current token is the target identifier.
                self.eat(Token.ASSIGNMENT)
                statement = Assignment(Var(self.token_at), statement)
                self.eat(Token.ID)
            else:
                function = self.parse_function()
                # An array to the left makes the call dyadic, else monadic.
                if self.token_at.type in [Token.RPARENS] + Token.ARRAY_TOKENS:
                    array = self.parse_vector()
                    statement = Dyad(function, array, statement)
                else:
                    statement = Monad(function, statement)
        return statement
    def parse_vector(self):
        """Parses a vector composed of possibly several simple scalars."""
        self.debug(f"Parsing vector from {self.tokens[:self.pos+1]}")
        nodes = []
        while self.token_at.type in Token.ARRAY_TOKENS + [Token.RPARENS]:
            if self.token_at.type == Token.RPARENS:
                # A parenthesised expression joins the strand only if,
                # beyond the parens, another array element follows.
                if self.peek_beyond_parens() in Token.ARRAY_TOKENS:
                    self.eat(Token.RPARENS)
                    nodes.append(self.parse_statement())
                    self.eat(Token.LPARENS)
                else:
                    break
            else:
                nodes.append(self.parse_scalar())
        # Collected right-to-left; restore source order.
        nodes = nodes[::-1]
        if not nodes:
            self.error("Failed to parse scalars inside a vector.")
        elif len(nodes) == 1:
            # A one-element strand is just that element, not a vector.
            node = nodes[0]
        else:
            node = V(nodes)
        return node
    def parse_scalar(self):
        """Parses a simple scalar (variable reference or numeric literal)."""
        self.debug(f"Parsing scalar from {self.tokens[:self.pos+1]}")
        if self.token_at.type == Token.ID:
            scalar = Var(self.token_at)
            self.eat(Token.ID)
        elif self.token_at.type == Token.INTEGER:
            scalar = S(self.token_at)
            self.eat(Token.INTEGER)
        elif self.token_at.type == Token.FLOAT:
            scalar = S(self.token_at)
            self.eat(Token.FLOAT)
        else:
            # Anything else must be a complex literal; eat() errors if not.
            scalar = S(self.token_at)
            self.eat(Token.COMPLEX)
        return scalar
    def parse_function(self):
        """Parses a (derived) function, including operator applications."""
        self.debug(f"Parsing function from {self.tokens[:self.pos+1]}")
        if self.token_at.type in Token.MONADIC_OPS:
            # Monadic operator: its operand is the function to its left.
            function = self.parse_mop()
            function.child = self.parse_function()
        else:
            function = self.parse_f()
            # A dyadic operator between two functions, e.g. f∘g.
            if self.token_at.type in Token.DYADIC_OPS:
                dop = DOp(self.token_at, None, function)
                self.eat(dop.token.type)
                dop.left = self.parse_function()
                function = dop
        return function
    def parse_mop(self):
        """Parses a monadic operator (child filled in by the caller)."""
        self.debug(f"Parsing a mop from {self.tokens[:self.pos+1]}")
        mop = MOp(self.token_at, None)
        if (t := self.token_at.type) not in Token.MONADIC_OPS:
            self.error(f"{t} is not a valid monadic operator.")
        self.eat(t)
        return mop
    def parse_f(self):
        """Parses a simple one-character function or a parenthesised function."""
        self.debug(f"Parsing f from {self.tokens[:self.pos+1]}")
        if (t := self.token_at.type) in Token.FUNCTIONS:
            f = F(self.token_at)
            self.eat(t)
        else:
            # Right-to-left, so ) is consumed before the inner function.
            self.eat(Token.RPARENS)
            f = self.parse_function()
            self.eat(Token.LPARENS)
        return f
    def parse(self):
        """Parses the whole AST."""
        return self.parse_program()
class NodeVisitor:
    """Base class implementing the visitor-pattern dispatch."""

    def visit(self, node):
        """Dispatch to ``visit_<NodeClassName>``, or ``generic_visit``."""
        handler = getattr(self, f"visit_{type(node).__name__}", self.generic_visit)
        return handler(node)

    def generic_visit(self, node):
        """Fallback for node types without a dedicated visit method."""
        raise Exception(f"No visit method for {type(node).__name__}")
class Interpreter(NodeVisitor):
    """APL interpreter using the visitor pattern.

    Relies on APLArray plus the ``functions``, ``moperators`` and
    ``doperators`` lookup modules, all defined/imported elsewhere in this
    module.
    """
    def __init__(self, parser):
        self.parser = parser
        # Maps variable names to their APLArray values.
        self.var_lookup = {}
    def visit_S(self, scalar):
        """Returns the value of a scalar (a rank-0 APLArray)."""
        return APLArray([], [scalar.value])
    def visit_V(self, array):
        """Returns the value of an array (rank-1, one item per child)."""
        scalars = [self.visit(child) for child in array.children]
        return APLArray([len(scalars)], scalars)
    def visit_Var(self, var):
        """Tries to fetch the value of a variable.

        Raises KeyError if the variable was never assigned.
        """
        return self.var_lookup[var.name]
    def visit_Statements(self, statements):
        """Visits each statement in order.

        The parser stores statements right-to-left, so reversing executes
        them in source order; the value of the LAST statement is returned.
        """
        return [self.visit(child) for child in statements.children[::-1]][-1]
    def visit_Assignment(self, assignment):
        """Assigns a value to a variable and returns the value."""
        value = self.visit(assignment.value)
        varname = assignment.varname.name
        self.var_lookup[varname] = value
        return value
    def visit_Monad(self, monad):
        """Evaluate the function on its only argument."""
        function = self.visit(monad.function)
        omega = self.visit(monad.omega)
        return function(omega=omega)
    def visit_Dyad(self, dyad):
        """Evaluate a dyad on both its arguments.

        The right argument is evaluated first, matching APL's
        right-to-left evaluation order.
        """
        function = self.visit(dyad.function)
        omega = self.visit(dyad.omega)
        alpha = self.visit(dyad.alpha)
        return function(alpha=alpha, omega=omega)
    def visit_F(self, func):
        """Fetch the callable implementing a primitive function.

        Primitives live in the ``functions`` module under the lowercased
        token-type name.
        """
        name = func.token.type.lower()
        function = getattr(functions, name, None)
        if function is None:
            raise Exception(f"Could not find function {name}.")
        return function
    def visit_MOp(self, mop):
        """Fetch the operand and alter it with the monadic operator."""
        aalpha = self.visit(mop.child)
        name = mop.token.type.lower()
        operator = getattr(moperators, name, None)
        if operator is None:
            raise Exception(f"Could not find monadic operator {name}.")
        return operator(aalpha=aalpha)
    def visit_DOp(self, dop):
        """Fetch both operands and alter them with the dyadic operator."""
        oomega = self.visit(dop.right)
        aalpha = self.visit(dop.left)
        name = dop.token.type.lower()
        operator = getattr(doperators, name, None)
        if operator is None:
            raise Exception(f"Could not find dyadic operator {name}.")
        return operator(aalpha=aalpha, oomega=oomega)
    def interpret(self):
        """Interpret the APL code the parser was given."""
        tree = self.parser.parse()
        return self.visit(tree)
if __name__ == "__main__":
    # Command-line driver: REPL, inline expressions, or (not yet) a file.
    arg_parser = argparse.ArgumentParser(description="Parse and interpret an APL program.")
    arg_parser.add_argument("-d", "--debug", action="store_true")
    main_group = arg_parser.add_mutually_exclusive_group()
    main_group.add_argument("--repl", action="store_true", help="starts a REPL session")
    main_group.add_argument(
        "-f",
        "--file",
        nargs=1,
        metavar="filename",
        type=str,
        help="filename with code to parse and interpret",
    )
    main_group.add_argument(
        "-c",
        "--code",
        nargs="+",
        metavar="expression",
        type=str,
        help="expression(s) to be interpreted",
    )
    args = arg_parser.parse_args()

    def run(source):
        """Tokenize, parse and interpret a single source string."""
        return Interpreter(Parser(Tokenizer(source), debug=args.debug)).interpret()

    if args.repl:
        print("Please notice that, from one input line to the next, variables aren't stored (yet).")
        # An empty input line ends the session.
        while inp := input(" >> "):
            try:
                print(run(inp))
            except Exception:
                traceback.print_exc()
    elif args.code:
        for expr in args.code:
            print(f"{expr} :")
            print(run(expr))
    elif args.file:
        print("Not implemented yet...")
    else:
        arg_parser.print_usage()
|
from elasticsearch import Elasticsearch
import ast
import nltk
import string
import sys
# specific json text parsing for this data-set. Must re-implement for other data sets
# Returns a list of review text
def pull_data(query_size=50):
    """Query Elasticsearch for Yelp reviews, one query per star rating (1-5).

    The first half of each result page goes into the training set and the
    second half into the test set.

    :param query_size: number of hits requested per star rating.
    :return: (train_set, test_set), each a list of (review_text, stars).
    """
    es = Elasticsearch(['localhost:9200'], http_auth=('elastic', 'changeme'))
    train_set = []
    test_set = []
    # Floor division: plain ``/`` is a float on Python 3, which breaks range().
    half = query_size // 2
    for i in range(1, 6):
        res = es.search(index='yelp', q='stars:' + str(i), size=query_size)
        hits = res['hits']['hits']
        for j in range(0, half):
            try:
                train_set.append((hits[j]['_source']['text'], hits[j]['_source']['stars']))
            except (IndexError, KeyError):
                # Fewer hits than requested, or a hit missing text/stars:
                # skip it (best effort), but no longer swallow every error.
                pass
        for j in range(half, query_size):
            try:
                test_set.append((hits[j]['_source']['text'], hits[j]['_source']['stars']))
            except (IndexError, KeyError):
                pass
    return train_set, test_set
# Returns a dictionary of the word features of a string
def word_feats(words, n=1):
    """Return a bag-of-n-grams feature dict ({gram: True}) for a string."""
    tokenizer = nltk.tokenize.RegexpTokenizer(r'\w+')
    tokens = tokenizer.tokenize(words.lower())
    if n > 1:
        # Space-joined sliding windows of n consecutive tokens.
        grams = [" ".join(tokens[i:i + n]) for i in range(len(tokens) - n + 1)]
        return dict((gram, True) for gram in grams)
    return dict((token, True) for token in tokens)
# argv[1] = number of reviews to pull from db
# argv[2] = top n most informative features
# argv[3] = n for n-gram (optional, default to unigram if omitted)
# NOTE: this is a Python 2 script (print statement below).
if __name__ == "__main__":
    train_set, test_set = pull_data(int(sys.argv[1]))
    n_gram = 1
    if len(sys.argv) > 3:
        n_gram = int(sys.argv[3])
    # Convert raw (text, stars) pairs into (feature-dict, stars) pairs.
    train_set = [(word_feats(review, n_gram), stars) for (review, stars) in train_set]
    test_set = [(word_feats(review, n_gram), stars) for (review, stars) in test_set]
    classifier = nltk.NaiveBayesClassifier.train(train_set)
    print 'accuracy:', nltk.classify.util.accuracy(classifier, test_set)
    # NOTE(review): this bare call discards its result — dead statement?
    classifier.labels()
    classifier.show_most_informative_features(int(sys.argv[2]))
|
#
# Copyright (c) 2019, 2020 Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fdk import constants
from fdk import response
class DispatchException(Exception):
    """Error that knows how to render itself as an FDK JSON response."""

    def __init__(self, ctx, status, message):
        """
        JSON response with error

        :param ctx: request context forwarded to the response
        :param status: HTTP status code
        :param message: error message used as the response body
        """
        self.ctx = ctx
        self.status = status
        self.message = message

    def response(self):
        """Build an fdk Response carrying this error."""
        headers = {constants.CONTENT_TYPE: "application/json; charset=utf-8"}
        return response.Response(
            self.ctx,
            response_data=self.message,
            headers=headers,
            status_code=self.status,
        )
|
"""Test wrong feature syntax."""
import os.path
import re
import sys
import mock
import pytest
from pytest_bdd import scenario, scenarios, given, when, then
from pytest_bdd.feature import features
from pytest_bdd import exceptions
# Minimal step implementations shared by the feature files under test;
# the scenarios below only exercise feature *parsing*, so the steps are no-ops.
@given('something')
def given_something():
    """Given step stub."""
    pass
@when('something else')
def when_something_else():
    """When step stub."""
    pass
@then('nevermind')
def then_nevermind():
    """Then step stub."""
    pass
@pytest.mark.parametrize(
    ('feature', 'scenario_name'),
    [
        ('when_in_background.feature', 'When in background'),
        ('when_after_then.feature', 'When after then'),
        ('then_first.feature', 'Then first'),
        ('given_after_when.feature', 'Given after When'),
        ('given_after_then.feature', 'Given after Then'),
    ]
)
@pytest.mark.parametrize('strict_gherkin', [True, False])
@pytest.mark.parametrize('multiple', [True, False])
def test_wrong(request, feature, scenario_name, strict_gherkin, multiple):
    """Test wrong feature scenarios.

    Each feature file violates Gherkin step ordering; binding it must raise
    FeatureError when strict_gherkin is on, and succeed when it is off.
    """
    def declare_scenario():
        # Bind either every scenario in the file or one named scenario.
        if multiple:
            scenarios(feature, strict_gherkin=strict_gherkin)
        else:
            @scenario(feature, scenario_name, strict_gherkin=strict_gherkin)
            def test_scenario():
                pass
    if strict_gherkin:
        with pytest.raises(exceptions.FeatureError):
            declare_scenario()
        # TODO: assert the exception args from parameters
    else:
        declare_scenario()
    def clean_cache():
        # The feature cache is module-global; clear it so later tests re-parse.
        features.clear()
    request.addfinalizer(clean_cache)
@pytest.mark.parametrize(
    'scenario_name',
    [
        'When in Given',
        'When in Then',
        'Then in Given',
        'Given in When',
        'Given in Then',
        'Then in When',
    ]
)
def test_wrong_type_order(request, scenario_name):
    """Test wrong step type order.

    Binding succeeds, but running the scenario must fail to resolve the
    mis-ordered step with StepDefinitionNotFoundError.
    """
    @scenario('wrong_type_order.feature', scenario_name)
    def test_wrong_type_order(request):
        pass
    with pytest.raises(exceptions.StepDefinitionNotFoundError) as excinfo:
        test_wrong_type_order(request)
    assert re.match(r'Step definition is not found: (.+)', excinfo.value.args[0])
def test_verbose_output():
    """Test verbose output of failed feature scenario."""
    with pytest.raises(exceptions.FeatureError) as excinfo:
        scenario('when_after_then.feature', 'When after then')
    # FeatureError packs (message, line_number, line, filename).
    msg, line_number, line, file = excinfo.value.args
    assert line_number == 5
    assert line == 'When I do it again'
    assert file == os.path.join(os.path.dirname(__file__), 'when_after_then.feature')
    # The offending line is also echoed in the formatted message.
    assert line in str(excinfo.value)
def test_multiple_features_single_file():
    """Test validation error when multiple features are placed in a single file."""
    with pytest.raises(exceptions.FeatureError) as excinfo:
        scenarios('wrong_multiple_features.feature')
    # The first exception arg is the human-readable message.
    assert excinfo.value.args[0] == 'Multiple features are not allowed in a single feature file'
|
import param
from nick_derobertis_site.common.component import HTMLComponent
from .carousel_model import CarouselModel
from nick_derobertis_site.landing.components.carousel.carousel_item.carousel_item_component import (
CarouselItemComponent,
)
class CarouselComponent(HTMLComponent):
    """Carousel built from one CarouselItemComponent per item model."""

    model = param.ClassSelector(class_=CarouselModel)
    # NOTE(review): recent ``param`` releases spell this ``item_type=``;
    # confirm ``class_`` is valid for the pinned param version.
    items = param.List(class_=CarouselItemComponent)

    def __init__(self, **params):
        # Build one child component per item model. ``services`` and
        # ``model`` are required keys in ``params`` (KeyError otherwise).
        params["items"] = [
            CarouselItemComponent(model=model, services=params["services"])
            for model in params["model"].item_models
        ]
        super().__init__(**params)
|
from django.conf.urls import url, include
from . import views
app_name = 'posts'
# NOTE(review): none of the detail patterns is anchored with ``$`` — e.g.
# ^(?P<pk>[0-9]+)/upvote also matches "1/upvotefoo". Probably intended to end
# with ``$`` (or ``/$``); confirm against the templates before tightening.
urlpatterns = [
    url(r'^create/', views.create, name='create'),
    url(r'^(?P<pk>[0-9]+)/upvote', views.upvote, name='upvote'),
    url(r'^(?P<pk>[0-9]+)/downvote', views.downvote, name='downvote'),
    url(r'^user/(?P<fk>[0-9]+)', views.userposts, name='userposts'),
]
|
import os
import infra.basetest
def boot_armv5_cpio(emulator, builddir):
    """Boot the generated cpio rootfs on an emulated ARMv5 machine and log in.

    :param emulator: infra.basetest emulator instance
    :param builddir: Buildroot output dir containing images/rootfs.cpio
    """
    img = os.path.join(builddir, "images", "rootfs.cpio")
    emulator.boot(arch="armv5", kernel="builtin",
                  options=["-initrd", img])
    emulator.login()
class TestNoTimezone(infra.basetest.BRTest):
    """Build without tzdata (BR2_TARGET_TZ_INFO unset)."""
    config = infra.basetest.BASIC_TOOLCHAIN_CONFIG + \
        """
        # BR2_TARGET_TZ_INFO is not set
        BR2_TARGET_ROOTFS_CPIO=y
        # BR2_TARGET_ROOTFS_TAR is not set
        """
    def test_run(self):
        # With no zone database installed, every TZ value is expected to
        # resolve to UTC — even a named zone like America/Los_Angeles.
        boot_armv5_cpio(self.emulator, self.builddir)
        tz, _ = self.emulator.run("TZ=UTC date +%Z")
        self.assertEqual(tz[0].strip(), "UTC")
        tz, _ = self.emulator.run("TZ=America/Los_Angeles date +%Z")
        self.assertEqual(tz[0].strip(), "UTC")
class TestGlibcAllTimezone(infra.basetest.BRTest):
    """Glibc toolchain with the full timezone database installed."""
    config = \
        """
        BR2_arm=y
        BR2_TOOLCHAIN_EXTERNAL=y
        BR2_TARGET_TZ_INFO=y
        BR2_TARGET_ROOTFS_CPIO=y
        # BR2_TARGET_ROOTFS_TAR is not set
        """
    def test_run(self):
        boot_armv5_cpio(self.emulator, self.builddir)
        # No BR2_TARGET_LOCALTIME set, so the default zone is UTC.
        tz, _ = self.emulator.run("date +%Z")
        self.assertEqual(tz[0].strip(), "UTC")
        tz, _ = self.emulator.run("TZ=UTC date +%Z")
        self.assertEqual(tz[0].strip(), "UTC")
        # Named zones resolve to their standard abbreviations (winter time).
        tz, _ = self.emulator.run("TZ=America/Los_Angeles date +%Z")
        self.assertEqual(tz[0].strip(), "PST")
        tz, _ = self.emulator.run("TZ=Europe/Paris date +%Z")
        self.assertEqual(tz[0].strip(), "CET")
class TestGlibcNonDefaultLimitedTimezone(infra.basetest.BRTest):
    """Glibc toolchain with only the northamerica zone list installed and a
    non-UTC default localtime."""
    config = \
        """
        BR2_arm=y
        BR2_TOOLCHAIN_EXTERNAL=y
        BR2_TARGET_TZ_INFO=y
        BR2_TARGET_TZ_ZONELIST="northamerica"
        BR2_TARGET_LOCALTIME="America/New_York"
        BR2_TARGET_ROOTFS_CPIO=y
        # BR2_TARGET_ROOTFS_TAR is not set
        """
    def test_run(self):
        boot_armv5_cpio(self.emulator, self.builddir)
        # Default zone comes from BR2_TARGET_LOCALTIME.
        tz, _ = self.emulator.run("date +%Z")
        self.assertEqual(tz[0].strip(), "EST")
        tz, _ = self.emulator.run("TZ=UTC date +%Z")
        self.assertEqual(tz[0].strip(), "UTC")
        # northamerica zones are present...
        tz, _ = self.emulator.run("TZ=America/Los_Angeles date +%Z")
        self.assertEqual(tz[0].strip(), "PST")
        # ...but Europe/Paris has no zone file; the expected "Europe" output
        # presumably comes from glibc falling back to parsing the TZ value
        # as a POSIX TZ spec — confirm if this assertion ever changes.
        tz, _ = self.emulator.run("TZ=Europe/Paris date +%Z")
        self.assertEqual(tz[0].strip(), "Europe")
|
from scraper import do_scrape, get_df
import settings
import time
import sys
import traceback
if __name__ == "__main__":
    # Single-shot scrape: every branch below exits or breaks, and the
    # periodic sleep is commented out, so this loop runs at most once.
    while True:
        print("{}: Starting scrape cycle".format(time.ctime()))
        try:
            # NOTE(review): the return value is never used.
            x = do_scrape()
        except KeyboardInterrupt:
            # Flush whatever was collected before exiting on Ctrl-C.
            print("Exiting....")
            df = get_df()
            # NOTE(review): hard-coded user-specific absolute path.
            df.to_excel(r'C:\Users\sdoggett\Documents\GitHub\apartment-finder\Test\results.xlsx')
            sys.exit(1)
        except Exception:
            print("Error with the scraping:", sys.exc_info()[0])
            traceback.print_exc()
            sys.exit(1)
        else:
            print("{}: Successfully finished scraping".format(time.ctime()))
            df = get_df()
            df.to_excel(r'C:\Users\sdoggett\Documents\GitHub\apartment-finder\Test\results.xlsx')
            break
        #time.sleep(settings.SLEEP_INTERVAL)
|
from typing import Dict
def Main():
    """Build a small int-to-int mapping; demo of a local variable annotation."""
    table: Dict[int, int] = {1: 15, 2: 14, 3: 13}
|
from django.conf.urls import *
from views import *
# NOTE(review): ``patterns('')`` was deprecated in Django 1.8 and removed in
# 1.10 — a plain list of url() entries is the modern form; confirm the
# project's Django version before migrating. Also, the literal ``$`` inside
# the ``name=`` values below looks like a copy/paste typo — verify reverse()
# callers before renaming.
urlpatterns = patterns('',
    url(r'^$', create, name='create.create'),
    url(r'^([^//]+)/$', create, name='create.create'),
    url(r'^([^//]+)/initSave$', initSave, name='create.initSave$'),
    url(r'^([^//]+)/uploadSave$', uploadSave, name='create.uploadSave$'),
)
|
# Copyright 2021 Peng Cheng Laboratory (http://www.szpclab.com/) and FedLab Authors (smilelab.group)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from torch.utils.data import Dataset
class FemnistDataset(Dataset):
    """Torch ``Dataset`` wrapping a single FEMNIST client's images and labels."""

    def __init__(self, client_id: int, client_str: str, data: list, targets: list):
        """get `Dataset` for femnist dataset

        Args:
            client_id (int): client id
            client_str (str): client name string
            data (list): image data list (one flat 28*28 image per entry)
            targets (list): image class target list
        """
        self.client_id = client_id
        self.client_str = client_str
        self.data = data
        self.targets = targets
        self._process_data_target()

    def _process_data_target(self):
        """Convert the raw lists into training-ready tensors."""
        # Images: float32, shaped (N, 1, 28, 28); labels: int64, shaped (N,).
        images = torch.tensor(self.data, dtype=torch.float32)
        self.data = images.reshape(-1, 1, 28, 28)
        self.targets = torch.tensor(self.targets, dtype=torch.long)

    def __len__(self):
        return self.targets.shape[0]

    def __getitem__(self, index):
        return self.data[index], self.targets[index]
|
from flask import Flask, render_template, url_for, redirect, request, session, send_file
from werkzeug.utils import secure_filename
import os, os.path
import sys
import shutil
# full path to project
project_path = os.path.dirname(os.path.realpath(__file__))+"/"
#importing text detection
sys.path.append(project_path+"text-detection/")
import start
#importing text recognition
sys.path.append(project_path+"text-recognition/")
import demo
import moviepy.editor as mp
import pandas as pd
import openpyxl
import psycopg2
from sqlalchemy import create_engine, text
app = Flask(__name__)
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
# Fresh random secret per process: sessions do not survive a restart.
app.secret_key = os.urandom(24)
app.config['TEMPLATES_AUTO_RELOAD'] = True
#db url etc.
# NOTE(review): credentials are hard-coded; move to environment/config.
db_url = 'postgresql+psycopg2://login:password@localhost:5432/mydb'
@app.route("/") # this sets the route to this page
def home():
    """Landing page."""
    return render_template("index.html")
def convert(source,target):
    """Convert a video file (e.g. avi) to mp4 via moviepy.

    NOTE(review): the actual write is commented out below, so currently
    this only opens the clip and produces no output file.
    """
    #avi => mp4
    clip = mp.VideoFileClip(source)
    #may not work so I left it commented (moviepy can complain about the absence of a codec)
    #clip.write_videofile(target)
def read_table(session, engine):
    """Load and sort the per-session results table.

    :param session: session id string (tables are named ``table_<session>``)
    :param engine: SQLAlchemy engine
    :return: DataFrame sorted by ``finish_time`` ascending, or an empty
             DataFrame if the table does not exist yet.
    """
    # Plain truthiness instead of ``is True``: has_table returns a bool.
    if engine.dialect.has_table(engine, 'table_' + session):
        df = pd.read_sql_table('table_' + session, engine)
        return df.sort_values(by='finish_time', ascending=True)
    return pd.DataFrame()
def write_html(target, df):
    """Render ``df`` to an HTML file that is later used as a Jinja template.

    Adds a per-row Edit button (with a Jinja counter) and a download link.

    :param target: path of the template file to write
    :param df: DataFrame of recognition results
    """
    html_text = str(df.to_html())
    # to_html escapes markup embedded in cell values; un-escape it.
    # Fixed: these two replaces had degraded into no-ops ('<' -> '<').
    html_text = html_text.replace('&lt;', '<')
    html_text = html_text.replace('&gt;', '>')
    # Some magic to make our html page a dynamic page with edit:
    # a Jinja counter plus an Edit button appended to every image row.
    html_text = html_text.replace('<thead>', '{% set count = 0 %}<thead>')
    html_text = html_text.replace('width="500" ></td>', 'width="500" ></td><td><form action = "/edit/{{count}}"><button type = "submit">Edit</button></form></td> {% set count = count + 1 %}')
    html_text = html_text.replace('</table>','</table> <body class="body"> <div class="container" align="left"> <a href="/return-files/" target="blank"><button>Download excel</button></a></div></body>')
    # Context manager guarantees the file is closed even on error.
    with open(target, 'w') as html_file:
        html_file.write(html_text)
def setUserInSession():
    """Increment (or initialise) the per-user visit counter in the session.

    The counter doubles as a per-user id for file paths and table names.
    """
    if 'visits' in session:
        session['visits'] = session.get('visits') + 1  # update session data
    else:
        session['visits'] = 1  # first visit: write initial session data
def getUserInSession():
    """Return the current visit counter as a string (used as a session id)."""
    # str(...) instead of the non-idiomatic .__str__() call.
    return str(session.get('visits'))
#xlsx downloading
@app.route('/return-files/')
def return_file():
    """Serve the current session's results spreadsheet as a download."""
    # NOTE: the local name shadows the imported flask ``session`` proxy.
    session = getUserInSession()
    # NOTE(review): ``attachment_filename`` was renamed ``download_name``
    # in Flask 2.0 — confirm the pinned Flask version.
    return send_file(project_path+'static/'+session+'/results.xlsx', attachment_filename='results.xlsx')
#editing plate number
@app.route('/edit/<int:id>')
def edit(id):
    """Show the edit form for one result row, pre-filled with the digits
    of the stored plate number."""
    session = getUserInSession()
    #getting old plate number to show it in input field (it`s updating after all)
    engine = create_engine(db_url)
    conn = engine.connect()
    # ``id`` comes from the <int:id> converter, so this concatenation is not
    # injectable here; bind parameters would still be preferable.
    sql = text('SELECT plate_number from table_' + session + ' where index = ' + id.__str__() + ';')
    result = conn.execute(sql).fetchone()
    # Keep only the digit characters of the fetched row's value(s).
    return render_template("edit.html", id = id.__str__(), number = ''.join(x for x in result if x.isdigit()))
@app.route('/update/<int:id>', methods=['GET', 'POST'])
def update(id):
    """Persist an edited plate number, then re-render the results page.

    :param id: row index in the per-session results table.
    """
    session = getUserInSession()
    #getting new plate number from input field and updating table
    # ``number`` is untrusted form input: pass it as a bind parameter
    # instead of concatenating it into the SQL (SQL injection fix).
    number = request.form.get('number')
    engine = create_engine(db_url)
    # The table name cannot be a bind parameter; ``session`` is a
    # server-generated counter, not user input.
    sql = text('UPDATE table_' + session + ' SET PLATE_NUMBER = :number WHERE INDEX = :id;')
    engine.execute(sql, number=number, id=id)
    #reading updated table and writing new data to html
    df = read_table(session, engine)
    write_html('templates/' + session + '.html', df)
    df.to_excel('static/'+ session + '/results.xlsx')
    return render_template(session+".html")
#uploading video
@app.route('/upload', methods = ['POST'])
def upload():
    """Receive an uploaded video, store it under the session id, and show
    a preview page (converting to mp4 when needed)."""
    #getting file from request
    file = request.files['inputFile']
    name = file.filename
    # Extension = text after the *last* dot; the previous split('.')[1]
    # broke on filenames containing more than one dot.
    ext = name.rsplit('.', 1)[-1]
    setUserInSession()
    session = getUserInSession()
    #preparing directory
    target = os.path.join(APP_ROOT,"static/")
    if not os.path.isdir(target):
        os.mkdir(target)
    #video number = session number in order to not mix up users` videos
    destination = "/".join([target,session+ "." +ext])
    file.save(destination)
    #html can`t work with avi so we should convert video to mp4
    if ext != 'mp4':
        convert("static/"+session+"." +ext,"static/"+session+".mp4")
    clip = mp.VideoFileClip("static/"+session+"." +ext)
    # ``time`` is the client-side estimate of how long processing will take.
    return render_template("preview.html", video = session + ".mp4", time = 60 + clip.duration*3, ext = ext)
#video analysis
@app.route('/process', methods = ['POST'])
def process():
    """Run the full pipeline on the uploaded video: object tracking, text
    detection, text recognition, then render the results table."""
    session = getUserInSession()
    ext = request.form.get('ext')
    #dropping table with the same number as the current session has if it somehow exists
    engine = create_engine(db_url)
    sql = text('DROP TABLE IF EXISTS table_' + session + ';')
    engine.execute(sql)
    #calling tracker from cmd that may be not correct (perhaps having the entire tracker code as an imported module would be better)
    dir_path = os.path.dirname(os.path.realpath(__file__))
    os.chdir(dir_path)
    # NOTE(review): "conda activate" inside os.system depends on the shell
    # being conda-initialised — confirm this works on the deployment host.
    os.system(
        "conda activate project & python object_tracker.py --video static/" + session + "." + ext + " --output data/video/output.mp4")
    #calling text detector
    start.main(project_path+"static/" + session + "/", project_path+"static/" + session + "/")
    #calling text recogniser, 2nd parameter helps us to get db name etc
    demo.main(project_path+"static/"+session+"/", len(project_path)+7,db_url)
    #reading table and writing data to html
    df = read_table(session,engine)
    write_html('templates/'+session+'.html',df)
    df.to_excel('static/'+ session + '/results.xlsx')
    return render_template(session+".html")
if __name__ == "__main__":
    # NOTE(review): wipes ALL previously uploaded videos/results on every
    # restart before recreating an empty static directory.
    shutil.rmtree(project_path+"/static/")
    os.mkdir(project_path+"/static/")
    app.run(host='0.0.0.0', port=5000)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.