content stringlengths 5 1.05M |
|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Exception classes are located here
'''
class InvalidResponseFormat(Exception):
    """Raised when the response received from the API server could not be
    deserialized.

    :param path: request path that produced the response
    :param method: HTTP method used for the request
    :param body: raw response body that failed to parse
    """

    def __init__(self, path, method, body):
        self.path = path
        self.method = method
        self.body = body
        # Cooperative super() call instead of the old-style
        # Exception.__init__(self); behavior is unchanged.
        super().__init__()

    def __str__(self):
        return "Can't parse response from {} with method {}".format(
            self.path, self.method
        )
class UnauthorizedAccess(Exception):
    """Raised when the username and/or password are incorrect.

    :param username: the username that failed to authenticate
    """

    def __init__(self, username):
        self.username = username
        # Cooperative super() call instead of Exception.__init__(self).
        super().__init__()

    def __str__(self):
        return "Fail to get access for {}".format(self.username)
class AccessDenied(Exception):
    """Raised when access to the requested resource is denied for the user.

    :param username: the user whose access was denied
    :param path: request path of the protected resource
    :param method: HTTP method used for the request
    """

    def __init__(self, username, path, method):
        self.username = username
        self.path = path
        self.method = method
        # Cooperative super() call instead of Exception.__init__(self).
        super().__init__()

    def __str__(self):
        return "For {} access denied to {} with method {}".format(
            self.username, self.path, self.method
        )
class UnexpectedError(Exception):
    """Raised when a response carries an unexpected HTTP status code.

    :param status: HTTP status code that was received
    :param path: request path that produced the response
    :param method: HTTP method used for the request
    :param body: raw response body (kept for debugging)
    """

    def __init__(self, status, path, method, body):
        self.status = status
        self.path = path
        self.method = method
        self.body = body
        # Cooperative super() call instead of Exception.__init__(self).
        super().__init__()

    def __str__(self):
        # Message text kept exactly as before so existing log matching
        # and tests stay valid.
        return "For {} with method {} is got unexpected status code {}".format(
            self.path, self.method, self.status
        )
|
# Print the 1..9 multiplication table: each cell is the product followed by
# a space, one row per line (same output as print(..., end=' ') per cell
# plus a bare newline at the end of each row).
for row in range(1, 10):
    cells = ''.join('{} '.format(row * col) for col in range(1, 10))
    print(cells)
"""
LIST: REMOVE NULLS (SINGLE LIST)
"""
__author__ = 'Sol Amour - amoursol@gmail.com'
__twitter__ = '@solamour'
__version__ = '1.0.0'
data = ["a", None, 17.1, 3, "C", "X", None, 4] # A mixed list of data types inclusive
# of nulls (None's)
results = [] # An empty container list to which we will append (Add to) our results
for item in data: # A 'For' loop that we run over the entire list (data) of inputs
if item is not None: # A conditional check that says: "If an item in the list
# called 'data' 'is not None' (That is - is valid) - simply add it to
# our output list, otherwise pass to the next item"
results.append(item) # If the conditional check is sucessful, then
# append that element to our container list called 'results'
OUT = results
|
"""generate openapi docs."""
from honeybee_schema._openapi import get_openapi
from honeybee_schema.model import Model
from honeybee_schema.energy.simulation import SimulationParameter
import json
# generate Model open api schema
print('Generating Model documentation...')
openapi = get_openapi(
[Model],
title='Honeybee Model Schema',
description='This is the documentation for Honeybee model schema.')
with open('./docs/model.json', 'w') as out_file:
json.dump(openapi, out_file, indent=2)
# generate SimulationParameter open api schema
print('Generating Energy Simulation Parameter documentation...')
openapi = get_openapi(
[SimulationParameter],
title='Honeybee Energy Simulation Parameter Schema',
description='This is the documentation for Honeybee energy simulation parameter schema.')
with open('./docs/simulation-parameter.json', 'w') as out_file:
json.dump(openapi, out_file, indent=2)
|
#Primero, pedimos los tres lados del triangulo
lado_a = float(raw_input("Ingrese a: "))
lado_b = float(raw_input("Ingrese b: "))
lado_c = float(raw_input("Ingrese c: "))
#Luego, comprobamos la desigualdad triangular y, si aplica, el tipo de triangulo
if (abs(lado_a - lado_c) < lado_b) and (lado_b < abs(lado_a + lado_c)):
if (lado_a == lado_b) and (lado_b == lado_c):
print "El triangulo es equilatero"
elif (lado_a == lado_b) or (lado_a == lado_c) or (lado_b == lado_c):
print "El ttriangulo es isoceles"
elif (lado_a != lado_b) or (lado_a != lado_c) or (lado_b != lado_c):
print "El triangulo es escaleno"
else:
print "No es un triangulo valido" |
#!/usr/bin/env python
# Django manage.py with an optional App Engine (djangoappengine) bootstrap.
# Add "common-apps" folder to sys.path if it exists
import os, sys
common_dir = os.path.join(os.path.dirname(__file__), 'common-apps')
if os.path.exists(common_dir):
    sys.path.append(common_dir)
# Initialize App Engine SDK if djangoappengine backend is installed
try:
    from djangoappengine.boot import setup_env
except ImportError:
    # djangoappengine is optional; without it we run as a plain Django project.
    pass
else:
    setup_env()
# NOTE(review): execute_manager is the legacy (pre-Django 1.6) entry point.
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys  # NOTE(review): redundant -- sys is already imported above.
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
|
from nn_wtf.data_set_base import DataSetBase
import tensorflow as tf
__author__ = 'Lene Preuss <lene.preuss@gmail.com>'
class NeuralNetworkGraphBase:
    """Common scaffolding for a neural network graph: geometry bookkeeping,
    the input/label placeholders, and feed-dict construction.

    Subclasses are expected to implement ``_build_neural_network`` and
    ``output_layer``.
    """

    def __init__(self, input_size, layer_sizes, output_size):
        """Initialize a neural network given its geometry.
        :param input_size: number of input channels
        :param layer_sizes: tuple of sizes of the neural network hidden layers
        :param output_size: number of output classes
        """
        self._setup_geometry(input_size, layer_sizes, output_size)
        self.predictor = None
        self.trainer = None
        self.session = None
        self.layers = []
        self.input_placeholder = tf.placeholder(
            tf.float32, shape=(None, self.input_size), name='input')
        self.labels_placeholder = tf.placeholder(
            tf.int32, shape=(None,), name='labels')
        self._build_neural_network()

    def output_layer(self):
        """Subclasses must supply the graph's output layer."""
        raise NotImplementedError

    def fill_feed_dict(self, data_set, batch_size):
        """Fills the feed_dict for training the given step.
        A feed_dict takes the form of:
        feed_dict = {
            <placeholder>: <tensor of values to be passed for placeholder>,
            ....
        }
        :param data_set: The set of images and labels
        :param batch_size: Number of data sets to work on as one batch
        :return The feed dictionary mapping from placeholders to values.
        """
        assert isinstance(data_set, DataSetBase)
        inputs, labels = data_set.next_batch(batch_size)
        return {
            self.input_placeholder: inputs,
            self.labels_placeholder: labels,
        }

    ############################################################################

    def _setup_geometry(self, input_size, layer_sizes, output_size):
        # Cast explicitly so numeric strings/floats are accepted.
        self.input_size = int(input_size)
        self.output_size = int(output_size)
        self.layer_sizes = self._set_layer_sizes(layer_sizes)
        self.num_hidden_layers = len(self.layer_sizes) - 1

    def _set_layer_sizes(self, layer_sizes):
        # filter(None, ...) drops falsy entries (None, 0) so callers may
        # pass padded tuples.
        sizes = tuple(filter(None, layer_sizes))
        if sizes[-1] < self.output_size:
            raise ValueError('Last layer size must be greater or equal output size')
        return (self.input_size,) + sizes
|
# -*- coding: utf-8 -*-
"""
author: zengbin93
email: zeng_bin8888@163.com
create_dt: 2022/2/16 20:31
describe: czsc.utils 单元测试
"""
from czsc import utils
def test_x_round():
    """x_round should round floats to the requested number of digits and
    pass integers through unchanged."""
    cases = (
        ((100, 3), 100),
        ((1.000342, 3), 1.0),
        ((1.000342, 4), 1.0003),
        ((1.000342, 5), 1.00034),
    )
    for args, expected in cases:
        assert utils.x_round(*args) == expected
|
"""Concatenate one or more CSV files into a single output CSV.

Usage: script.py INPUT1 [INPUT2 ...] OUTPUT
"""
import sys

import pandas as pd

# All arguments but the last are input paths; the last is the output path.
_, *input_files, output_file = sys.argv

# Comprehension replaces the append loop; the dead commented-out
# read_hdf variant was removed.
frames = [pd.read_csv(path) for path in input_files]
pd.concat(frames).to_csv(output_file, index=False)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Ce module implémente une version simple du Jeu Tetris
"""
__version__ = "0.0.6"
from tetris_game.core import Square, Line, Shape, Menu, TetrisFrame, TetrisGame
from tetris_game.constantes import *
|
#! python3
# -*- coding: utf-8 -*-
import inspect
import wx
from wx import aui
from wx import stc
import wx.lib.eventwatcher as ew
from mwx.framework import FSM
# wx < 4.1 has no native ListCtrl checkbox support, so emulate it with
# CheckListCtrlMixin and alias IsItemChecked to the mixin's IsChecked so the
# rest of the module can use the 4.1+ API uniformly.
if wx.VERSION < (4,1,0):
    from wx.lib.mixins.listctrl import CheckListCtrlMixin

    class _ListCtrl(wx.ListCtrl, CheckListCtrlMixin):
        def __init__(self, *args, **kwargs):
            wx.ListCtrl.__init__(self, *args, **kwargs)
            CheckListCtrlMixin.__init__(self)
            self.IsItemChecked = self.IsChecked # for wx 4.1 compatibility
else:
    class _ListCtrl(wx.ListCtrl):
        def __init__(self, *args, **kwargs):
            wx.ListCtrl.__init__(self, *args, **kwargs)
            # wx >= 4.1 ships native checkbox support.
            self.EnableCheckBoxes()
def where(obj):
    """Return ``"filename:lineno:first-source-line"`` for *obj*.

    Falls back to ``repr(obj)`` when the source cannot be located
    (built-ins, C extensions, plain values raise TypeError in inspect).
    """
    try:
        path = inspect.getsourcefile(obj)
        lines, start = inspect.getsourcelines(obj)
    except TypeError:
        return repr(obj)
    return "{!s}:{}:{!s}".format(path, start, lines[0].rstrip())
class EventMonitor(_ListCtrl):
    """Event monitor of the inspector
    *** Inspired by wx.lib.eventwatcher ***
    Args:
        parent : inspector of the shell
    """
    # Read-only views onto private state; name-mangled attributes keep
    # subclasses from clobbering them accidentally.
    target = property(lambda self: self.__watchedWidget)
    data = property(lambda self: self.__items)
    logger = property(lambda self: self.__inspector.Scratch)
    shell = property(lambda self: self.__inspector.rootshell)

    def __init__(self, parent, **kwargs):
        _ListCtrl.__init__(self, parent,
                           style=wx.LC_REPORT|wx.LC_HRULES, **kwargs)
        self.__inspector = parent
        self.__watchedWidget = None
        self.Font = wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.NORMAL)
        self.alist = ( # assoc list of column names
            ("typeId", 62),
            ("typeName", 200),
            ("source", 200),
        )
        for k, (header, w) in enumerate(self.alist):
            self.InsertColumn(k, header, width=w)
        self.__dir = True # sort direction
        # Each row is a mutable record [typeId, typeName, source, attribs];
        # attribs (the last element) is shown on activation, not as a column.
        self.__items = []
        ## self.Bind(wx.EVT_MOTION, self.OnMotion)
        self.Bind(wx.EVT_LIST_COL_CLICK, self.OnSortItems)
        self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.OnItemActivated) # left-dclick
        self.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroy)

    def OnDestroy(self, evt):
        # Unbind from the watched widget before this control goes away.
        self.unwatch()
        evt.Skip()

    ## --------------------------------
    ## Event-watcher wrapper interface
    ## --------------------------------
    # NOTE(review): these run once at class-creation time and mutate the
    # shared wx.lib.eventwatcher module state.
    ew.buildWxEventMap() # build ew._eventBinders and ew._eventIdMap
    ew.addModuleEvents(aui) # + some additives
    ew.addModuleEvents(stc)

    ## Events that should not be watched by default
    ew._noWatchList = [
        wx.EVT_PAINT,
        wx.EVT_NC_PAINT,
        wx.EVT_ERASE_BACKGROUND,
        wx.EVT_IDLE,
        wx.EVT_UPDATE_UI,
        wx.EVT_UPDATE_UI_RANGE,
        wx.EVT_TOOL,
        wx.EVT_TOOL_RANGE, # menu items (typeId=10018)
        wx.EVT_MENU,
    ]

    @staticmethod
    def get_name(event):
        """Return the event type name for a typeId (or 'Unknown')."""
        return ew._eventIdMap.get(event, 'Unknown')

    @staticmethod
    def get_binder(event):
        """Return the wx.PyEventBinder whose typeId equals *event*.

        Raises StopIteration when no binder matches.
        """
        return next(x for x in ew._eventBinders if x.typeId == event)

    @staticmethod
    def get_watchlist():
        """All watched event binders except noWatchList"""
        return filter(lambda v: v not in ew._noWatchList,
                      ew._eventBinders)

    def get_actions(self, event, widget=None):
        """Wx.PyEventBinder and the handlers"""
        widget = widget or self.target
        if widget:
            try:
                handlers = widget.__event_handler__[event]
                # Exclude our own monitoring handler from the result.
                return [a for a in handlers if a != self.onWatchedEvent]
            except KeyError:
                print("- No such event: {}".format(event))

    def watch(self, widget):
        """Begin watching"""
        if not isinstance(widget, wx.Object):
            return
        self.unwatch()
        self.clear()
        self.__watchedWidget = widget
        ssmap = self.dump(widget, verbose=1)
        for binder in self.get_watchlist():
            widget.Bind(binder, self.onWatchedEvent)
            # Pre-populate rows for events that already had handlers bound.
            if binder.typeId in ssmap:
                self.append(binder.typeId)
        self.__inspector.handler("add_page", self)
        self.shell.handler("monitor_begin", self.target)

    def unwatch(self):
        """End watching"""
        if not self.target:
            return
        for binder in self.get_watchlist():
            if not self.target.Unbind(binder, handler=self.onWatchedEvent):
                print("- Failed to unbind {}:{}".format(binder.typeId, binder))
        self.shell.handler("monitor_end", self.target)
        self.__watchedWidget = None

    def onWatchedEvent(self, evt):
        # 'if self' guards against calls after the window is destroyed.
        if self:
            self.update(evt)
        evt.Skip()

    def dump(self, widget, verbose=True):
        """Dump all event handlers bound to the widget"""
        exclusions = [x.typeId for x in ew._noWatchList]
        ssmap = {}
        for event in sorted(widget.__event_handler__):
            actions = self.get_actions(event)
            if event not in exclusions and actions:
                ssmap[event] = actions
                if verbose:
                    name = self.get_name(event)
                    values = ('\n'+' '*41).join(where(a) for a in actions)
                    print("{:8d}:{:32s}{!s}".format(event, name, values))
        return ssmap

    def hook(self, evt):
        # Trace every non-monitor handler of this event in the debugger.
        actions = self.get_actions(evt.EventType)
        for f in actions or []:
            self.__inspector.debugger.trace(f, evt)

    ## --------------------------------
    ## Actions for event-logger items
    ## --------------------------------

    def OnItemActivated(self, evt):
        # Show the recorded attribute dump of the activated row.
        item = self.__items[evt.Index]
        attribs = item[-1]
        wx.CallAfter(wx.TipWindow, self, attribs, 512)
        self.__inspector.handler("put_scratch", attribs)

    def update(self, evt):
        """Record *evt*: update its row in place or append a new one."""
        event = evt.EventType
        obj = evt.EventObject
        name = self.get_name(event)
        source = ew._makeSourceString(obj)
        attribs = ew._makeAttribString(evt)
        if wx.VERSION < (4,1,0): # ignore self insert
            if event == wx.EVT_LIST_INSERT_ITEM.typeId\
                    and obj is self:
                return
        for i, item in enumerate(self.__items):
            if item[0] == event:
                item[1:] = [name, source, attribs]
                break
        else:
            i = len(self.__items)
            item = [event, name, source, attribs]
            self.__items.append(item)
            self.InsertItem(i, event)
        for j, v in enumerate(item[:-1]):
            self.SetItem(i, j, str(v))
        if i == self.FocusedItem:
            self.__inspector.handler("put_scratch", attribs)
        # A checked row means "break on this event": uncheck and trace it.
        if self.IsItemChecked(i):
            self.CheckItem(i, False)
            self.hook(evt)
        # Flash the row yellow, restoring white one second later.
        if self.GetItemBackgroundColour(i) != wx.Colour('yellow'):
            ## Don't run out of all timers and get warnings
            self.SetItemBackgroundColour(i, "yellow")
            def reset_color():
                if self:
                    self.SetItemBackgroundColour(i, 'white')
            wx.CallLater(1000, reset_color)

    def clear(self):
        """Remove all rows and recorded data."""
        self.DeleteAllItems()
        self.__items = []

    def append(self, event, bold=True):
        """Add a placeholder row for *event* (a typeId) unless present."""
        if event in (item[0] for item in self.__items):
            return
        i = len(self.__items)
        name = self.get_name(event)
        item = [event, name, '-', 'no data']
        self.__items.append(item)
        self.InsertItem(i, event)
        for j, v in enumerate(item[:-1]):
            self.SetItem(i, j, str(v))
        if bold:
            self.SetItemFont(i, self.Font.Bold())

    def OnSortItems(self, evt): #<wx._controls.ListEvent>
        # Remember checked/selected/focused rows, sort by the clicked
        # column, then restore those states on the re-ordered rows.
        n = self.ItemCount
        lc = [self.__items[j] for j in range(n) if self.IsItemChecked(j)]
        ls = [self.__items[j] for j in range(n) if self.IsSelected(j)]
        f = self.__items[self.FocusedItem]
        col = evt.GetColumn()
        self.__dir = not self.__dir
        self.__items.sort(key=lambda v: v[col], reverse=self.__dir) # sort data
        for i, item in enumerate(self.__items):
            for j, v in enumerate(item[:-1]):
                self.SetItem(i, j, str(v))
            self.CheckItem(i, item in lc) # check
            self.Select(i, item in ls) # selection
            if self.get_actions(item[0]):
                self.SetItemFont(i, self.Font.Bold())
            else:
                self.SetItemFont(i, self.Font)
        self.Focus(self.__items.index(f)) # focus (one)

    ## def OnMotion(self, evt): #<wx._core.MouseEvent>
    ##     i, flag = self.HitTest(evt.Position)
    ##     if i >= 0:
    ##         item = self.__items[i]
    ##     evt.Skip()
if __name__ == "__main__":
import mwx
app = wx.App()
frm = mwx.Frame(None)
if 1:
self = frm.inspector
frm.mon = EventMonitor(self)
frm.mon.Show(0)
self.rootshell.write("self.mon.watch(self.mon)")
self.Show()
frm.Show()
app.MainLoop()
|
import please2.reg_cmd as reg_cmd
from ..cmd_base import Command, Match
from .cmd_git_util import make_error_result, get_ws_modifs_tree, git_diff_layer_name
from please2.util.args import get_positional_after
class CommandGitLsDiffsWsCache(Command):
    """'git diffs ws-cache' command: expose workspace-vs-cache
    modifications as a tree layer."""

    def key(self):
        return 'git diffs ws-cache'

    def help(self):
        return self.key() + ' [@ <dir>]'

    def opt_keys(self):
        # '@' selects an alternative directory.
        return {'@'}

    def layer_name(self):
        return git_diff_layer_name()

    def run_match(self, args, params):
        tree = get_ws_modifs_tree(args, params, ws_cache=True, cache_local=False)
        return Match({self.layer_name(): tree})


reg_cmd.register_command(CommandGitLsDiffsWsCache())
|
from django.contrib import admin
from django.utils import timezone
from django.utils.html import format_html
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from .models import ProblemReport
from .utils import inform_user_problem_resolved
class ProblemReportAdmin(admin.ModelAdmin):
    """Admin for ProblemReport: links each report to its message and
    notifies the reporting user when a report is marked resolved."""

    date_hierarchy = 'timestamp'
    raw_id_fields = ('message', 'user',)
    list_filter = ('auto_submitted', 'resolved', 'kind')
    list_display = (
        'kind', 'timestamp', 'admin_link_message',
        'auto_submitted', 'resolved',
    )

    def get_queryset(self, request):
        # Pre-fetch the related message to avoid one query per list row.
        return super().get_queryset(request).select_related('message')

    def admin_link_message(self, obj):
        url = reverse('admin:foirequest_foimessage_change',
                      args=(obj.message_id,))
        return format_html('<a href="{}">{}</a>', url, str(obj.message))

    def save_model(self, request, obj, form, change):
        # Only act when 'resolved' was flipped on in this save.
        if 'resolved' in form.changed_data and obj.resolved:
            if not obj.resolution_timestamp:
                obj.resolution_timestamp = timezone.now()
            if inform_user_problem_resolved(obj):
                self.message_user(
                    request, _('User will be notified of resolution')
                )
        super().save_model(request, obj, form, change)


admin.site.register(ProblemReport, ProblemReportAdmin)
|
from time import time
import glm
from itbl import Ray, Shader
from itbl.accelerators import SDF, BVHAccel
from itbl.cameras import TrackballCamera
from itbl.shapes import Box
from itbl.util import get_color, get_data
from itbl.viewer import Application, Viewer
from itbl.viewer.backend import *
import itbl._itbl as _itbl
# NOTE(review): this shadows the `from time import time` above; the code
# below uses time.time() and time.sleep(), so the module import is the one
# actually in effect.
import time
from kinorrt.search_space import SearchSpace
from kinorrt.mechanics.contact_kinematics import *
import random
from kinorrt.mechanics.stability_margin import *
from kinorrt.rrt import RRTManipulation
from kinorrt.mechanics.traj_optim import *

# Object geometry and workspace dimensions used by the demo scenes.
OBJECT_SHAPE = [1.75, 1, 1.5, 0.75]
HALLWAY_W = 2.5
BLOCK_H = 7
BLOCK_W = 5

np.seterr(divide='ignore')  # silence divide warnings from the contact math
np.set_printoptions(suppress=True, precision=4, linewidth=210)
def print_opengl_error():
    """Query the OpenGL error flag and print any pending error."""
    code = glGetError()
    if code != GL_NO_ERROR:
        print('GLError: ', gluErrorString(code))
def config2trans(q):
    """Build the 3x3 planar homogeneous transform for configuration
    q = (x, y, theta); q is flattened first so column vectors work too."""
    q = q.flatten()
    theta = q[2]
    c, s = np.cos(theta), np.sin(theta)
    g = np.identity(3)
    g[0:2, 0:2] = np.array([[c, -s], [s, c]])
    g[0:2, -1] = q[0:2]
    return g
class iTM2d(Application):
    """Interactive viewer for the in-hand manipulation demo.

    Renders obstacles, the manipulated two-part object and planner output
    (animated path, full path, tree nodes) with a 3D perspective pass
    (render) and a 2D orthographic pass (render2).
    """

    def __init__(self, object_shape):
        # Initialize scene.
        super(iTM2d, self).__init__(None)
        self.mesh = Box(1.0, 0.5, 0.2)
        self.light_box = Box(0.2, 0.2, 0.2)
        self.object_shape = object_shape

    def init(self):
        """Load shaders and set up the camera and draw toggles."""
        super(iTM2d, self).init()
        # Basic lighting shader.
        vertex_source = os.path.join(get_data(), 'shader', 'basic_lighting.vs')
        fragment_source = os.path.join(get_data(), 'shader', 'basic_lighting.fs')
        self.basic_lighting_shader = Shader(vertex_source, fragment_source)
        # Lamp shader.
        vertex_source = os.path.join(get_data(), 'shader', 'flat.vs')
        fragment_source = os.path.join(get_data(), 'shader', 'flat.fs')
        self.lamp_shader = Shader(vertex_source, fragment_source)
        # Normal shader.
        vertex_source = os.path.join(get_data(), 'shader', 'normals.vs')
        fragment_source = os.path.join(get_data(), 'shader', 'normals.fs')
        geometry_source = os.path.join(get_data(), 'shader', 'normals.gs')
        self.normal_shader = Shader(vertex_source, fragment_source, geometry_source)
        # Trackball camera.
        self.camera = TrackballCamera(radius=50)
        # Toggle variables.
        self.draw_mesh = True
        self.draw_wireframe = True
        self.draw_normals = False

    def init2(self):
        """Set up the 2D pass: C++ GL bindings, flat shader, scene objects."""
        # C++ OpenGL.
        _itbl.loadOpenGL()
        # 2D shader.
        vertex_source = os.path.join(get_data(), 'shader', '2d.vs')
        fragment_source = os.path.join(get_data(), 'shader', '2d.fs')
        self.flat_shader = Shader(vertex_source, fragment_source)
        # Object
        self.env_contacts = None
        self.manip_contacts = None
        # NOTE(review): duplicate assignment -- env_contacts was already
        # set two lines above.
        self.env_contacts = None
        self.manifold = None
        self.v_m = None
        self.counter = 0
        self.targets = in_hand_targets(self.object_shape)
        self.collision_manager = in_hand()
        # Display toggles driven by on_key_press2 (keys C/T/A/P).
        self.all_configs_on = False
        self.step_on = False
        self.path_on = False
        self.manip_p = None
        self.next_manip_p = None

    def target_T(self, T0, T1):
        """Store the reference transforms of the two target rectangles."""
        self.T0 = T0
        self.T1 = T1

    def draw_manifold(self):
        """Draw contact points and their penetration segments."""
        if self.manifold is None:
            return
        glPointSize(5)
        manifold = self.manifold
        for i in range(len(manifold.depths)):
            glBegin(GL_POINTS)
            cp = manifold.contact_points[i]
            glVertex3f(cp[0], cp[1], 1)
            glEnd()
            glBegin(GL_LINES)
            d = manifold.depths[i]
            n = manifold.normals[i]
            # Second endpoint: contact point pushed back along the normal.
            cq = cp - d * n
            glVertex3f(cp[0], cp[1], 1)
            glVertex3f(cq[0], cq[1], 1)
            glEnd()

    def draw_ground(self):
        """Draw the ground line with short hatch marks."""
        glBegin(GL_LINES)
        # ground line
        glVertex3f(-10, 0, -1)
        glVertex3f(10, 0, -1)
        # hashes
        for x in np.arange(-10, 10, 0.1):
            glVertex3f(x, 0, -1)
            glVertex3f(x - 0.1, -0.1, -1)
        glEnd()

    def draw_grid(self, size, step):
        """Draw a square grid of half-width *size* with spacing *step*."""
        glBegin(GL_LINES)
        glColor3f(0.3, 0.3, 0.3)
        for i in np.arange(step, size, step):
            glVertex3f(-size, i, 0) # lines parallel to X-axis
            glVertex3f(size, i, 0)
            glVertex3f(-size, -i, 0) # lines parallel to X-axis
            glVertex3f(size, -i, 0)
            glVertex3f(i, -size, 0) # lines parallel to Z-axis
            glVertex3f(i, size, 0)
            glVertex3f(-i, -size, 0) # lines parallel to Z-axis
            glVertex3f(-i, size, 0)
        # x-axis
        glColor3f(0.5, 0, 0)
        glVertex3f(-size, 0, 0)
        glVertex3f(size, 0, 0)
        # z-axis
        glColor3f(0, 0, 0.5)
        glVertex3f(0, -size, 0)
        glVertex3f(0, size, 0)
        glEnd()

    def render(self):
        """3D pass: lit obstacles/targets plus optional normals/wireframe."""
        glClearColor(0.2, 0.3, 0.3, 1.0)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glEnable(GL_DEPTH_TEST)
        glEnable(GL_MULTISAMPLE)
        glEnable(GL_BLEND)
        # glEnable(GL_CULL_FACE)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
        self.basic_lighting_shader.use()
        print_opengl_error()
        model = glm.mat4(1.0)
        self.basic_lighting_shader.set_mat4('model', np.asarray(model))
        view = self.camera.get_view()
        self.basic_lighting_shader.set_mat4('view', np.asarray(view))
        projection = glm.perspective(glm.radians(45.0), 1200. / 900, 0.1, 100.0)
        self.basic_lighting_shader.set_mat4('projection', np.asarray(projection))
        # colors
        # self.basic_lighting_shader.set_vec3('objectColor', np.array([1.0, 0.5, 0.31], 'f'))
        self.basic_lighting_shader.set_vec3('lightColor', np.array([1.0, 1.0, 1.0], 'f'))
        # light
        lightPos = glm.vec3([1.00, 1.75, 10.0])
        self.basic_lighting_shader.set_vec3('lightPos', np.asarray(lightPos))
        # camera
        cameraPos = glm.vec3(glm.column(glm.inverse(view), 3))
        self.basic_lighting_shader.set_vec3('viewPos', np.asarray(cameraPos))
        # Draw object.
        if self.draw_mesh:
            # Draw obstacles.
            self.basic_lighting_shader.set_vec3('objectColor', get_color('gray'))
            self.collision_manager.draw(self.basic_lighting_shader.id, True, True)
            # Draw object.
            self.basic_lighting_shader.set_vec3('objectColor', get_color('clay'))
            for target in self.targets:
                target.draw3d(self.basic_lighting_shader.id)
        # Draw normals.
        self.normal_shader.use()
        self.normal_shader.set_mat4('model', np.asarray(model))
        self.normal_shader.set_mat4('view', np.asarray(view))
        self.normal_shader.set_mat4('projection', np.asarray(projection))
        if self.draw_normals:
            self.mesh.draw(self.normal_shader)
        # Draw edges and light.
        self.lamp_shader.use()
        self.lamp_shader.set_mat4('model', np.asarray(model))
        self.lamp_shader.set_mat4('view', np.asarray(view))
        self.lamp_shader.set_mat4('projection', np.asarray(projection))
        self.lamp_shader.set_vec3('objectColor', np.ones((3, 1), 'float32'))
        glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
        if self.draw_wireframe:
            # Draw object.
            for target in self.targets:
                target.draw3d(self.lamp_shader.id)
        glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
        light_model = glm.mat4(1.0)
        light_model = glm.translate(light_model, lightPos)
        self.lamp_shader.set_mat4('model', np.asarray(light_model))
        # self.light_box.draw(self.lamp_shader)
        self.lamp_shader.set_mat4('model', np.asarray(model))
        self.lamp_shader.set_vec3('objectColor', get_color('teal'))
        model = glm.mat4(1.0)
        self.lamp_shader.set_vec3('objectColor', np.ones((3, 1), 'float32'))
        self.lamp_shader.set_mat4('model', np.asarray(model))
        # self.draw_grid(5, 0.25)

    def render2(self):
        """2D orthographic pass: obstacles plus planner visualizations."""
        glClearColor(0.2, 0.3, 0.3, 1.0)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glEnable(GL_DEPTH_TEST)
        glEnable(GL_MULTISAMPLE)
        # glEnable(GL_BLEND)
        # glEnable(GL_CULL_FACE)
        # glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
        # glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
        self.flat_shader.use()
        model = glm.mat4(1.0)
        self.flat_shader.set_mat4('model', np.asarray(model))
        view = glm.mat4(1.0)
        self.flat_shader.set_mat4('view', np.asarray(view))
        aspect_ratio = 800. / 600.
        d = 10
        ortho = glm.ortho(-d * aspect_ratio, d * aspect_ratio, -d, d, -100.0, 100.0)
        # ortho = glm.ortho(-2*aspect_ratio, 2*aspect_ratio, -2, 2, -100.0, 100.0)
        self.flat_shader.set_mat4('projection', np.asarray(ortho))
        self.flat_shader.set_vec3('offset', np.zeros((3, 1), 'float32'))
        self.flat_shader.set_float('scale', 1.0)
        self.flat_shader.set_vec3('objectColor', np.ones((3, 1), 'float32'))
        # self.draw_grid(5, 0.25)
        # Draw obstacles.
        self.flat_shader.set_vec3('objectColor', get_color('gray'))
        self.collision_manager.draw(self.flat_shader.id, True, False)
        if self.step_on:
            # Animate one configuration of the planned path per frame.
            # Draw object.
            new_m = point_manipulator()
            if self.counter >= len(self.path):
                self.counter = 0  # wrap around and replay
            self.config = self.path[self.counter]
            self.manip_p = self.mnp_path[self.counter]
            if self.manip_p is not None:
                for mnp in self.manip_p:
                    p = mnp.p
                    p = p[0:2]
                    new_m.update_config(np.array(p), self.config)
                    self.flat_shader.set_vec3('objectColor', get_color('red'))
                    new_m.obj.draw2d(self.flat_shader.id, True)
            self.flat_shader.set_vec3('objectColor', get_color('clay'))
            # Embed the planar transform into a 4x4 pose for both targets.
            T2 = config2trans(np.array(self.config))
            T3 = np.identity(4)
            T3[0:2, 3] = T2[0:2, 2]
            T3[0:2, 0:2] = T2[0:2, 0:2]
            self.targets[0].transform()[:, :] = np.dot(T3, self.T0)
            self.targets[0].draw2d(self.flat_shader.id, True)
            self.targets[1].transform()[:, :] = np.dot(T3, self.T1)
            self.targets[1].draw2d(self.flat_shader.id, True)
            # print(self.counter, len(self.path))
            time.sleep(0.07)  # crude frame pacing for the animation
            self.counter += 1
        if self.path_on:
            # Draw every path configuration at once, as wireframes.
            glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
            for i in range(len(self.path)):
                self.flat_shader.set_vec3('objectColor', get_color('clay'))
                target_config = self.path[i]
                T2 = config2trans(np.array(target_config))
                T3 = np.identity(4)
                T3[0:2, 3] = T2[0:2, 2]
                T3[0:2, 0:2] = T2[0:2, 0:2]
                self.targets[0].transform()[:, :] = np.dot(T3, self.T0)
                self.targets[0].draw2d(self.flat_shader.id, True)
                self.targets[1].transform()[:, :] = np.dot(T3, self.T1)
                self.targets[1].draw2d(self.flat_shader.id, True)
        if self.all_configs_on:
            glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
            # show all nodes
            for node in self.nodes:
                self.flat_shader.set_vec3('objectColor', get_color('clay'))
                target_config = np.array(node)
                T2 = config2trans(target_config)
                T3 = np.identity(4)
                T3[0:2, 3] = T2[0:2, 2]
                T3[0:2, 0:2] = T2[0:2, 0:2]
                self.targets[0].transform()[:, :] = np.dot(T3, self.T0)
                self.targets[0].draw2d(self.flat_shader.id, True)
                self.targets[1].transform()[:, :] = np.dot(T3, self.T1)
                self.targets[1].draw2d(self.flat_shader.id, True)

    def on_key_press2(self, key, scancode, action, mods):
        """Key toggles: C clears all modes, T animates, A shows nodes, P shows path."""
        if key == glfw.KEY_C and action == glfw.PRESS:
            self.step_on = False
            self.path_on = False
            self.all_configs_on = False
        if key == glfw.KEY_T and action == glfw.PRESS:
            self.step_on = True
        if key == glfw.KEY_A and action == glfw.PRESS:
            self.all_configs_on = True
        if key == glfw.KEY_P and action == glfw.PRESS:
            self.path_on = True

    def on_key_press(self, key, scancode, action, mods):
        pass

    # def on_mouse_press(self, x, y, button, modifiers):
    #     pass
    # def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
    #     pass

    def on_mouse_press(self, x, y, button, modifiers):
        # Map window pixels to normalized device coordinates in [-1, 1].
        x = 2.0 * (x / 800.0) - 1.0
        y = 2.0 * (y / 600.0) - 1.0
        if button == 1: # left click
            self.camera.mouse_roll(x, y, False)
        if button == 4: # right click
            self.camera.mouse_zoom(x, y, False)

    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        # Same NDC mapping as on_mouse_press.
        x = 2.0 * (x / 800.0) - 1.0
        y = 2.0 * (y / 600.0) - 1.0
        if buttons == 1: # left click
            self.camera.mouse_roll(x, y)
        if buttons == 4: # right click
            self.camera.mouse_zoom(x, y)

    def get_path(self, path, mnp_path):
        """Store the planned configuration path and manipulator path."""
        self.path = path
        self.mnp_path = mnp_path

    def get_nodes(self, nodes):
        """Store all tree node configurations for the 'A' display mode."""
        self.nodes = nodes

    def get_tree(self, tree):
        """Store the search tree."""
        self.tree = tree
def in_hand():
    """Build the 2D collision world for the in-hand task.

    Only the first wall is registered; the second is created but left out
    (see the commented add below), matching the original behavior.
    """
    manager = _itbl.CollisionManager2D()
    wall_a = _itbl.Rectangle(3, 0.5, 2, 0.05)
    wall_b = _itbl.Rectangle(0.2, 0.8, 2, 0.05)
    column_shape = wall_a.transform()[0:3, 3].shape
    wall_a.transform()[0:3, 3] = np.array([0, 1.75, 0]).reshape(column_shape)
    wall_b.transform()[0:3, 3] = np.array([0, 1.35, 0]).reshape(column_shape)
    manager.add(wall_a)
    # manager.add(wall_b)
    return manager
def in_hand_targets(object_shapes):
    """Create the two target rectangles of the in-hand object.

    The first box sits above the origin (+height/2), the second below
    (-height/2), so they stack back to back at y = 0.
    """
    targets = []
    for shape, sign in zip(object_shapes, (1.0, -1.0)):
        rect = _itbl.Rectangle(shape[0], shape[1], 2, 0.05)
        column = rect.transform()[0:3, 3]
        rect.transform()[0:3, 3] = np.array(
            [0, sign * shape[1] / 2, 0]).reshape(column.shape)
        targets.append(rect)
    return targets
class in_hand_part(object):
    """The manipulated object, made of two rectangles posed together.

    :param objs: the two rectangle bodies (as built by in_hand_targets)
    :param object_shapes: their (width, height) shapes
    """

    def __init__(self, objs, object_shapes):
        self.objs = objs
        self.object_shapes = object_shapes
        # Remember the initial poses so update_config can re-pose from scratch.
        self.T0 = np.copy(self.objs[0].transform())
        self.T1 = np.copy(self.objs[1].transform())

    def update_config(self, x):
        """Move both rectangles to planar configuration x = (x, y, theta)."""
        planar = config2trans(np.array(x))
        pose = np.identity(4)
        pose[0:2, 3] = planar[0:2, 2]
        pose[0:2, 0:2] = planar[0:2, 0:2]
        self.objs[0].transform()[:, :] = np.dot(pose, self.T0)
        self.objs[1].transform()[:, :] = np.dot(pose, self.T1)
        return

    def contacts2objframe(self, w_contacts, x):
        """Re-express world-frame contacts in the object frame at config x."""
        g_inv = inv_g_2d(config2trans(np.array(x)))
        contacts = []
        for c in w_contacts:
            p_obj = np.dot(g_inv, np.concatenate([np.array(c.p), [1]]))
            n_obj = np.dot(g_inv[0:2, 0:2], np.array(c.n))
            contacts.append(Contact(p_obj[0:2], n_obj, c.d))
        return contacts

    def sample_contacts(self, npts):
        """Sample npts candidate finger contacts on the object boundary."""
        return sample_finger_contact_inhand(self.object_shapes, npts)
def sample_finger_contact_inhand(object_shapes, npts):
    """Sample *npts* finger contacts on the boundary of the two-box object.

    Each of the 8 faces (4 per box, the shared seam split left/right) gets
    its own uniform sampler; faces are chosen uniformly at random.  Normals
    point into the object; depths are 0 (touching).
    """
    w1, l1 = object_shapes[0][0], object_shapes[0][1]
    w2, l2 = object_shapes[1][0], object_shapes[1][1]
    rand = np.random.random
    # side -> lambda returning (normal, point); lazy so the RNG is drawn
    # once per sampled side, exactly as the original if/elif chain did.
    samplers = {
        0: lambda: (np.array([0, -1]), np.array([w1 * (rand() - 0.5), l1])),
        1: lambda: (np.array([-1, 0]), np.array([w1 / 2, l1 * rand()])),
        2: lambda: (np.array([0, 1]), np.array([w2 / 2 + 0.5 * (w1 - w2) * rand(), 0])),
        3: lambda: (np.array([-1, 0]), np.array([w2 / 2, -l2 * rand()])),
        4: lambda: (np.array([0, 1]), np.array([w2 * (rand() - 0.5), -l2])),
        5: lambda: (np.array([1, 0]), np.array([-w2 / 2, -l2 * rand()])),
        6: lambda: (np.array([0, 1]), np.array([-(w2 / 2 + 0.5 * (w1 - w2) * rand()), 0])),
        7: lambda: (np.array([1, 0]), np.array([-w1 / 2, l1 * rand()])),
    }
    contacts = []
    for side in np.random.choice([0, 1, 2, 3, 4, 5, 6, 7], npts):
        n, p = samplers[side]()
        contacts.append(Contact(p, n, 0.0))
    return contacts
class in_hand_environment(object):
    """Wraps a 2D collision manager and reports contacts for the two-body
    in-hand object."""

    def __init__(self, collision_manager):
        self.collision_manager = collision_manager

    def _collect(self, body):
        """Collide one body against the environment.

        Returns (collided, contacts) where contacts are the manifold points
        with penetration depth below 0.015 (treated as touching).
        """
        manifold = self.collision_manager.collide(body)
        collided = sum(np.array(manifold.depths) < 0.015) != 0
        contacts = []
        for i in range(len(manifold.contact_points)):
            # Skip separated points; keep touching/penetrating ones.
            if manifold.depths[i] >= 0.015:
                continue
            contacts.append(Contact(manifold.contact_points[i],
                                    manifold.normals[i],
                                    manifold.depths[i]))
        return collided, contacts

    def check_collision(self, target, target_config):
        """Pose *target* at *target_config* and collide both of its bodies.

        Returns (if_collide, contacts).  Contacts come straight from the
        collision manifolds; the original code's comments disagreed on
        whether they are world- or object-frame -- TODO confirm with the
        _itbl collision manager.
        """
        target.update_config(target_config)
        hit0, contacts = self._collect(target.objs[0])
        hit1, more = self._collect(target.objs[1])
        contacts.extend(more)
        return hit0 or hit1, contacts
def test_kinorrt_cases(stability_solver, max_samples = 100):
    """Run one in-hand reorientation demo end-to-end: build the scene, search
    with the manipulation RRT from x_init to x_goal, post-process the found
    path with static trajectory optimization, and visualize the result.

    :param stability_solver: StabilityMarginSolver passed to the RRT.
    :param max_samples: sampling budget for the RRT search.
    """
    viewer = Viewer()
    _itbl.loadOpenGL()
    # --- planner / scene parameters ---
    step_length = 2
    neighbor_r = 5
    dist_cost = 10
    object_shapes = [[1,0.5],[0.5,0.5]]  # two boxes, [width, length] each
    # Search-space bounds for (x, y, theta).
    X_dimensions = np.array([(-1.5, 1.5), (-1.5, 2), (-1.5*np.pi, 1.5*np.pi)])
    x_init = (0,0,0)
    x_goal = (0,0,np.pi)  # reorient the object by 180 degrees in place
    world_key = 'vert'
    dist_weight = 1
    manipulator = doublepoint_manipulator(np.array([[-1.5,-1.5,0.,-1.5],[-0.,1.5,1.5,1.5]]))
    mnp_fn_max = 100  # max manipulator contact normal force
    goal_kch = [0.1, 0.1, 1]
    # --- scene / search construction ---
    app = iTM2d(object_shapes)
    viewer.set_renderer(app)
    viewer.init()
    X = SearchSpace(X_dimensions)
    the_object = in_hand_part(app.targets,object_shapes)
    app.target_T(the_object.T0, the_object.T1)
    envir = in_hand_environment(app.collision_manager)
    rrt_tree = RRTManipulation(X, x_init, x_goal, envir, the_object, manipulator,
                               max_samples, neighbor_r, world_key)
    rrt_tree.env_mu = 0.8  # environment friction coefficient
    rrt_tree.mnp_mu = 0.8  # manipulator friction coefficient
    rrt_tree.mnp_fn_max = mnp_fn_max
    rrt_tree.dist_weight = dist_weight
    rrt_tree.cost_weight[0] = dist_cost
    rrt_tree.step_length = step_length
    rrt_tree.goal_kch = goal_kch
    rrt_tree.initialize_stability_margin_solver(stability_solver)
    t_start = time.time()
    # Initial grasp: two opposing finger contacts on the upper box sides.
    init_mnp = [Contact((-0.5,0.25),(1,0),0),Contact((0.5,0.25),(-1,0),0)]
    # rrt_tree.x_goal = (0,0,np.pi/2)
    # path, mnp_path = rrt_tree.search(init_mnp)
    rrt_tree.x_goal = (0, 0, np.pi)
    paths = rrt_tree.search(init_mnp)
    t_end = time.time()
    print('time:', t_end - t_start)
    # --- flatten the per-edge paths into one trajectory with matching
    # environment contacts, contact modes and manipulator contacts ---
    whole_path = []
    envs = []
    mnps = []
    modes = []
    for q in paths[2][2:]:
        ps = rrt_tree.trees[0].edges[q].path
        ps.reverse()  # edge paths are stored goal-to-start
        m = np.array(rrt_tree.trees[0].edges[q].mode)
        current_envs = []
        current_modes = []
        current_path = []
        mnp = rrt_tree.trees[0].edges[q].manip
        for p in ps:
            _, env = rrt_tree.check_collision(p)
            if len(mnp) + len(env) != len(m):
                # Contact count mismatch: drop LIFT_OFF entries if that
                # reconciles it, otherwise skip this waypoint.
                if len(mnp) + len(env) == sum(m != CONTACT_MODE.LIFT_OFF):
                    m = m[m != CONTACT_MODE.LIFT_OFF]
                else:
                    print('env contact error')
                    continue
            current_modes.append(m)
            current_path.append(p)
            current_envs.append(env)
        current_mnps = [mnp] * len(current_path)
        whole_path += current_path
        envs += current_envs
        modes += current_modes
        mnps += current_mnps
    print(whole_path, envs, modes, mnps)
    # app.get_path(paths[0],mnps)
    results = traj_optim_static((whole_path, envs, modes, mnps), rrt_tree)
    app.get_path(np.array(results).reshape(-1, 3), mnps)
    app.get_nodes(rrt_tree.trees[0].nodes)
    app.get_tree(rrt_tree)
    viewer.start()
    return
# Script entry point: run the kino-RRT in-hand manipulation demo with a
# reproducible seed.
stability_solver = StabilityMarginSolver()
for i in [4]:  # case index; only case 4 is run here
    # Seed both RNG modules so each case index reproduces the same search.
    seed_number = i*1000
    random.seed(seed_number)
    np.random.seed(seed_number)
    # test_kinorrt_cases returns None; `ti` is kept for interactive use only.
    ti = test_kinorrt_cases(stability_solver, max_samples=1000)
|
from .functions_for_in_time_compile import *
|
# Copyright 2020 The Backstage Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mkdocs.plugins import BasePlugin
from .parser import Parser
from .merger import Merger
class MonorepoPlugin(BasePlugin):
    """MkDocs plugin that merges several repositories' docs into one site.

    ``!import`` statements in the ``nav`` are resolved and the referenced
    docs trees are copied into a temporary merged ``docs`` directory, which
    then replaces the configured ``docs_dir`` for the build.
    """

    def __init__(self):
        self.parser = None
        self.merger = None
        self.originalDocsDir = None   # set only once docs_dir was swapped
        self.resolvedPaths = []       # (alias, docs_dir, yaml_file) triples
        self.files_source_dir = {}    # merged abs path -> original source path

    def on_config(self, config):
        """Resolve !import statements and point MkDocs at the merged docs dir."""
        # If no 'nav' defined, we don't need to run.
        if not config.get('nav'):
            return config
        # Handle !import statements
        self.parser = Parser(config)
        resolvedNav = self.parser.resolve()
        resolvedPaths = self.parser.getResolvedPaths()
        config['nav'] = resolvedNav
        # Generate a new "docs" directory
        self.merger = Merger(config)
        for alias, docs_dir, yaml_file in resolvedPaths:
            self.merger.append(alias, docs_dir)
        new_docs_dir = self.merger.merge()
        # Update the docs_dir with our temporary one!
        self.originalDocsDir = config['docs_dir']
        config['docs_dir'] = new_docs_dir
        # Store resolved paths for later.
        self.resolvedPaths = resolvedPaths
        # Store source directory of copied files for later
        self.files_source_dir = self.merger.getFilesSourceFolder()
        return config

    def on_pre_page(self, page, config, files):
        """Point each page back at its true source file, if it was copied."""
        # Update page source attribute to point to source file
        # Only in case any files were moved.
        if len(self.files_source_dir) > 0:
            if page.file.abs_src_path in self.files_source_dir:
                page.file.abs_src_path = self.files_source_dir[page.file.abs_src_path]
        return page

    def on_serve(self, server, config, **kwargs):
        """Watch the original docs trees so `mkdocs serve` rebuilds on edits."""
        # We didn't preprocess this repo / create a new docs directory
        # so we don't need to watch the original docs dir.
        if self.originalDocsDir is None:
            return
        # Support mkdocs < 1.2
        if hasattr(server, 'watcher'):
            buildfunc = list(server.watcher._tasks.values())[0]['func']
            # still watch the original docs/ directory
            server.watch(self.originalDocsDir, buildfunc)
            # watch all the sub docs/ folders
            for _, docs_dir, yaml_file in self.resolvedPaths:
                server.watch(yaml_file, buildfunc)
                server.watch(docs_dir, buildfunc)
        else:
            # still watch the original docs/ directory
            server.watch(self.originalDocsDir)
            # watch all the sub docs/ folders
            for _, docs_dir, yaml_file in self.resolvedPaths:
                server.watch(yaml_file)
                server.watch(docs_dir)

    def on_post_build(self, config):
        """Remove the temporary merged docs directory after the build.

        BUG FIX: MkDocs dispatches plugin events as ``on_<event>``; the
        original method was named ``post_build`` and therefore never ran,
        leaking the temporary directory on every build.  Also guard against
        the plugin having been skipped entirely (no 'nav' in the config),
        in which case no merger was ever created.
        """
        if self.merger is not None:
            self.merger.cleanup()

    def post_build(self, config):
        """Backward-compatible alias for :meth:`on_post_build`."""
        self.on_post_build(config)
|
# SPDX-FileCopyrightText: Copyright (c) 2022 Daniel Griswold
#
# SPDX-License-Identifier: MIT
"""
`sdp31`
================================================================================
CircuitPython helper library for the SDP31 differential pressure sensor
* Author(s): Daniel Griswold
Implementation Notes
--------------------
**Hardware:**
* 'Sparkfun SparkX Differential Pressure Sensor - SDP31 (Qwiic):
<https://www.sparkfun.com/products/17874>'
Creative Commons images are CC BY 2.0
SparkX Differential Pressure Sensor - SDP31 (Qwiic)
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
# * Adafruit's Bus Device library: https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
"""
import time
from adafruit_bus_device.i2c_device import I2CDevice
from micropython import const
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/dgriswo/CircuitPython_sdp31.git"

# Default I2C address of the SDP31.
_SDP31_I2CADDR_DEFAULT = const(0x21)

# Two-byte command codes written to the sensor to select an operation.
_SDP31_CONTINUOUS_DIFF_PRESSURE_AVERAGE = b"\x36\x15"  # continuous, average until read
_SDP31_CONTINUOUS_DIFF_PRESSURE = b"\x36\x1E"  # continuous, no averaging
_SDP31_STOP_CONTINUOUS_MEASURE = b"\x3F\xF9"
_SDP31_TRIGGER_DIFF_PRESSURE_STRETCH = b"\x37\x2D"  # one-shot, clock stretching
_SDP31_ENTER_SLEEP = b"\x36\x77"
_SDP31_READ_PRODUCT_NUMBER = b"\x36\x7C"
_SDP31_READ_SERIAL_NUMBER = b"\xE1\x02"

# CRC-8 parameters used to verify each 2-byte data word read back.
_SDP31_CRC8_POLYNOMIAL = const(0x31)
_SDP31_CRC8_INIT = const(0xFF)
_SDP31_WORD_LEN = const(2)  # bytes per data word (a CRC byte follows each word)
class SDP31:
    """
    A driver for the SDP31 differential pressure sensor.

    :param ~busio.I2C i2c: The I2C bus the SDP31 is connected to.
    :param int address: The I2C address of the device. Defaults to :const: '0x21'

    **Quickstart: Importing and using the SDP31 pressure sensor**

    Here is one way of importing the `SDP31` class so you
    can use it with the name ``sdp31``.
    First you will need to import the libraries to use the sensor

    .. code-block:: python

        import board
        import busio
        import sdp31

    Once this is done you can define your `busio.I2C` object and define your sensor object

    .. code-block:: python

        i2c = busio.I2C(board.SCL, board.SDA, frequency=100000)
        sdp31 = sdp31.SDP31(i2c)

    Now you have access to the differential pressure using the
    :attr:`differential_pressure` attribute and the temperature using the :attr:`temperature`

    .. code-block:: python

        diff_pressure = sdp31.differential_pressure
        temperature = sdp31.temperature
    """

    def __init__(self, i2c, address=0x21):  # 0x21 == _SDP31_I2CADDR_DEFAULT
        """Initialize the sensor, get the product number and verify that we found a SDP31."""
        self._device = I2CDevice(i2c, address)
        self._pressure = None
        self._temperature = None
        self._mode = None  # last continuous-measurement command issued
        self._continuous_measurement = False
        self.soft_reset(i2c)
        time.sleep(0.25)
        result = self.read_identifiers()
        # The product number tells the SDP31 and SDP32 apart; the two parts
        # differ only in their pressure scale factor.
        if result == b"\x03\x01\x01\x01":  # SDP31
            self._pressure_scale_factor = 60
            self._temperature_scale_factor = 200
        elif result == b"\x03\x01\x02\x01":  # SDP32
            self._pressure_scale_factor = 240
            self._temperature_scale_factor = 200
        else:
            raise RuntimeError("SDP not detected.")

    def start_continuous_measurement(self, average=False):
        """
        In continuous mode the sensor is measuring at the highest speed
        and writes the measurement values to the I2C results buffer, where
        the I2C master can read out the value when it requires.

        If the 'average till read' option is chosen, the sensor averages
        all values prior to the read out. This has the benefit that
        the user can read out the sensor at its own desired speed, without
        losing information, which thus prevents aliasing.

        :param bool average: select the average-till-read mode
        :raises RuntimeError: when switching modes without stopping first
        """
        if average is True:
            mode = _SDP31_CONTINUOUS_DIFF_PRESSURE_AVERAGE
        else:
            mode = _SDP31_CONTINUOUS_DIFF_PRESSURE
        if self._continuous_measurement is True and mode != self._mode:
            raise RuntimeError(
                "Continuous measurement already started. Stop measurement before switching modes."
            )
        if self._continuous_measurement is True and mode is self._mode:
            return  # Continuous measurement active
        with self._device:
            self._device.write(bytes(mode))
        self._mode = mode
        self._continuous_measurement = True
        time.sleep(0.02)

    def read_measurement(self, sensor):
        """
        Read one raw value while continuous measurement is running.

        :param str sensor: "pressure" or "temperature"
        :return: 2-byte big-endian raw reading
        :raises ValueError: for an unknown *sensor* name (previously this
            crashed with an UnboundLocalError)
        """
        if sensor == "pressure":
            result = self._i2c_read_words(1)
        elif sensor == "temperature":
            # Pressure and temperature are streamed in order; the
            # temperature is the second word.
            result = self._i2c_read_words(2)
            result = result[2:4]
        else:
            raise ValueError("sensor must be 'pressure' or 'temperature'")
        return result

    def stop_continuous_measurement(self):
        """
        This command stops the continuous measurement and puts the sensor
        in idle mode. It powers off the heater and makes the sensor receptive
        for another command after 500us. The Stop command is also required
        when switching between different continuous measurement commands.
        """
        with self._device:
            self._device.write(bytes(_SDP31_STOP_CONTINUOUS_MEASURE))
        time.sleep(0.005)
        self._continuous_measurement = False

    def triggered_measurement(self, sensor):
        """
        During a triggered measurement the sensor measures both differential
        pressure and temperature. The measurement starts directly after the
        command has been sent. The command needs to be repeated with every
        measurement.

        :param str sensor: "pressure" or "temperature"
        :return: 2-byte big-endian raw reading
        :raises ValueError: for an unknown *sensor* name
        """
        if sensor == "pressure":
            result = self._i2c_read_words_from_cmd(
                _SDP31_TRIGGER_DIFF_PRESSURE_STRETCH, 0, 1
            )
        elif sensor == "temperature":
            result = self._i2c_read_words_from_cmd(
                _SDP31_TRIGGER_DIFF_PRESSURE_STRETCH, 0, 2
            )
            result = result[2:4]  # temperature is the second word
        else:
            raise ValueError("sensor must be 'pressure' or 'temperature'")
        return result

    def soft_reset(self, i2c):
        """
        This sequence resets the sensor with a separate reset block,
        which is as much as possible detached from the rest of the
        system on chip.

        Note that the I2C address is 0x00, which is the general call
        address, and that the command is 8 bit. The reset is
        implemented according to the I2C specification.
        """
        while not i2c.try_lock():
            pass
        # 0x06 is the I2C general-call reset command (the original wrote the
        # misleading-but-identical bytes([0x0006])).
        i2c.writeto(0x0, bytes([0x06]))
        i2c.unlock()
        self._continuous_measurement = False

    def enter_sleep(self):
        """
        Puts the sensor into sleep mode. If continuous
        measurement is enabled, it is stopped.
        """
        if self._continuous_measurement is True:
            self.stop_continuous_measurement()
        with self._device:
            self._device.write(bytes(_SDP31_ENTER_SLEEP))

    def exit_sleep(self):
        """Wakes the sensor from sleep."""
        with self._device:
            self._device.write(bytes([0x00]))
        time.sleep(0.02)

    def read_identifiers(self):
        """Return the 4-byte product number for the sensor (CRC verified)."""
        with self._device:
            self._device.write(bytes(_SDP31_READ_PRODUCT_NUMBER))
            self._device.write(bytes(_SDP31_READ_SERIAL_NUMBER))
            crc_result = bytearray(6)
            self._device.readinto(crc_result)
        return self._unpack_crc_words(crc_result, 2)

    def _i2c_read_words(self, reply_size):
        """Read *reply_size* CRC-protected words during continuous mode."""
        with self._device:
            # NOTE(review): this first read looks redundant -- the CRC'd data
            # is read again into crc_result below. Kept as-is to preserve the
            # original bus transaction sequence; confirm against hardware.
            result = bytearray(reply_size)
            self._device.readinto(result)
            if not reply_size:
                return None
            crc_result = bytearray(reply_size * (_SDP31_WORD_LEN + 1))
            self._device.readinto(crc_result)
        return self._unpack_crc_words(crc_result, reply_size)

    def _i2c_read_words_from_cmd(self, command, delay, reply_size):
        """Run an SDP command query, get a reply and CRC results if necessary."""
        with self._device:
            self._device.write(bytes(command))
            time.sleep(delay)
            if not reply_size:
                return None
            crc_result = bytearray(reply_size * (_SDP31_WORD_LEN + 1))
            self._device.readinto(crc_result)
        return self._unpack_crc_words(crc_result, reply_size)

    def _unpack_crc_words(self, buf, count):
        """Verify and strip the CRC byte trailing each 2-byte word in *buf*.

        Factored out of read_identifiers/_i2c_read_words/_i2c_read_words_from_cmd,
        which previously duplicated this loop verbatim.

        :return: bytearray of the data bytes with CRC bytes removed
        :raises RuntimeError: on any CRC mismatch
        """
        result = bytearray()
        for i in range(count):
            word = [buf[3 * i], buf[3 * i + 1]]
            if self._generate_crc(word) != buf[3 * i + 2]:
                raise RuntimeError("CRC Error")
            result.append(word[0])
            result.append(word[1])
        return result

    # pylint: disable=no-self-use
    def _generate_crc(self, data):
        """8-bit CRC algorithm for checking data"""
        crc = _SDP31_CRC8_INIT
        # calculates 8-Bit checksum with given polynomial
        for byte in data:
            crc ^= byte
            for _ in range(8):
                if crc & 0x80:
                    crc = (crc << 1) ^ _SDP31_CRC8_POLYNOMIAL
                else:
                    crc <<= 1
        return crc & 0xFF

    @property
    def temperature(self):
        """Temperature reading in degrees Celsius (accuracy +/- 2 C)."""
        if self._continuous_measurement is True:
            raw = self.read_measurement("temperature")
        else:
            raw = self.triggered_measurement("temperature")
        # BUG FIX: the sensor reports a signed (two's complement) 16-bit
        # value; decoding it unsigned turned sub-zero temperatures into
        # large positive numbers.
        return int.from_bytes(raw, "big", signed=True) / self._temperature_scale_factor

    @property
    def differential_pressure(self):
        """Differential pressure reading in Pascal."""
        if self._continuous_measurement is True:
            raw = self.read_measurement("pressure")
        else:
            raw = self.triggered_measurement("pressure")
        # BUG FIX: signed 16-bit -- differential pressure is bidirectional,
        # so negative readings must decode as negative.
        return int.from_bytes(raw, "big", signed=True) / self._pressure_scale_factor
|
# Generated by Django 3.1.1 on 2020-09-10 17:25
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: redefines AutomatedTask.sync_status as a
    # CharField with three lifecycle choices (including "pendingdeletion").

    dependencies = [
        ("autotasks", "0004_automatedtask_sync_status"),
    ]

    operations = [
        migrations.AlterField(
            model_name="automatedtask",
            name="sync_status",
            field=models.CharField(
                choices=[
                    ("synced", "Synced With Agent"),
                    ("notsynced", "Waiting On Agent Checkin"),
                    ("pendingdeletion", "Pending Deletion on Agent"),
                ],
                default="synced",
                max_length=100,
            ),
        ),
    ]
|
"""
Webcam library. Generally, don't access this library directly. Instead, use
the video.py library.
"""
import cv2
import threading
import time
from video.abstract_cam import AbstractCam
class Webcam(AbstractCam):
    """Capture frames from a local OpenCV webcam on a background thread."""

    def __init__(self, cam_id=0):
        self.cam_id = cam_id
        self.running = False
        self.current_frame = None  # latest frame; None until first capture
        # BUG FIX: previously these were created only inside start(), so
        # calling stop() or iterating attributes before start() raised
        # AttributeError.
        self.cam = None
        self.ct = None

    def start(self):
        """Open the camera and launch the daemon capture thread."""
        self.cam = cv2.VideoCapture(self.cam_id)
        self.ct = threading.Thread(target=self._capture_image_thread, name="Webcam")
        self.ct.daemon = True
        self.running = True
        self.ct.start()

    def stop(self):
        """Stop the capture thread and release the camera (idempotent)."""
        self.running = False
        if self.ct is not None:
            self.ct.join()
            self.ct = None
        if self.cam is not None:
            self.cam.release()
            self.cam = None

    def read_frame(self):
        """Return the most recent frame, or None when not running."""
        if self.running is True:
            return self.current_frame

    def _capture_image_thread(self):
        # Poll the camera until stopped; a failed read keeps the previous
        # good frame instead of overwriting it with garbage.
        while self.running is True:
            success, frame = self.cam.read()
            if success:
                self.current_frame = frame
            time.sleep(0.005)
|
import mxnet as mx
# Minimal end-to-end check: run a single 3x3 convolution forward pass with a
# monitor installed on the module's executor group.
data_shape = (1,3,5,5)  # NCHW input shape

class SimpleData(object):
    """Bare-bones stand-in for a DataBatch: exposes only a .data list."""
    def __init__(self, data):
        self.data = data

data = mx.sym.Variable('data')
conv = mx.sym.Convolution(data=data, kernel=(3,3), pad=(1,1), stride=(1,1), num_filter=1)
# Monitor with interval 1 so every batch's arrays are inspected.
mon = mx.mon.Monitor(1)
mod = mx.mod.Module(conv)
mod.bind(data_shapes=[('data', data_shape)])
# NOTE(review): _exec_group is a private Module attribute; this relies on
# mxnet internals and may break across versions.
mod._exec_group.install_monitor(mon)
mod.init_params()
input_data = mx.nd.ones(data_shape)
mod.forward(data_batch=SimpleData([input_data]))
res = mod.get_outputs()[0].asnumpy()
print(res)
#
# Part I: data collection
# 1. create a database
# 2. create a table
# 3. insert data
#
# %% libraries
import aux
import datetime
# ACTION: show the content of aux
# %% create a database with a table
db_filename = 'tutorial.db'  # SQLite file, created on first use
aux.create_table_geometric_shapes(db_filename)
# ACTION: show that the table is created via terminal
# $ sqlitebrowser tutorial.db

# %% add data in the table: one row describing a geometric shape
data = dict(shape = 'rectangle',
            height = 1.0,
            width = 1.0,
            colour = 'red',
            datetime = datetime.datetime.now().__str__())
aux.insert_into_geometric_shapes(db_filename, data)
# ACTION: show that the data is in the table
# NOTE: datetime or commit hash is useful in combination with git so as to
# match each data entry with the software version

# %% add more data: the cartesian product of heights, widths and colours
for height in [2.8, 3.7, 5.4]:
    for width in [0.5, 1.4, 3.6]:
        for colour in ['blue', 'black', 'green']:
            data = dict(shape = 'rectangle',
                        height = height,
                        width = width,
                        colour = colour,
                        datetime = datetime.datetime.now().__str__())
            aux.insert_into_geometric_shapes(db_filename, data)
# ACTION: show that the data is in the table. show basic filters
# ACTION: show that the data is in the table. show basic filters
|
"""A setuptools-based script for installing FauxFactory."""
# setuptools is preferred over distutils.
import codecs
import os
from setuptools import find_packages, setup
def read(*paths):
    """Join *paths* into one file path and return that file's UTF-8 text."""
    full_path = os.path.join(*paths)
    with codecs.open(full_path, mode='r', encoding='utf-8') as file_obj:
        contents = file_obj.read()
    return contents
# Concatenate the project docs into the long description shown on PyPI.
LONG_DESCRIPTION = (read('README.rst') + '\n\n' +
                    read('AUTHORS.rst') + '\n\n' +
                    read('HISTORY.rst'))

setup(
    name='fauxfactory',
    description='Generates random data for your tests.',
    long_description=LONG_DESCRIPTION,
    version='3.0.2',
    author='Og Maciel',
    author_email='omaciel@ogmaciel.com',
    url='https://github.com/omaciel/fauxfactory',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    include_package_data=True,
    keywords='python automation data',
    license='Apache 2.0',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Software Development :: Testing',
    ],
    test_suite='tests',
)
|
# gamepage.py
# Author: Richard Gibson
#
# Renders individual game pages at /game/<game_code>, where game_code is the
# game converted through util.get_code(). For each category of the game run,
# a table lists the PBs for each runner, sorted by time. The tables
# themselves are sorted by number of PBs. These tables are filled through
# handler.get_gamepage() that returns a list of dictionaries, one for each
# category run. Similar to runnerpage.py, for each dictionary d,
# d['infolist'] is itself a list of dictionaries, one for each runner.
#
import handler
import runs
import util
import logging
import json
from operator import itemgetter
from google.appengine.ext import db
from google.appengine.runtime import DeadlineExceededError
class GamePage( handler.Handler ):
    """Handler for /game/<game_code>: renders per-category PB tables.

    Python 2 / Google App Engine code (note the old ``except Exc, name``
    syntax and ``dict.iteritems``).  OVER_QUOTA_ERROR sentinels from the
    base handler are checked after every datastore/memcache access.
    """
    def get( self, game_code ):
        try:
            user = self.get_user( )
            if user == self.OVER_QUOTA_ERROR:
                user = None
            # Have to take the code of the game code because of percent
            # encoded plusses
            game_code = util.get_code( game_code )
            # Make sure this game exists
            game_model = self.get_game_model( game_code )
            if not game_model:
                self.error( 404 )
                self.render( "404.html", user=user )
                return
            if game_model == self.OVER_QUOTA_ERROR:
                self.error( 403 )
                self.render( "403.html", user=user )
                return
            # Find out if this user has run this game
            if user is not None:
                user_has_run = self.get_user_has_run( user.username,
                                                      game_model.game )
                if user_has_run == self.OVER_QUOTA_ERROR:
                    user_has_run = False
            else:
                user_has_run = False
            # Look for a specific category to display
            category_code = self.request.get( 'c' )
            if category_code:
                category_code = util.get_code( category_code )
            else:
                category_code = None
            # Grab the categories with their codes
            categories = [ ]
            category = None
            for this_category in game_model.categories( ):
                this_category_code = util.get_code( this_category )
                categories.append( dict( category=this_category,
                                         category_code=this_category_code ) )
                if category_code == this_category_code:
                    category = this_category
            categories.sort( key=itemgetter( 'category_code' ) )
            # Grab the page num
            page_num = self.request.get( 'page' )
            try:
                page_num = int( page_num )
            except ValueError:
                page_num = 1
            gamepage = self.get_gamepage( game_model.game, category, page_num )
            if gamepage == self.OVER_QUOTA_ERROR:
                self.error( 403 )
                self.render( "403.html", user=user )
                return
            page_num = gamepage['page_num']
            # Add gravatar images to the gamepage
            d = gamepage['d']
            if d is not None:
                for run in d['infolist']:
                    runner = self.get_runner( util.get_code(
                        run['username'] ) )
                    if runner == self.OVER_QUOTA_ERROR:
                        self.error( 403 )
                        if self.format == 'html':
                            self.render( "403.html", user=user )
                        return
                    if runner is not None:
                        run['gravatar_url'] = util.get_gravatar_url(
                            runner.gravatar, size=20 )
                if len( d['infolist'] ) <= 0 and page_num == 1:
                    # Empty first page: this category has no runs left, so
                    # scrub it from the stored game info; if it was the last
                    # category, delete the game and purge caches.
                    # Delete this category for the game model
                    gameinfolist = json.loads( game_model.info )
                    for i, gameinfo in enumerate( gameinfolist ):
                        if category == gameinfo['category']:
                            del gameinfolist[ i ]
                            logging.info( 'Removed ' + category
                                          + ' from ' + game_model.game )
                            if len( gameinfolist ) == 0:
                                # Remove the game
                                game = game_model.game
                                game_model.delete( )
                                logging.info( game + " deleted" )
                                self.update_cache_game_model( game_code, None )
                                # From gamelist in memcache too
                                cached_gamelists = self.get_cached_gamelists( )
                                if cached_gamelists is not None:
                                    done = False
                                    for page_num, res in cached_gamelists.iteritems( ):
                                        if done:
                                            break
                                        for i, d in enumerate( res['gamelist'] ):
                                            if d['game'] == game:
                                                del cached_gamelists[ page_num ]['gamelist'][ i ]
                                                done = True
                                                break
                                    self.update_cache_gamelist(
                                        cached_gamelists )
                                self.error( 404 )
                                self.render( "404.html", user=user )
                                return
                            else:
                                # Update game
                                game_model.info = json.dumps( gameinfolist )
                                game_model.put( )
                                self.update_cache_game_model( game_code,
                                                              game_model )
                            break
            has_prev = False
            if page_num > 1:
                has_prev = True
            if self.format == 'html':
                self.render( "gamepage.html", user=user, game=game_model.game,
                             game_code=game_code, d=d, categories=categories,
                             user_has_run=user_has_run, has_prev=has_prev,
                             has_next=gamepage['has_next'], page_num=page_num )
            elif self.format == 'json':
                if d is None:
                    self.render_json( categories )
                else:
                    self.render_json( d )
        except DeadlineExceededError, msg:
            # GAE request deadline hit; render a friendly error page.
            logging.error( msg )
            self.error( 403 )
            self.render( "deadline_exceeded.html", user=user )
|
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2004-2020 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Utility functions, toolbars and actions to create profile on images
and stacks of images"""
__authors__ = ["V.A. Sole", "T. Vincent", "P. Knobel", "H. Payno"]
__license__ = "MIT"
__date__ = "12/04/2019"
import weakref
import numpy
from silx.image.bilinear import BilinearImage
from .. import icons
from .. import qt
from . import items
from ..colors import cursorColorForColormap
from . import actions
from .PlotToolButtons import ProfileToolButton, ProfileOptionToolButton
from .ProfileMainWindow import ProfileMainWindow
from silx.utils.deprecation import deprecated
def _alignedFullProfile(data, origin, scale, position, roiWidth, axis, method):
    """Get a profile along one axis on a stack of images

    :param numpy.ndarray data: 3D volume (stack of 2D images)
        The first dimension is the image index.
    :param origin: Origin of image in plot (ox, oy)
    :param scale: Scale of image in plot (sx, sy)
    :param float position: Position of profile line in plot coords
        on the axis orthogonal to the profile direction.
    :param int roiWidth: Width of the profile in image pixels.
    :param int axis: 0 for horizontal profile, 1 for vertical.
    :param str method: method to compute the profile. Can be 'mean' or 'sum'
    :return: profile image + effective ROI area corners in plot coords
    """
    assert axis in (0, 1)
    assert len(data.shape) == 3
    assert method in ('mean', 'sum')

    # Convert from plot to image coords
    imgPos = int((position - origin[1 - axis]) / scale[1 - axis])

    if axis == 1:  # Vertical profile
        # Transpose image to always do a horizontal profile
        data = numpy.transpose(data, (0, 2, 1))

    nimages, height, width = data.shape

    roiWidth = min(height, roiWidth)  # Clip roi width to image size

    # Get [start, end[ coords of the roi in the data
    start = int(int(imgPos) + 0.5 - roiWidth / 2.)
    # Clamp so the full roiWidth stays inside the image when possible.
    start = min(max(0, start), height - roiWidth)
    end = start + roiWidth

    if start < height and end > 0:
        # Select the reduction; ValueError is only reachable when asserts
        # are stripped (python -O).
        if method == 'mean':
            _fct = numpy.mean
        elif method == 'sum':
            _fct = numpy.sum
        else:
            raise ValueError('method not managed')
        profile = _fct(data[:, max(0, start):min(end, height), :], axis=1).astype(numpy.float32)
    else:
        # ROI entirely outside the image: all-zero profile.
        profile = numpy.zeros((nimages, width), dtype=numpy.float32)

    # Compute effective ROI in plot coords
    profileBounds = numpy.array(
        (0, width, width, 0),
        dtype=numpy.float32) * scale[axis] + origin[axis]
    roiBounds = numpy.array(
        (start, start, end, end),
        dtype=numpy.float32) * scale[1 - axis] + origin[1 - axis]

    if axis == 0:  # Horizontal profile
        area = profileBounds, roiBounds
    else:  # vertical profile
        area = roiBounds, profileBounds

    return profile, area
def _alignedPartialProfile(data, rowRange, colRange, axis, method):
"""Mean of a rectangular region (ROI) of a stack of images
along a given axis.
Returned values and all parameters are in image coordinates.
:param numpy.ndarray data: 3D volume (stack of 2D images)
The first dimension is the image index.
:param rowRange: [min, max[ of ROI rows (upper bound excluded).
:type rowRange: 2-tuple of int (min, max) with min < max
:param colRange: [min, max[ of ROI columns (upper bound excluded).
:type colRange: 2-tuple of int (min, max) with min < max
:param int axis: The axis along which to take the profile of the ROI.
0: Sum rows along columns.
1: Sum columns along rows.
:param str method: method to compute the profile. Can be 'mean' or 'sum'
:return: Profile image along the ROI as the mean of the intersection
of the ROI and the image.
"""
assert axis in (0, 1)
assert len(data.shape) == 3
assert rowRange[0] < rowRange[1]
assert colRange[0] < colRange[1]
assert method in ('mean', 'sum')
nimages, height, width = data.shape
# Range aligned with the integration direction
profileRange = colRange if axis == 0 else rowRange
profileLength = abs(profileRange[1] - profileRange[0])
# Subset of the image to use as intersection of ROI and image
rowStart = min(max(0, rowRange[0]), height)
rowEnd = min(max(0, rowRange[1]), height)
colStart = min(max(0, colRange[0]), width)
colEnd = min(max(0, colRange[1]), width)
if method == 'mean':
_fct = numpy.mean
elif method == 'sum':
_fct = numpy.sum
else:
raise ValueError('method not managed')
imgProfile = _fct(data[:, rowStart:rowEnd, colStart:colEnd], axis=axis + 1,
dtype=numpy.float32)
# Profile including out of bound area
profile = numpy.zeros((nimages, profileLength), dtype=numpy.float32)
# Place imgProfile in full profile
offset = - min(0, profileRange[0])
profile[:, offset:offset + imgProfile.shape[1]] = imgProfile
return profile
def createProfile(roiInfo, currentData, origin, scale, lineWidth, method):
    """Create the profile line for the the given image.

    :param roiInfo: information about the ROI: start point, end point and
        type ("X", "Y", "D")
    :param numpy.ndarray currentData: the 2D image or the 3D stack of images
        on which we compute the profile.
    :param origin: (ox, oy) the offset from origin
    :type origin: 2-tuple of float
    :param scale: (sx, sy) the scale to use
    :type scale: 2-tuple of float
    :param int lineWidth: width of the profile line
    :param str method: method to compute the profile. Can be 'mean' or 'sum'
    :return: `coords, profile, area, profileName, xLabel`, where:

        - coords is the X coordinate to use to display the profile
        - profile is a 2D array of the profiles of the stack of images.
          For a single image, the profile is a curve, so this parameter
          has a shape *(1, len(curve))*
        - area is a tuple of two 1D arrays with 4 values each. They represent
          the effective ROI area corners in plot coords.
        - profileName is a string describing the ROI, meant to be used as
          title of the profile plot
        - xLabel the label for X in the profile window

    :raises ValueError: on missing arguments or data that is not a 2D image
        or a 3D stack of images
    :rtype: tuple(ndarray,ndarray,(ndarray,ndarray),str)
    """
    if currentData is None or roiInfo is None or lineWidth is None:
        # BUG FIX: typo "invalide" in the error message.
        raise ValueError("createProfile called with invalid arguments")

    # force 3D data (stack of images)
    if len(currentData.shape) == 2:
        currentData3D = currentData.reshape((1,) + currentData.shape)
    elif len(currentData.shape) == 3:
        currentData3D = currentData
    else:
        # BUG FIX: other dimensionalities previously fell through and
        # crashed later with UnboundLocalError on currentData3D.
        raise ValueError(
            "createProfile requires a 2D image or a 3D stack of images, "
            "got %dD data" % len(currentData.shape))

    roiWidth = max(1, lineWidth)
    roiStart, roiEnd, lineProjectionMode = roiInfo

    if lineProjectionMode == 'X':  # Horizontal profile on the whole image
        profile, area = _alignedFullProfile(currentData3D,
                                            origin, scale,
                                            roiStart[1], roiWidth,
                                            axis=0,
                                            method=method)

        coords = numpy.arange(len(profile[0]), dtype=numpy.float32)
        coords = coords * scale[0] + origin[0]

        yMin, yMax = min(area[1]), max(area[1]) - 1
        if roiWidth <= 1:
            profileName = 'Y = %g' % yMin
        else:
            profileName = 'Y = [%g, %g]' % (yMin, yMax)
        xLabel = 'X'

    elif lineProjectionMode == 'Y':  # Vertical profile on the whole image
        profile, area = _alignedFullProfile(currentData3D,
                                            origin, scale,
                                            roiStart[0], roiWidth,
                                            axis=1,
                                            method=method)

        coords = numpy.arange(len(profile[0]), dtype=numpy.float32)
        coords = coords * scale[1] + origin[1]

        xMin, xMax = min(area[0]), max(area[0]) - 1
        if roiWidth <= 1:
            profileName = 'X = %g' % xMin
        else:
            profileName = 'X = [%g, %g]' % (xMin, xMax)
        xLabel = 'Y'

    else:  # Free line profile
        # Convert start and end points in image coords as (row, col)
        startPt = ((roiStart[1] - origin[1]) / scale[1],
                   (roiStart[0] - origin[0]) / scale[0])
        endPt = ((roiEnd[1] - origin[1]) / scale[1],
                 (roiEnd[0] - origin[0]) / scale[0])

        if (int(startPt[0]) == int(endPt[0]) or
                int(startPt[1]) == int(endPt[1])):
            # Profile is aligned with one of the axes

            # Convert to int
            startPt = int(startPt[0]), int(startPt[1])
            endPt = int(endPt[0]), int(endPt[1])

            # Ensure startPt <= endPt
            if startPt[0] > endPt[0] or startPt[1] > endPt[1]:
                startPt, endPt = endPt, startPt

            if startPt[0] == endPt[0]:  # Row aligned
                rowRange = (int(startPt[0] + 0.5 - 0.5 * roiWidth),
                            int(startPt[0] + 0.5 + 0.5 * roiWidth))
                colRange = startPt[1], endPt[1] + 1
                profile = _alignedPartialProfile(currentData3D,
                                                 rowRange, colRange,
                                                 axis=0,
                                                 method=method)

            else:  # Column aligned
                rowRange = startPt[0], endPt[0] + 1
                colRange = (int(startPt[1] + 0.5 - 0.5 * roiWidth),
                            int(startPt[1] + 0.5 + 0.5 * roiWidth))
                profile = _alignedPartialProfile(currentData3D,
                                                 rowRange, colRange,
                                                 axis=1,
                                                 method=method)

            # Convert ranges to plot coords to draw ROI area
            area = (
                numpy.array(
                    (colRange[0], colRange[1], colRange[1], colRange[0]),
                    dtype=numpy.float32) * scale[0] + origin[0],
                numpy.array(
                    (rowRange[0], rowRange[0], rowRange[1], rowRange[1]),
                    dtype=numpy.float32) * scale[1] + origin[1])

        else:  # General case: use bilinear interpolation
            # Ensure startPt <= endPt
            if (startPt[1] > endPt[1] or (
                    startPt[1] == endPt[1] and startPt[0] > endPt[0])):
                startPt, endPt = endPt, startPt

            profile = []
            for slice_idx in range(currentData3D.shape[0]):
                bilinear = BilinearImage(currentData3D[slice_idx, :, :])

                # 0.5 offsets: profile_line uses pixel-corner coordinates.
                profile.append(bilinear.profile_line(
                    (startPt[0] - 0.5, startPt[1] - 0.5),
                    (endPt[0] - 0.5, endPt[1] - 0.5),
                    roiWidth,
                    method=method))
            profile = numpy.array(profile)

            # Extend ROI with half a pixel on each end, and
            # Convert back to plot coords (x, y)
            length = numpy.sqrt((endPt[0] - startPt[0]) ** 2 +
                                (endPt[1] - startPt[1]) ** 2)
            dRow = (endPt[0] - startPt[0]) / length
            dCol = (endPt[1] - startPt[1]) / length

            # Extend ROI with half a pixel on each end
            roiStartPt = startPt[0] - 0.5 * dRow, startPt[1] - 0.5 * dCol
            roiEndPt = endPt[0] + 0.5 * dRow, endPt[1] + 0.5 * dCol

            # Rotate deltas by 90 degrees to apply line width
            dRow, dCol = dCol, -dRow
            area = (
                numpy.array((roiStartPt[1] - 0.5 * roiWidth * dCol,
                             roiStartPt[1] + 0.5 * roiWidth * dCol,
                             roiEndPt[1] + 0.5 * roiWidth * dCol,
                             roiEndPt[1] - 0.5 * roiWidth * dCol),
                            dtype=numpy.float32) * scale[0] + origin[0],
                numpy.array((roiStartPt[0] - 0.5 * roiWidth * dRow,
                             roiStartPt[0] + 0.5 * roiWidth * dRow,
                             roiEndPt[0] + 0.5 * roiWidth * dRow,
                             roiEndPt[0] - 0.5 * roiWidth * dRow),
                            dtype=numpy.float32) * scale[1] + origin[1])

        # Convert start and end points back to plot coords
        y0 = startPt[0] * scale[1] + origin[1]
        x0 = startPt[1] * scale[0] + origin[0]
        y1 = endPt[0] * scale[1] + origin[1]
        x1 = endPt[1] * scale[0] + origin[0]

        if startPt[1] == endPt[1]:
            profileName = 'X = %g; Y = [%g, %g]' % (x0, y0, y1)
            coords = numpy.arange(len(profile[0]), dtype=numpy.float32)
            coords = coords * scale[1] + y0
            xLabel = 'Y'
        elif startPt[0] == endPt[0]:
            profileName = 'Y = %g; X = [%g, %g]' % (y0, x0, x1)
            coords = numpy.arange(len(profile[0]), dtype=numpy.float32)
            coords = coords * scale[0] + x0
            xLabel = 'X'
        else:
            m = (y1 - y0) / (x1 - x0)
            b = y0 - m * x0
            profileName = 'y = %g * x %+g ; width=%d' % (m, b, roiWidth)
            coords = numpy.linspace(x0, x1, len(profile[0]),
                                    endpoint=True,
                                    dtype=numpy.float32)
            xLabel = 'X'

    return coords, profile, area, profileName, xLabel
# ProfileToolBar ##############################################################
class ProfileToolBar(qt.QToolBar):
    """QToolBar providing profile tools operating on a :class:`PlotWindow`.
    Attributes:
    - plot: Associated :class:`PlotWindow` on which the profile line is drawn.
    - actionGroup: :class:`QActionGroup` of available actions.
    To run the following sample code, a QApplication must be initialized.
    First, create a PlotWindow and add a :class:`ProfileToolBar`.
    >>> from silx.gui.plot import PlotWindow
    >>> from silx.gui.plot.Profile import ProfileToolBar
    >>> plot = PlotWindow()  # Create a PlotWindow
    >>> toolBar = ProfileToolBar(plot=plot)  # Create a profile toolbar
    >>> plot.addToolBar(toolBar)  # Add it to plot
    >>> plot.show()  # To display the PlotWindow with the profile toolbar
    :param plot: :class:`PlotWindow` instance on which to operate.
    :param profileWindow: Plot widget instance where to
        display the profile curve or None to create one.
    :param str title: See :class:`QToolBar`.
    :param parent: See :class:`QToolBar`.
    """
    # TODO Make it a QActionGroup instead of a QToolBar
    # Legend used for the ROI polygon drawn on the main plot.
    _POLYGON_LEGEND = '__ProfileToolBar_ROI_Polygon'
    # Default reduction applied across the ROI width ('mean' or 'sum').
    DEFAULT_PROF_METHOD = 'mean'
    def __init__(self, parent=None, plot=None, profileWindow=None,
                 title='Profile Selection'):
        super(ProfileToolBar, self).__init__(title, parent)
        assert plot is not None
        # Weak reference so the toolbar does not keep the plot alive.
        self._plotRef = weakref.ref(plot)
        self._overlayColor = None
        self._defaultOverlayColor = 'red'  # update when active image change
        self._method = self.DEFAULT_PROF_METHOD
        self._roiInfo = None  # Store start and end points and type of ROI
        self._profileWindow = profileWindow
        """User provided plot widget in which the profile curve is plotted.
        None if no custom profile plot was provided."""
        self._profileMainWindow = None
        """Main window providing 2 profile plot widgets for 1D or 2D profiles.
        The window provides two public methods
        - :meth:`setProfileDimensions`
        - :meth:`getPlot`: return handle on the actual plot widget
        currently being used
        None if the user specified a custom profile plot window.
        """
        self.__profileMainWindowNeverShown = True
        if self._profileWindow is None:
            # No user-provided profile widget: create our own window,
            # reusing the same rendering backend as the associated plot.
            backend = type(plot._backend)
            self._profileMainWindow = ProfileMainWindow(self, backend=backend)
        # Actions
        self._browseAction = actions.mode.ZoomModeAction(self.plot, parent=self)
        self._browseAction.setVisible(False)
        self.hLineAction = qt.QAction(icons.getQIcon('shape-horizontal'),
                                      'Horizontal Profile Mode',
                                      self)
        self.hLineAction.setToolTip(
            'Enables horizontal profile selection mode')
        self.hLineAction.setCheckable(True)
        self.hLineAction.toggled[bool].connect(self._hLineActionToggled)
        self.vLineAction = qt.QAction(icons.getQIcon('shape-vertical'),
                                      'Vertical Profile Mode',
                                      self)
        self.vLineAction.setToolTip(
            'Enables vertical profile selection mode')
        self.vLineAction.setCheckable(True)
        self.vLineAction.toggled[bool].connect(self._vLineActionToggled)
        self.lineAction = qt.QAction(icons.getQIcon('shape-diagonal'),
                                     'Free Line Profile Mode',
                                     self)
        self.lineAction.setToolTip(
            'Enables line profile selection mode')
        self.lineAction.setCheckable(True)
        self.lineAction.toggled[bool].connect(self._lineActionToggled)
        self.clearAction = qt.QAction(icons.getQIcon('profile-clear'),
                                      'Clear Profile',
                                      self)
        self.clearAction.setToolTip(
            'Clear the profile Region of interest')
        self.clearAction.setCheckable(False)
        self.clearAction.triggered.connect(self.clearProfile)
        # ActionGroup: makes the drawing modes mutually exclusive.
        self.actionGroup = qt.QActionGroup(self)
        self.actionGroup.addAction(self._browseAction)
        self.actionGroup.addAction(self.hLineAction)
        self.actionGroup.addAction(self.vLineAction)
        self.actionGroup.addAction(self.lineAction)
        # Add actions to ToolBar
        self.addAction(self._browseAction)
        self.addAction(self.hLineAction)
        self.addAction(self.vLineAction)
        self.addAction(self.lineAction)
        self.addAction(self.clearAction)
        # Add width spin box to toolbar (ROI width in pixels)
        self.addWidget(qt.QLabel('W:'))
        self.lineWidthSpinBox = qt.QSpinBox(self)
        self.lineWidthSpinBox.setRange(1, 1000)
        self.lineWidthSpinBox.setValue(1)
        self.lineWidthSpinBox.valueChanged[int].connect(
            self._lineWidthSpinBoxValueChangedSlot)
        self.addWidget(self.lineWidthSpinBox)
        # NOTE(review): the toolbar itself is passed as ``plot`` — confirm
        # ProfileOptionToolButton really expects this object here.
        self.methodsButton = ProfileOptionToolButton(parent=self, plot=self)
        self.__profileOptionToolAction = self.addWidget(self.methodsButton)
        # TODO: add connection with the signal
        self.methodsButton.sigMethodChanged.connect(self.setProfileMethod)
        self.plot.sigInteractiveModeChanged.connect(
            self._interactiveModeChanged)
        # Enable toolbar only if there is an active image
        self.setEnabled(self.plot.getActiveImage(just_legend=True) is not None)
        self.plot.sigActiveImageChanged.connect(
            self._activeImageChanged)
        # listen to the profile window signals to clear profile polygon on close
        if self.getProfileMainWindow() is not None:
            self.getProfileMainWindow().sigClose.connect(self.clearProfile)
    @property
    def plot(self):
        """The :class:`.PlotWidget` associated to the toolbar."""
        return self._plotRef()
    @property
    @deprecated(since_version="0.6.0")
    def browseAction(self):
        """Deprecated access to the internal browse (zoom) action."""
        return self._browseAction
    @property
    @deprecated(replacement="getProfilePlot", since_version="0.5.0")
    def profileWindow(self):
        """Deprecated alias of :meth:`getProfilePlot`."""
        return self.getProfilePlot()
    def getProfilePlot(self):
        """Return plot widget in which the profile curve or the
        profile image is plotted.
        """
        if self.getProfileMainWindow() is not None:
            return self.getProfileMainWindow().getPlot()
        # in case the user provided a custom plot for profiles
        return self._profileWindow
    def getProfileMainWindow(self):
        """Return window containing the profile curve widget.
        This can return *None* if a custom profile plot window was
        specified in the constructor.
        """
        return self._profileMainWindow
    def _activeImageChanged(self, previous, legend):
        """Handle active image change: toggle enabled toolbar, update curve"""
        if legend is None:
            self.setEnabled(False)
        else:
            activeImage = self.plot.getActiveImage()
            # Disable for empty image
            self.setEnabled(activeImage.getData(copy=False).size > 0)
            # Update default profile color to contrast with the colormap
            if isinstance(activeImage, items.ColormapMixIn):
                self._defaultOverlayColor = cursorColorForColormap(
                    activeImage.getColormap()['name'])
            else:
                self._defaultOverlayColor = 'black'
            self.updateProfile()
    def _lineWidthSpinBoxValueChangedSlot(self, value):
        """Listen to ROI width widget to refresh ROI and profile"""
        self.updateProfile()
    def _interactiveModeChanged(self, source):
        """Handle plot interactive mode changed:
        If changed from elsewhere, disable drawing tool
        """
        if source is not self:
            self.clearProfile()
            # Uncheck all drawing profile modes
            self.hLineAction.setChecked(False)
            self.vLineAction.setChecked(False)
            self.lineAction.setChecked(False)
            if self.getProfileMainWindow() is not None:
                self.getProfileMainWindow().hide()
    def _hLineActionToggled(self, checked):
        """Handle horizontal line profile action toggle"""
        if checked:
            self.plot.setInteractiveMode('draw', shape='hline',
                                         color=None, source=self)
            self.plot.sigPlotSignal.connect(self._plotWindowSlot)
        else:
            self.plot.sigPlotSignal.disconnect(self._plotWindowSlot)
    def _vLineActionToggled(self, checked):
        """Handle vertical line profile action toggle"""
        if checked:
            self.plot.setInteractiveMode('draw', shape='vline',
                                         color=None, source=self)
            self.plot.sigPlotSignal.connect(self._plotWindowSlot)
        else:
            self.plot.sigPlotSignal.disconnect(self._plotWindowSlot)
    def _lineActionToggled(self, checked):
        """Handle line profile action toggle"""
        if checked:
            self.plot.setInteractiveMode('draw', shape='line',
                                         color=None, source=self)
            self.plot.sigPlotSignal.connect(self._plotWindowSlot)
        else:
            self.plot.sigPlotSignal.disconnect(self._plotWindowSlot)
    def _plotWindowSlot(self, event):
        """Listen to Plot to handle drawing events to refresh ROI and profile.
        """
        if event['event'] not in ('drawingProgress', 'drawingFinished'):
            return
        # Map the checked action to a projection mode:
        # 'X' = horizontal line, 'Y' = vertical line, 'D' = free (diagonal) line
        checkedAction = self.actionGroup.checkedAction()
        if checkedAction == self.hLineAction:
            lineProjectionMode = 'X'
        elif checkedAction == self.vLineAction:
            lineProjectionMode = 'Y'
        elif checkedAction == self.lineAction:
            lineProjectionMode = 'D'
        else:
            return
        roiStart, roiEnd = event['points'][0], event['points'][1]
        self._roiInfo = roiStart, roiEnd, lineProjectionMode
        self.updateProfile()
    @property
    def overlayColor(self):
        """The color to use for the ROI.
        If set to None (the default), the overlay color is adapted to the
        active image colormap and changes if the active image colormap changes.
        """
        return self._overlayColor or self._defaultOverlayColor
    @overlayColor.setter
    def overlayColor(self, color):
        self._overlayColor = color
        self.updateProfile()
    def clearProfile(self):
        """Remove profile curve and profile area."""
        self._roiInfo = None
        self.updateProfile()
    def updateProfile(self):
        """Update the displayed profile and profile ROI.
        This uses the current active image of the plot and the current ROI.
        """
        image = self.plot.getActiveImage()
        if image is None:
            return
        # Clean previous profile area, and previous curve
        self.plot.remove(self._POLYGON_LEGEND, kind='item')
        self.getProfilePlot().clear()
        self.getProfilePlot().setGraphTitle('')
        self.getProfilePlot().getXAxis().setLabel('X')
        self.getProfilePlot().getYAxis().setLabel('Y')
        self._createProfile(currentData=image.getData(copy=False),
                            origin=image.getOrigin(),
                            scale=image.getScale(),
                            colormap=None,  # Not used for 2D data
                            z=image.getZValue(),
                            method=self.getProfileMethod())
    def _createProfile(self, currentData, origin, scale, colormap, z, method):
        """Create the profile line for the the given image.
        :param numpy.ndarray currentData: the image or the stack of images
            on which we compute the profile
        :param origin: (ox, oy) the offset from origin
        :type origin: 2-tuple of float
        :param scale: (sx, sy) the scale to use
        :type scale: 2-tuple of float
        :param dict colormap: The colormap to use
        :param int z: The z layer of the image
        :param str method: Reduction over the ROI width, forwarded to
            :func:`createProfile` ('mean' or 'sum').
        """
        if self._roiInfo is None:
            return
        coords, profile, area, profileName, xLabel = createProfile(
            roiInfo=self._roiInfo,
            currentData=currentData,
            origin=origin,
            scale=scale,
            lineWidth=self.lineWidthSpinBox.value(),
            method=method)
        profilePlot = self.getProfilePlot()
        profilePlot.setGraphTitle(profileName)
        profilePlot.getXAxis().setLabel(xLabel)
        dataIs3D = len(currentData.shape) > 2
        if dataIs3D:
            # One profile per frame: display the stack of profiles as an image
            profileScale = (coords[-1] - coords[0]) / profile.shape[1], 1
            profilePlot.addImage(profile,
                                 legend=profileName,
                                 colormap=colormap,
                                 origin=(coords[0], 0),
                                 scale=profileScale)
            profilePlot.getYAxis().setLabel("Frame index (depth)")
        else:
            profilePlot.addCurve(coords,
                                 profile[0],
                                 legend=profileName,
                                 color=self.overlayColor)
        # Draw the ROI polygon above the image (z + 1)
        self.plot.addShape(area[0], area[1],
                           legend=self._POLYGON_LEGEND,
                           color=self.overlayColor,
                           shape='polygon', fill=True,
                           replace=False, z=z + 1)
        self._showProfileMainWindow()
    def _showProfileMainWindow(self):
        """If profile window was created by this toolbar,
        try to avoid overlapping with the toolbar's parent window.
        """
        profileMainWindow = self.getProfileMainWindow()
        if profileMainWindow is not None:
            if self.__profileMainWindowNeverShown:
                # Places the profile window in order to avoid overlapping the plot
                self.__profileMainWindowNeverShown = False
                winGeom = self.window().frameGeometry()
                qapp = qt.QApplication.instance()
                screenGeom = qapp.desktop().availableGeometry(self)
                spaceOnLeftSide = winGeom.left()
                spaceOnRightSide = screenGeom.width() - winGeom.right()
                profileWindowWidth = profileMainWindow.frameGeometry().width()
                if (profileWindowWidth < spaceOnRightSide):
                    # Place profile on the right
                    profileMainWindow.move(winGeom.right(), winGeom.top())
                elif(profileWindowWidth < spaceOnLeftSide):
                    # Place profile on the left
                    profileMainWindow.move(
                        max(0, winGeom.left() - profileWindowWidth), winGeom.top())
            profileMainWindow.raise_()
            profileMainWindow.show()
        else:
            self.getProfilePlot().show()
            self.getProfilePlot().raise_()
    def hideProfileWindow(self):
        """Hide profile window.
        """
        # this method is currently only used by StackView when the perspective
        # is changed
        if self.getProfileMainWindow() is not None:
            self.getProfileMainWindow().hide()
    def setProfileMethod(self, method):
        """Set the reduction over the ROI width ('sum' or 'mean')."""
        assert method in ('sum', 'mean')
        self._method = method
        self.updateProfile()
    def getProfileMethod(self):
        """Return the current profile reduction method ('sum' or 'mean')."""
        return self._method
    def getProfileOptionToolAction(self):
        """Return the QAction wrapping the profile method tool button."""
        return self.__profileOptionToolAction
class Profile3DToolBar(ProfileToolBar):
    def __init__(self, parent=None, stackview=None,
                 title='Profile Selection'):
        """QToolBar providing profile tools for an image or a stack of images.
        :param parent: the parent QWidget
        :param stackview: :class:`StackView` instance on which to operate.
        :param str title: See :class:`QToolBar`.
        :param parent: See :class:`QToolBar`.
        """
        # TODO: add param profileWindow (specify the plot used for profiles)
        super(Profile3DToolBar, self).__init__(parent=parent,
                                               plot=stackview.getPlot(),
                                               title=title)
        self.stackView = stackview  # :class:`StackView` instance
        self.profile3dAction = ProfileToolButton(
            parent=self, plot=self.plot)
        self.profile3dAction.computeProfileIn2D()
        self.profile3dAction.setVisible(True)
        self.addWidget(self.profile3dAction)
        self.profile3dAction.sigDimensionChanged.connect(self._setProfileType)
        # BUGFIX: _method3D must be assigned *before* calling
        # _setProfileType(2): _setProfileType() triggers updateProfile(),
        # which evaluates self.getProfileMethod() (i.e. self._method3D).
        # With the previous ordering this raised AttributeError whenever the
        # stack view already held data at construction time.
        self._method3D = 'sum'
        # create the 3D toolbar
        self._profileType = None
        self._setProfileType(2)
    def _setProfileType(self, dimensions):
        """Set the profile type: "1D" for a curve (profile on a single image)
        or "2D" for an image (profile on a stack of images).
        :param int dimensions: 1 for a "1D" profile or 2 for a "2D" profile
        """
        # fixme this assumes that we created _profileMainWindow
        self._profileType = "1D" if dimensions == 1 else "2D"
        self.getProfileMainWindow().setProfileType(self._profileType)
        self.updateProfile()
    def updateProfile(self):
        """Method overloaded from :class:`ProfileToolBar`,
        to pass the stack of images instead of just the active image.
        In 1D profile mode, use the regular parent method.
        """
        if self._profileType == "1D":
            super(Profile3DToolBar, self).updateProfile()
        elif self._profileType == "2D":
            stackData = self.stackView.getCurrentView(copy=False,
                                                      returnNumpyArray=True)
            if stackData is None:
                return
            # Clear previous ROI polygon and profile plot before redrawing
            self.plot.remove(self._POLYGON_LEGEND, kind='item')
            self.getProfilePlot().clear()
            self.getProfilePlot().setGraphTitle('')
            self.getProfilePlot().getXAxis().setLabel('X')
            self.getProfilePlot().getYAxis().setLabel('Y')
            # stackData is (data, params) where params holds plot metadata
            self._createProfile(currentData=stackData[0],
                                origin=stackData[1]['origin'],
                                scale=stackData[1]['scale'],
                                colormap=stackData[1]['colormap'],
                                z=stackData[1]['z'],
                                method=self.getProfileMethod())
        else:
            raise ValueError(
                "Profile type must be 1D or 2D, not %s" % self._profileType)
    def setProfileMethod(self, method):
        """Set the reduction applied across the ROI width.
        :param str method: Either 'sum' or 'mean'.
        """
        assert method in ('sum', 'mean')
        self._method3D = method
        self.updateProfile()
    def getProfileMethod(self):
        """Return the current 3D profile reduction method ('sum' or 'mean')."""
        return self._method3D
|
# coding=utf8
import os
import re
import numpy as np
from typing import List
from overrides import overrides
from nltk.corpus import stopwords
from allennlp.data.tokenizers import Token
from .atis.atis_entity_matcher import ATISEntityMatcher
from .atis.atis_sql_entity_matcher import ATISSQLEntityMatcher
from .atis.atis_lambda_calculus_entity_matcher import ATISLambdaCalculusEntityMatcher
from .atis.atis_seq2seq_entity_matcher import ATISSeq2SeqEntityMatcher
from .atis.atis_seq2seq_sql_entity_matcher import ATISSeq2SeqSQLEntityMatcher
from .atis.atis_seq2seq_lambda_calculus_entity_matcher import ATISSeq2SeqLambdaCalculusEntityMatcher
class BasicEntityMatcher():
    """Match question tokens to terminal grammar rules, token by token.

    For each question token, collects the ids of all copy-terminal rules
    whose normalized RHS contains that token; tokens with no match (or
    stop words) map to ``pad_index``.
    """

    def process_terminal_rule(self, rule):
        """Normalize a terminal rule's RHS into lowercase name tokens.

        Strips brackets and quotes, converts underscores to spaces, removes
        '%', isolates ':' as its own token and truncates at it (dropping a
        trailing ':type' suffix when present).

        :param rule: production rule with a string ``rhs`` attribute.
        :return: list of lowercase tokens of the entity name.
        """
        terminal = rule.rhs.strip('[] ')
        terminal = terminal.replace("'", "").replace('"', "").replace("_", " ").replace("%", "").replace(":", " : ")
        terminal = re.sub(' +', ' ', terminal)
        terminal_tokens = terminal.lower().split(" ")
        try:
            index = terminal_tokens.index(":")
        except ValueError:
            pass
        else:
            # Keep only the entity name, dropping the ':type' suffix.
            terminal_tokens = terminal_tokens[:index]
        return terminal_tokens

    def match(self, question_tokens: 'List[Token]', rules: List,
              copy_terminal_set: List, pad_index: int, max_ngram=6):
        """Map each question token to the ids of the rules it appears in.

        :param question_tokens: tokenized question (objects with ``.text``).
        :param rules: production rules (``.lhs``, ``.rhs``, ``.rule_id``).
        :param copy_terminal_set: rule LHS values eligible for copying.
        :param pad_index: id used for stop words / unmatched tokens.
        :param max_ngram: unused here; kept for subclass signature parity.
        :return: list of int arrays, one per token, of matching rule ids.
        """
        token_rule_map = list()
        stop_words = set(stopwords.words('english'))
        for token in question_tokens:
            matches = list()
            if token.text in stop_words:
                matches = [pad_index]
            else:
                for rule in rules:  # Instance of Production Rule
                    if rule.lhs in copy_terminal_set:
                        # Inline normalization; unlike process_terminal_rule,
                        # ':' is replaced by a space and nothing is truncated.
                        terminal = rule.rhs.strip('[] ')
                        terminal = terminal.replace("'", "").replace('"', "").replace("_", " ").replace("%", "").replace(":", " ")
                        terminal = re.sub(' +', ' ', terminal)
                        terminal_tokens = terminal.lower().split(" ")
                        if token.text in terminal_tokens:
                            matches.append(rule.rule_id)
            if len(matches) == 0:
                matches = [pad_index]
            # np.int was removed in NumPy 1.24; it was an alias for the
            # builtin int, which is the exact drop-in replacement.
            token_rule_map.append(np.array(matches, dtype=int))
        return token_rule_map
class EntityMatcher(BasicEntityMatcher):
    """N-gram variant of :class:`BasicEntityMatcher`.

    Matches whole n-grams (up to ``max_ngram`` tokens) against the full
    normalized terminal string, and credits a matching rule to every token
    position the n-gram covers.
    """

    @overrides
    def match(self, question_tokens: List[Token], rules: List,
              copy_terminal_set: List, pad_index: int, max_ngram=6):
        """Map each token to rule ids via exact n-gram matches.

        :param question_tokens: tokenized question (objects with ``.text``).
        :param rules: production rules (``.lhs``, ``.rhs``, ``.rule_id``).
        :param copy_terminal_set: rule LHS values eligible for copying.
        :param pad_index: id used for stop words / unmatched tokens.
        :param max_ngram: maximum n-gram length to try at each position.
        :return: list of int arrays, one per token, of matching rule ids.
        """
        length = len(question_tokens)
        token_rule_map = [list() for i in range(length)]
        stop_words = set(stopwords.words('english'))
        tidx = 0
        while tidx < length:
            token = question_tokens[tidx]
            if token.text in stop_words:
                tidx += 1
                continue
            # Try every n-gram starting at tidx (overlaps are allowed:
            # the window only advances by one token per iteration).
            for i in range(min(max_ngram, length - tidx)):
                string = ' '.join([t.text for t in question_tokens[tidx:tidx + 1 + i]]).strip().lower()
                for rule in rules:
                    if rule.lhs in copy_terminal_set:
                        terminal_tokens = self.process_terminal_rule(rule)
                        terminal_string = ' '.join(terminal_tokens)
                        if string == terminal_string:
                            # Credit the rule to every covered token position.
                            for index in range(tidx, tidx + 1 + i):
                                token_rule_map[index].append(rule.rule_id)
            tidx += 1
        for midx, m in enumerate(token_rule_map):
            if len(m) == 0:
                m.append(pad_index)
            # np.int was removed in NumPy 1.24; builtin int is equivalent.
            token_rule_map[midx] = np.array(m, dtype=int)
        return token_rule_map
class GEOLambdaCalculusEntityMatcher(EntityMatcher):
    """Entity matcher for the GEO lambda-calculus grammar.

    Terminals look like ``name_parts:type``; the name is split on '_' and
    type-specific redundant tokens are dropped.
    """

    @overrides
    def process_terminal_rule(self, rule):
        """Normalize a GEO lambda-calculus terminal into name tokens."""
        cleaned = rule.rhs.strip('[] ')
        cleaned = cleaned.replace("'", "").replace('"', "").lower().strip()
        cleaned = re.sub(' +', ' ', cleaned)
        parts = cleaned.split(':')
        assert len(parts) == 2  # exactly one ':' separating name and type
        name, terminal_type = parts
        tokens = name.split("_")
        if terminal_type == 'r':
            # River entities carry a redundant "river" token.
            tokens.remove("river")
        elif terminal_type == 'c':
            # Drop the trailing token — presumably a state qualifier on
            # city names; verify against the GEO grammar data.
            tokens = tokens[:-1]
        return tokens
def get_entity_matcher(task, language):
    """Return the entity matcher for ``task``/``language``.

    :param task: dataset name ('atis', 'geo' or 'job').
    :param language: target formalism identifier.
    :return: a matcher instance, or None for an unknown task.
    """
    if task == 'atis':
        db_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            '..', 'data', 'atis', 'db')
        if language in ['lambda', 'lambda2', 'lambda3', 'lambda4',]:
            return ATISLambdaCalculusEntityMatcher(db_path)
        if language in ['prolog', 'funql', 'typed_funql', 'prolog2']:
            return ATISEntityMatcher(db_path)
        return ATISSQLEntityMatcher(db_path)
    if task == 'geo':
        if language in ['lambda', 'lambda2']:
            return GEOLambdaCalculusEntityMatcher()
        return EntityMatcher()
    if task == 'job':
        return EntityMatcher()
    return None
def get_seq2seq_entity_matcher(task, language):
    """Return the seq2seq entity matcher for ``task``/``language``.

    Only the 'atis' task has seq2seq matchers; any other task yields None.
    """
    if task != 'atis':
        return None
    db_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        '..', 'data', 'atis', 'db')
    if language in ['lambda', 'lambda2', 'lambda3', 'lambda4',]:
        return ATISSeq2SeqLambdaCalculusEntityMatcher(db_path)
    if language in ['prolog', 'funql', 'typed_funql', 'prolog2']:
        return ATISSeq2SeqEntityMatcher(db_path)
    return ATISSeq2SeqSQLEntityMatcher(db_path)
|
from artssat.sensor import PassiveSensor
import numpy as np
class ICI(PassiveSensor):
    """
    The Ice Cloud Imager (ICI) sensor.
    Attributes:
        channels(:code:`list`): List of channels that are available
            from ICI
        nedt(:code:`list`): Noise equivalent temperature differences for the
            channels in :code:`channels`.
    """
    channels = np.array([1.749100000000000e+11,
                         1.799100000000000e+11,
                         1.813100000000000e+11,
                         2.407000000000000e+11,
                         3.156500000000000e+11,
                         3.216500000000000e+11,
                         3.236500000000000e+11,
                         4.408000000000000e+11,
                         4.450000000000000e+11,
                         4.466000000000000e+11,
                         6.598000000000000e+11])
    nedt = np.array([0.8, 0.8, 0.8,             # 183 GHz
                     0.7 * np.sqrt(0.5),        # 243 GHz
                     1.2, 1.3, 1.5,             # 325 GHz
                     1.4, 1.6, 2.0,             # 448 GHz
                     1.6 * np.sqrt(0.5)])       # 664 GHz

    def __init__(self,
                 name="ici",
                 channel_indices=None,
                 stokes_dimension=1):
        """
        This creates an instance of the ICI sensor to be used within a
        :code:`artssat` simulation.
        Arguments:
            name(:code:`str`): The name of the sensor used within the artssat
                simulation.
            channel_indices(:code:`list`): List of channel indices to be used
                in the simulation/retrieval.
            stokes_dimension(:code:`int`): The stokes dimension to use for
                the retrievals.
        """
        # Select either the full channel set or the requested subset;
        # nedt is subset consistently with the channels.
        if channel_indices is None:
            selected_channels = ICI.channels
            self.nedt = ICI.nedt
        else:
            selected_channels = ICI.channels[channel_indices]
            self.nedt = ICI.nedt[channel_indices]
        super().__init__(name, selected_channels,
                         stokes_dimension=stokes_dimension)
|
from test.integration.base import DBTIntegrationTest, use_profile
import hashlib
import os
from unittest.mock import call, ANY, patch
import dbt.exceptions
import dbt.version
import dbt.tracking
import dbt.utils
class TestEventTracking(DBTIntegrationTest):
    """Base class for dbt anonymous-usage-tracking integration tests.

    Runs dbt commands with the Snowplow tracker mocked out and verifies
    both the sequence of tracked events and their payload contexts.
    """
    maxDiff = None

    @property
    def profile_config(self):
        # Tracking must be enabled, otherwise no events are emitted at all.
        return {
            'config': {
                'send_anonymous_usage_stats': True
            }
        }

    @property
    def schema(self):
        return "event_tracking_033"

    @staticmethod
    def dir(path):
        # Paths are used relative to the project root.
        return path.lstrip("/")

    @property
    def models(self):
        return self.dir("models")

    # TODO : Handle the subject. Should be the same every time!
    # TODO : Regex match a uuid for user_id, invocation_id?
    @patch('dbt.tracking.tracker.track_struct_event')
    def run_event_test(
        self,
        cmd,
        expected_calls,
        expected_contexts,
        track_fn,
        expect_pass=True,
        expect_raise=False
    ):
        """Run ``cmd`` and assert the tracked events match expectations.

        ``track_fn`` is the mock injected by the ``@patch`` decorator above
        (it comes after the explicit arguments in the signature).
        Entries of ``expected_contexts`` may be callables taking
        (project_id, user_id, invocation_id, version) — these values are
        only known after the command ran.
        """
        track_fn.reset_mock()
        project_id = hashlib.md5(
            self.config.project_name.encode('utf-8')).hexdigest()
        version = str(dbt.version.get_installed_version())
        if expect_raise:
            with self.assertRaises(BaseException):
                self.run_dbt(cmd, expect_pass=expect_pass)
        else:
            self.run_dbt(cmd, expect_pass=expect_pass)
        user_id = dbt.tracking.active_user.id
        invocation_id = dbt.tracking.active_user.invocation_id
        self.assertTrue(len(user_id) > 0)
        self.assertTrue(len(invocation_id) > 0)
        track_fn.assert_has_calls(expected_calls)
        # Collect the context payloads of each tracked call, in order.
        ordered_contexts = []
        for (args, kwargs) in track_fn.call_args_list:
            ordered_contexts.append(
                [context.__dict__ for context in kwargs['context']]
            )
        # Materialize expected contexts: call the factories with the
        # run-specific identifiers, pass plain dicts through unchanged.
        populated_contexts = []
        for context in expected_contexts:
            if callable(context):
                populated_contexts.append(context(
                    project_id, user_id, invocation_id, version))
            else:
                populated_contexts.append(context)
        self.assertEqual(
            ordered_contexts,
            populated_contexts
        )

    def build_context(
        self,
        command,
        progress,
        result_type=None,
        adapter_type='postgres'
    ):
        """Return a factory for an expected invocation-event context."""
        def populate(
            project_id,
            user_id,
            invocation_id,
            version
        ):
            return [
                {
                    'schema': 'iglu:com.dbt/invocation/jsonschema/1-0-1',
                    'data': {
                        'project_id': project_id,
                        'user_id': user_id,
                        'invocation_id': invocation_id,
                        'version': version,
                        'command': command,
                        'progress': progress,
                        'run_type': 'regular',
                        'options': None,  # TODO : Add options to compile cmd!
                        'result_type': result_type,
                        'result': None,
                        'adapter_type': adapter_type
                    }
                },
                {
                    'schema': 'iglu:com.dbt/platform/jsonschema/1-0-0',
                    'data': ANY
                },
                {
                    'schema': 'iglu:com.dbt/invocation_env/jsonschema/1-0-0',
                    'data': ANY
                }
            ]
        return populate

    def run_context(
        self,
        materialization,
        hashed_contents,
        model_id,
        index,
        total,
        status,
        error=None
    ):
        """Return a factory for an expected run_model-event context."""
        # Errored runs report no timing entries.
        timing = []
        if status != 'ERROR':
            timing = [ANY, ANY]
        def populate(project_id, user_id, invocation_id, version):
            return [{
                'schema': 'iglu:com.dbt/run_model/jsonschema/1-0-1',
                'data': {
                    'invocation_id': invocation_id,
                    'model_materialization': materialization,
                    'execution_time': ANY,
                    'hashed_contents': hashed_contents,
                    'model_id': model_id,
                    'index': index,
                    'total': total,
                    'run_status': status,
                    'run_error': error,
                    'run_skipped': False,
                    'timing': timing,
                },
            }]
        return populate
class TestEventTrackingSuccess(TestEventTracking):
    """Event-tracking assertions for the successful command paths."""

    @property
    def packages_config(self):
        return {
            'packages': [
                {'git': 'https://github.com/fishtown-analytics/dbt-integration-project', 'warn-unpinned': False},
            ],
        }

    @property
    def project_config(self):
        return {
            "data-paths": [self.dir("data")],
            "test-paths": [self.dir("test")],
        }

    @use_profile("postgres")
    def test__postgres_event_tracking_compile(self):
        """`dbt compile` emits a start and an end invocation event."""
        expected_calls = [
            call(
                category='dbt',
                action='invocation',
                label='start',
                context=ANY
            ),
            call(
                category='dbt',
                action='invocation',
                label='end',
                context=ANY
            ),
        ]
        expected_contexts = [
            self.build_context('compile', 'start'),
            self.build_context('compile', 'end', result_type='ok')
        ]
        # --vars content must not leak into tracked data (checked via ANY).
        self.run_event_test(
            ["compile", "--vars", "sensitive_thing: abc"],
            expected_calls,
            expected_contexts
        )

    @use_profile("postgres")
    def test__postgres_event_tracking_deps(self):
        """`dbt deps` emits a package_install event with hashed identifiers."""
        # name/version are md5-hashed so no package identity is leaked.
        package_context = [
            {
                'schema': 'iglu:com.dbt/package_install/jsonschema/1-0-0',
                'data': {
                    'name': 'c5552991412d1cd86e5c20a87f3518d5',
                    'source': 'git',
                    'version': 'eb0a191797624dd3a48fa681d3061212'
                }
            }
        ]
        expected_calls = [
            call(
                category='dbt',
                action='invocation',
                label='start',
                context=ANY
            ),
            call(
                category='dbt',
                action='package',
                label=ANY,
                property_='install',
                context=ANY
            ),
            call(
                category='dbt',
                action='invocation',
                label='end',
                context=ANY
            ),
        ]
        expected_contexts = [
            self.build_context('deps', 'start', adapter_type=None),
            package_context,
            self.build_context('deps', 'end', result_type='ok', adapter_type=None)
        ]
        self.run_event_test(["deps"], expected_calls, expected_contexts)

    @use_profile("postgres")
    def test__postgres_event_tracking_seed(self):
        """`dbt seed` emits a run_model event for the seeded table."""
        def seed_context(project_id, user_id, invocation_id, version):
            return [{
                'schema': 'iglu:com.dbt/run_model/jsonschema/1-0-1',
                'data': {
                    'invocation_id': invocation_id,
                    'model_materialization': 'seed',
                    'execution_time': ANY,
                    'hashed_contents': 'd41d8cd98f00b204e9800998ecf8427e',
                    'model_id': '39bc2cd707d99bd3e600d2faaafad7ae',
                    'index': 1,
                    'total': 1,
                    'run_status': 'INSERT 1',
                    'run_error': None,
                    'run_skipped': False,
                    'timing': [ANY, ANY],
                },
            }]
        expected_calls = [
            call(
                category='dbt',
                action='invocation',
                label='start',
                context=ANY
            ),
            call(
                category='dbt',
                action='run_model',
                label=ANY,
                context=ANY
            ),
            call(
                category='dbt',
                action='invocation',
                label='end',
                context=ANY
            ),
        ]
        expected_contexts = [
            self.build_context('seed', 'start'),
            seed_context,
            self.build_context('seed', 'end', result_type='ok')
        ]
        self.run_event_test(["seed"], expected_calls, expected_contexts)

    @use_profile("postgres")
    def test__postgres_event_tracking_models(self):
        """`dbt run` over two models emits one run_model event each."""
        expected_calls = [
            call(
                category='dbt',
                action='invocation',
                label='start',
                context=ANY
            ),
            call(
                category='dbt',
                action='run_model',
                label=ANY,
                context=ANY
            ),
            call(
                category='dbt',
                action='run_model',
                label=ANY,
                context=ANY
            ),
            call(
                category='dbt',
                action='invocation',
                label='end',
                context=ANY
            ),
        ]
        hashed = '20ff78afb16c8b3b8f83861b1d3b99bd'
        # this hashed contents field changes on azure postgres tests, I believe
        # due to newlines again
        if os.name == 'nt':
            hashed = '52cf9d1db8f0a18ca64ef64681399746'
        expected_contexts = [
            self.build_context('run', 'start'),
            self.run_context(
                hashed_contents='1e5789d34cddfbd5da47d7713aa9191c',
                model_id='4fbacae0e1b69924b22964b457148fb8',
                index=1,
                total=2,
                status='CREATE VIEW',
                materialization='view'
            ),
            self.run_context(
                hashed_contents=hashed,
                model_id='57994a805249953b31b738b1af7a1eeb',
                index=2,
                total=2,
                status='CREATE VIEW',
                materialization='view'
            ),
            self.build_context('run', 'end', result_type='ok')
        ]
        self.run_event_test(
            ["run", "--model", "example", "example_2"],
            expected_calls,
            expected_contexts
        )

    @use_profile("postgres")
    def test__postgres_event_tracking_model_error(self):
        """A failing model still tracks events; the model event has ERROR status."""
        # cmd = ["run", "--model", "model_error"]
        # self.run_event_test(cmd, event_run_model_error, expect_pass=False)
        expected_calls = [
            call(
                category='dbt',
                action='invocation',
                label='start',
                context=ANY
            ),
            call(
                category='dbt',
                action='run_model',
                label=ANY,
                context=ANY
            ),
            call(
                category='dbt',
                action='invocation',
                label='end',
                context=ANY
            ),
        ]
        expected_contexts = [
            self.build_context('run', 'start'),
            self.run_context(
                hashed_contents='4419e809ce0995d99026299e54266037',
                model_id='576c3d4489593f00fad42b97c278641e',
                index=1,
                total=1,
                status='ERROR',
                materialization='view'
            ),
            # The invocation itself completes, so the end event is 'ok'
            # even though the model errored.
            self.build_context('run', 'end', result_type='ok')
        ]
        self.run_event_test(
            ["run", "--model", "model_error"],
            expected_calls,
            expected_contexts,
            expect_pass=False
        )

    @use_profile("postgres")
    def test__postgres_event_tracking_tests(self):
        # TODO: dbt does not track events for tests, but it should!
        self.run_dbt(["run", "--model", "example", "example_2"])
        expected_calls = [
            call(
                category='dbt',
                action='invocation',
                label='start',
                context=ANY
            ),
            call(
                category='dbt',
                action='invocation',
                label='end',
                context=ANY
            ),
        ]
        expected_contexts = [
            self.build_context('test', 'start'),
            self.build_context('test', 'end', result_type='ok')
        ]
        self.run_event_test(
            ["test"],
            expected_calls,
            expected_contexts,
            expect_pass=False
        )
class TestEventTrackingCompilationError(TestEventTracking):
    """A compilation failure still emits start/end events (end is 'error')."""

    @property
    def project_config(self):
        # Point source-paths at a model known to fail compilation.
        return {
            "source-paths": [self.dir("model-compilation-error")],
        }

    @use_profile("postgres")
    def test__postgres_event_tracking_with_compilation_error(self):
        expected_calls = [
            call(
                category='dbt',
                action='invocation',
                label='start',
                context=ANY
            ),
            call(
                category='dbt',
                action='invocation',
                label='end',
                context=ANY
            ),
        ]
        expected_contexts = [
            self.build_context('compile', 'start'),
            self.build_context('compile', 'end', result_type='error')
        ]
        # The command raises, so run_event_test wraps it in assertRaises.
        self.run_event_test(
            ["compile"],
            expected_calls,
            expected_contexts,
            expect_pass=False,
            expect_raise=True
        )
class TestEventTrackingUnableToConnect(TestEventTracking):
    """Connection failures still emit start/end events (end is 'error')."""

    @property
    def profile_config(self):
        # Provide a working default target plus a 'noaccess' target with
        # deliberately invalid credentials.
        return {
            'config': {
                'send_anonymous_usage_stats': True
            },
            'test': {
                'outputs': {
                    'default2': {
                        'type': 'postgres',
                        'threads': 4,
                        'host': self.database_host,
                        'port': 5432,
                        'user': 'root',
                        'pass': 'password',
                        'dbname': 'dbt',
                        'schema': self.unique_schema()
                    },
                    'noaccess': {
                        'type': 'postgres',
                        'threads': 4,
                        'host': self.database_host,
                        'port': 5432,
                        'user': 'BAD',
                        'pass': 'bad_password',
                        'dbname': 'dbt',
                        'schema': self.unique_schema()
                    }
                },
                'target': 'default2'
            }
        }

    @use_profile("postgres")
    def test__postgres_event_tracking_unable_to_connect(self):
        expected_calls = [
            call(
                category='dbt',
                action='invocation',
                label='start',
                context=ANY
            ),
            call(
                category='dbt',
                action='invocation',
                label='end',
                context=ANY
            ),
        ]
        expected_contexts = [
            self.build_context('run', 'start'),
            self.build_context('run', 'end', result_type='error')
        ]
        self.run_event_test(
            ["run", "--target", "noaccess", "--models", "example"],
            expected_calls,
            expected_contexts,
            expect_pass=False
        )
class TestEventTrackingSnapshot(TestEventTracking):
    """`dbt snapshot` emits a run_model event with 'snapshot' materialization."""

    @property
    def project_config(self):
        return {
            "snapshot-paths": ['snapshots']
        }

    @use_profile("postgres")
    def test__postgres_event_tracking_snapshot(self):
        # Create the source table the snapshot selects from.
        self.run_dbt(["run", "--models", "snapshottable"])
        expected_calls = [
            call(
                category='dbt',
                action='invocation',
                label='start',
                context=ANY
            ),
            call(
                category='dbt',
                action='run_model',
                label=ANY,
                context=ANY
            ),
            call(
                category='dbt',
                action='invocation',
                label='end',
                context=ANY
            ),
        ]
        # the model here has a raw_sql that contains the schema, which changes
        expected_contexts = [
            self.build_context('snapshot', 'start'),
            self.run_context(
                hashed_contents=ANY,
                model_id='820793a4def8d8a38d109a9709374849',
                index=1,
                total=1,
                status='SELECT 1',
                materialization='snapshot'
            ),
            self.build_context('snapshot', 'end', result_type='ok')
        ]
        self.run_event_test(
            ["snapshot"],
            expected_calls,
            expected_contexts
        )
class TestEventTrackingCatalogGenerate(TestEventTracking):
    """`dbt docs generate` emits start/end invocation events only."""

    @use_profile("postgres")
    def test__postgres_event_tracking_catalog_generate(self):
        # create a model for the catalog
        self.run_dbt(["run", "--models", "example"])
        expected_calls = [
            call(
                category='dbt',
                action='invocation',
                label='start',
                context=ANY
            ),
            call(
                category='dbt',
                action='invocation',
                label='end',
                context=ANY
            ),
        ]
        expected_contexts = [
            self.build_context('generate', 'start'),
            self.build_context('generate', 'end', result_type='ok')
        ]
        self.run_event_test(
            ["docs", "generate"],
            expected_calls,
            expected_contexts
        )
|
#!/usr/bin/python
# $LicenseInfo:firstyear=2014&license=mit$
# Copyright (c) 2014, Linden Research, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# $/LicenseInfo$
"""
Graph the dependencies of a package.
This autobuild sub-command will read an autobuild metadata file and produce a graph of
the dependencies of the project.
Author : Scott Lawrence / Logan Dethrow
Date : 2014-05-09
"""
import os
import sys
import tempfile
try:
import pydot
except ImportError:
# Workaround for an obscure test case: on some TeamCity build hosts, we run
# self-tests from a Mercurial checkout rather than from a pip install. In that
# scenario, pip can't have fulfilled our pydot2 requirement, so import pydot
# will fail. But we don't want a spurious test failure showing up; just skip
# that test.
try:
from nose.plugins.skip import SkipTest
except ImportError:
# whoops, user machine, it's a real problem: use real ImportError
SkipTest = ImportError
# Of course either exception is equally dismaying to an interactive user.
raise SkipTest("Cannot import pydot module; "
"did you use pip install to install autobuild?")
import webbrowser
import common
import logging
import configfile
import autobuild_base
from autobuild_tool_install import extract_metadata_from_package
logger = logging.getLogger('autobuild.graph')
class GraphError(common.AutobuildError):
    """Raised when dependency-graph generation fails (e.g. no metadata found)."""
    pass
# Usage text displayed for this sub-command (typo fix: "dependecy" -> "dependency").
__help = """\
This autobuild command displays a dependency graph for a package.
You may either:
1) not specify a file - attempts to show dependencies of the current build tree
2) specify an xml file - interprets the file as an autobuild-package metadata file
and displays its dependencies
3) specify a package file - extracts the metadata from the package and displays
the dependencies of the package
The --rebuild-from <package-name> option prints an ordered list of packages that
must be rebuilt if the specified package is updated.
"""
# define the entry point to this autobuild tool
class AutobuildTool(autobuild_base.AutobuildBase):
def get_details(self):
return dict(name=self.name_from_file(__file__),
description='Graph package dependencies.')
def register(self, parser):
parser.description = "Graph package dependencies."
parser.add_argument('source_file',
nargs="?",
default=None,
help='package or metadata file.')
parser.add_argument('--config-file',
dest='config_filename',
default=configfile.AUTOBUILD_CONFIG_FILE,
help="The file used to describe what should be installed and built\n (defaults to $AUTOBUILD_CONFIG_FILE or \"autobuild.xml\").")
parser.add_argument('--configuration', '-c',
dest='configuration',
help="specify build configuration\n(may be specified in $AUTOBUILD_CONFIGURATION)",
metavar='CONFIGURATION',
default=self.configurations_from_environment())
parser.add_argument('-t', '--type',
dest='graph_type',
choices=["dot", "circo", "neato", "twopi", "fdp", "sfdp"],
default='dot',
help='which graphviz tool should be used to draw the graph')
parser.add_argument('--install-dir',
default=None,
dest='select_dir', # see common.select_directories()
help='Where installed files were unpacked.')
parser.add_argument('--installed-manifest',
default=configfile.INSTALLED_CONFIG_FILE,
dest='installed_filename',
help='The file used to record what is installed.')
parser.add_argument('--no-display',
dest='display', action='store_false', default=True,
help='do not generate and display graph; output dot file on stdout instead')
parser.add_argument('--graph-file', '-g',
dest='graph_file', default=None,
help='do not display graph; store graph file in the specified file')
parser.add_argument('--dot-file', '-D',
dest='dot_file', default=None,
help='save the dot input file in the specified file')
def run(self, args):
platform=common.get_current_platform()
metadata = None
incomplete = ''
if not args.source_file:
# no file specified, so assume we are in a build tree and find the
# metadata in the current build directory
logger.info("searching for metadata in the current build tree")
config_filename = args.config_filename
config = configfile.ConfigurationDescription(config_filename)
metadata_file = os.path.join(config.get_build_directory(args.configuration, platform), configfile.PACKAGE_METADATA_FILE)
if not os.path.exists(metadata_file):
logger.warning("No complete metadata file found; attempting to use partial data from installed files")
# get the absolute path to the installed-packages.xml file
args.all = False
args.configurations = args.configuration
install_dirs = common.select_directories(args, config, "install", "getting installed packages",
lambda cnf:
os.path.join(config.get_build_directory(cnf, platform), "packages"))
installed_pathname = os.path.join(os.path.realpath(install_dirs[0]), args.installed_filename)
if os.path.exists(installed_pathname):
# dummy up a metadata object, but don't create the file
metadata = configfile.MetadataDescription()
# use the package description from the configuration
metadata.package_description = config.package_description
metadata.add_dependencies(installed_pathname)
incomplete = ' (possibly incomplete)'
else:
raise GraphError("No metadata found in current directory")
else:
metadata = configfile.MetadataDescription(path=metadata_file)
elif args.source_file.endswith(".xml"):
# the specified file is an xml file; assume it is a metadata file
logger.info("searching for metadata in autobuild package metadata file %s" % args.source_file)
metadata = configfile.MetadataDescription(path=args.source_file)
if not metadata:
raise GraphError("No metadata found in '%s'" % args.source_file)
else:
# assume that the file is a package archive and try to get metadata from it
logger.info("searching for metadata in autobuild package file %s" % args.source_file)
metadata_stream = extract_metadata_from_package(args.source_file, configfile.PACKAGE_METADATA_FILE)
if metadata_stream is not None:
metadata = configfile.MetadataDescription(stream=metadata_stream)
if not metadata:
raise GraphError("No metadata found in archive '%s'" % args.file)
if metadata:
graph = pydot.Dot(label=metadata['package_description']['name']+incomplete+' dependencies for '+platform, graph_type='digraph')
graph.set('overlap', 'false')
graph.set('splines', 'true')
graph.set('scale', '2')
graph.set('smoothType', 'spring')
graph.set('labelloc', 'top')
graph.set('labeljust', 'center')
graph.set_node_defaults(shape='box')
def add_depends(graph, pkg):
name = pkg['package_description']['name']
got = graph.get_node(name) # can return a single Node instance, a list of Nodes, or None
try:
pkg_node = got if got is None or isinstance(got, pydot.Node) else got[0]
except IndexError: # some versions of pydot may return an empty list instead of None
pkg_node = None
if pkg_node is None:
logger.debug(" graph adding package %s" % name)
# can't use the dict .get to supply an empty string default for these,
# because the value in the dict is None.
pkg_version = pkg['package_description']['version'] if pkg['package_description']['version'] else "";
pkg_build_id = pkg['build_id'] if pkg['build_id'] else "";
# create the new node with name, version, and build id
pkg_node = pydot.Node(name, label="%s\\n%s\\n%s" % (name, pkg_version, pkg_build_id))
if 'dirty' in pkg and (pkg['dirty'] == 'True' or pkg['dirty'] is True):
logger.debug(" setting %s dirty: %s" % (name, ("missing" if 'dirty' not in pkg else "explicit")))
pkg_node.set_shape('ellipse')
pkg_node.set_style('dashed')
graph.add_node(pkg_node)
if 'dependencies' in pkg:
for dep_pkg in pkg['dependencies'].itervalues():
dep_name = dep_pkg['package_description']['name']
dep_node = add_depends(graph, dep_pkg)
logger.debug(" graph adding dependency %s -> %s" % (dep_name, name))
edge = pydot.Edge(dep_name, name)
if 'dirty' in dep_pkg and (dep_pkg['dirty'] == 'True' or dep_pkg['dirty'] is True):
edge.set_style('dashed')
graph.add_edge(edge)
return pkg_node
root = add_depends(graph, metadata)
root.set_root('true')
root.set_shape('octagon')
if args.dot_file:
try:
dot_file=open(args.dot_file,'wb')
except IOError as err:
raise GraphError("Unable to open dot file %s: %s" % (args.dot_file, err))
dot_file.write(graph.to_string())
dot_file.close()
if args.display or args.graph_file:
if args.graph_file:
graph_file = args.graph_file
else:
graph_file = os.path.join(tempfile.gettempdir(),
metadata['package_description']['name'] + "_graph_"
+ args.graph_type + '.png')
logger.info("writing %s" % graph_file)
graph.write_png(graph_file, prog=args.graph_type)
if args.display and not args.graph_file:
webbrowser.open('file:'+graph_file)
else:
print "%s" % graph.to_string()
else:
raise GraphError("No metadata found")
# This module is an autobuild sub-command; direct execution is redirected.
if __name__ == '__main__':
    sys.exit("Please invoke this script using 'autobuild %s'" %
        AutobuildTool().get_details()["name"])
|
from django.db.models import Q
from django.template import Library, Node, TemplateSyntaxError, resolve_variable
from farms.models import Farm, CropSeason
from operator import itemgetter, attrgetter, methodcaller
from sys import stdout, stderr
register = Library()
class ContextNode(Node):
    """Template node that delegates rendering to an arbitrary callable.

    The callable receives the template context; its return value becomes the
    rendered output (the tags below use it to inject data and return '').
    """
    def __init__(self, func):
        self.func = func  # callable(context) -> str
    def render(self, context):
        return self.func(context)
@register.tag
def farm_list(parser, token):
    """
    Return a list of all Farm objects corresponding to request.user
    """
    def _populate(context):
        # Idempotent: reuse a result injected earlier in the same render.
        if 'farm_list' in context:
            return ''
        user = context['request'].user
        farms = Farm.objects.filter(
            Q(farmer=user) | Q(users=user)
        ).distinct().prefetch_related('field_set')
        context['farm_list'] = farms
        # Attach each farm's fields so templates can iterate without extra queries.
        for farm in farms:
            farm.field_list = farm.field_set.get_queryset()
        return ''
    return ContextNode(_populate)
@register.tag
def crop_season_list(parser, token):
    """
    Return a list of all CropSeason objects corresponding to request.user
    """
    def crop_season_list_wrap(context):
        if 'crop_season_list' in context: return '' # use existing result if present
        user = context['request'].user
        # Seasons on any field of a farm the user owns or is a member of;
        # prefetch fields/farms/probes to avoid per-season queries below.
        crop_season_list = CropSeason.objects.filter( Q(field_list__farm__farmer=user) |
                                                      Q(field_list__farm__users=user) ).\
                                                      distinct().order_by('name'). \
                                                      prefetch_related('field_list',
                                                                       'field_list__farm',
                                                                       'probe_set',
                                                                       #'probe_set__field'
                                                                       )
        context['crop_season_list'] = crop_season_list
        # Pre-compute template-friendly attributes on each season.
        for crop_season in crop_season_list:
            crop_season.year = crop_season.season_start_date.year
            crop_season.field_list_all = crop_season.field_list.all()
            crop_season.probe_list_all = crop_season.probe_set.all() \
                                                    .select_related('field') \
                                                    .distinct()
            # Pair each probe with its field for display.
            probe_field_list = []
            for probe in crop_season.probe_list_all:
                probe_field_list.append( (probe.field, probe) )
            #probe_field_list.sort(key=lambda i:str(i[0]) )
            crop_season.probe_field_list = probe_field_list
        return ''
    return ContextNode(crop_season_list_wrap)
|
from typing import Optional
from plenum.common.constants import DOMAIN_LEDGER_ID, CURRENT_PROTOCOL_VERSION
from plenum.common.txn_util import reqToTxn
from plenum.test.helper import sdk_random_request_objects
def test_post_genesis_txn_from_catchup_added_to_ledger(looper, txnPoolNodeSet):
    """Catch-up post-processing must accept txns with and without a timestamp."""
    node = txnPoolNodeSet[0]

    def append_random_txn(txn_time: Optional[int]) -> dict:
        """Append one random request txn to the node's domain ledger; return it."""
        request = sdk_random_request_objects(
            1, CURRENT_PROTOCOL_VERSION, identifier='someidentifier')[0]
        txn = reqToTxn(request)
        node.domainLedger.append_txns_metadata([txn], txn_time=txn_time)
        node.domainLedger.appendTxns([txn])
        return txn

    # Process some genesis txn (which doesn't have timestamp)
    genesis_txn = append_random_txn(txn_time=None)
    node.postTxnFromCatchupAddedToLedger(DOMAIN_LEDGER_ID, genesis_txn)
    # Process some other txn (which does have timestamp)
    other_txn = append_random_txn(txn_time=13439852)
    node.postTxnFromCatchupAddedToLedger(DOMAIN_LEDGER_ID, other_txn)
|
import s3fs
class S3Client(s3fs.S3FileSystem):
    """
    Wrapper class for accessing S3 storage.

    Provides access to storage objects through a file-system interface.
    """
    def __init__(self, *, aws_access_key_id, aws_secret_access_key, namespace=None, endpoint_url=None, **kwargs):
        """
        Construct a file-system object for SberCloud S3.

        Args:
            aws_access_key_id: Public access key for the S3 bucket
            aws_secret_access_key: Private (secret) access key for the S3 bucket
            namespace: Identifier of the user's namespace in SberCloud storage; documented as
                usable to build the S3 web-service URL when endpoint_url is not given.
                NOTE(review): this class only stores the value — no URL construction is
                visible here; confirm it happens elsewhere (or that endpoint_url is required
                in practice).
            endpoint_url: URL of the SberCloud S3 web service. If not given, namespace must be.
            **kwargs: Extra parameters forwarded to the s3fs.S3FileSystem constructor
        """
        if not namespace and not endpoint_url:
            raise ValueError('Either namespace or endpoint_url is required')
        self.namespace = namespace
        self.aws_access_key_id = aws_access_key_id
        self.aws_secret_access_key = aws_secret_access_key
        self.endpoint_url = endpoint_url
        # Map our parameter names onto s3fs's key/secret/client_kwargs.
        super(S3Client, self).__init__(
            key=self.aws_access_key_id,
            secret=self.aws_secret_access_key,
            client_kwargs={
                'endpoint_url': self.endpoint_url
            },
            **kwargs
        )
|
# Time: O(n^2)
# Space: O(n)
#
# Given a 2D binary matrix filled with 0's and 1's,
# find the largest rectangle containing all ones and return its area.
#
class Solution:
    # @param matrix, a list of lists of 1 length string
    # @return an integer
    def maximalRectangle(self, matrix):
        """Return the area of the largest all-ones rectangle in a binary matrix.

        Single O(m*n) sweep maintaining, per column j:
          H[j] - number of consecutive '1's ending at the current row,
          L[j] - leftmost column of a rectangle of height H[j] through j,
          R[j] - one past the rightmost such column.
        The best area at each cell is H[j] * (R[j] - L[j]).

        Fix: `xrange` (Python-2-only) replaced with `range`, which is valid
        under both Python 2 and 3; behavior is unchanged.
        """
        if not matrix:
            return 0

        result = 0
        m = len(matrix)
        n = len(matrix[0])
        L = [0 for _ in range(n)]
        H = [0 for _ in range(n)]
        R = [n for _ in range(n)]
        for i in range(m):
            left = 0  # left boundary implied by zeros seen so far in this row
            for j in range(n):
                if matrix[i][j] == '1':
                    L[j] = max(L[j], left)
                    H[j] += 1
                else:
                    # column resets: no rectangle can pass through a '0'
                    L[j] = 0
                    H[j] = 0
                    R[j] = n
                    left = j + 1
            right = n  # right boundary implied by zeros, scanned from the right
            for j in reversed(range(n)):
                if matrix[i][j] == '1':
                    R[j] = min(R[j], right)
                    result = max(result, H[j] * (R[j] - L[j]))
                else:
                    right = j
        return result
# Ad-hoc demo (Python 2: print statement). Expected output: 9.
if __name__ == "__main__":
    matrix = ["01101",
              "11010",
              "01110",
              "11110",
              "11111",
              "00000"]
    print Solution().maximalRectangle(matrix)
|
import logging
from protean.container import Element, OptionsMixin
from protean.utils import DomainObjects, derive_element_class
logger = logging.getLogger(__name__)
class BaseApplicationService(Element, OptionsMixin):
    """Base ApplicationService class that all other Application services should inherit from.

    This class is a placeholder class for now. Application concepts directly influence the
    method names in concrete Application Service classes, so no abstract methods are necessary.
    Each Application Service class is usually associated one-to-one with API calls.

    Application services are responsible for fetching the linked domain, initializing repositories,
    caches, and message brokers, and injecting dependencies into the domain layer. These are automatable
    aspects that can be part of the base class in the future.
    """

    element_type = DomainObjects.APPLICATION_SERVICE

    class Meta:
        # Marks this class as abstract for the element machinery.
        abstract = True

    def __new__(cls, *args, **kwargs):
        # Guard against direct instantiation of the abstract base class.
        if cls is BaseApplicationService:
            raise TypeError("BaseApplicationService cannot be instantiated")
        # BUG FIX: previously forwarded *args/**kwargs to object.__new__,
        # which raises "object.__new__() takes exactly one argument" whenever
        # a subclass is constructed with arguments (because __new__ is
        # overridden here). object.__new__ must receive only the class;
        # constructor arguments are consumed by __init__.
        return object.__new__(cls)

    @classmethod
    def _default_options(cls):
        # No Meta options (beyond `abstract`) are supported yet.
        return []
def application_service_factory(element_cls, **kwargs):
    # Register/derive element_cls as an ApplicationService element; kwargs are
    # forwarded to derive_element_class (e.g. Meta options).
    return derive_element_class(element_cls, BaseApplicationService, **kwargs)
|
# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
from logging import getLogger
from typing import List
from hardly.handlers import DistGitMRHandler
from hardly.handlers.distgit import PipelineHandler
from packit_service.worker.events import (
Event,
MergeRequestGitlabEvent,
PipelineGitlabEvent,
)
from packit_service.worker.handlers import JobHandler
from packit_service.worker.jobs import SteveJobs
from packit_service.worker.parser import Parser
from packit_service.worker.result import TaskResults
logger = getLogger(__name__)
class StreamJobs(SteveJobs):
    """SteveJobs specialisation for the source-git ('hardly') workflow.

    Only the dist-git MR and pipeline handlers are dispatched; the default
    packit jobs (copr-build & tests) are intentionally disabled for now.
    """

    def process_jobs(self, event: Event) -> List[TaskResults]:
        return []  # For now, don't process default jobs, i.e. copr-build & tests
        # return super().process_jobs(event)

    def process_message(
        self, event: dict, topic: str = None, source: str = None
    ) -> List[TaskResults]:
        """
        Entrypoint for message processing.

        Parses the payload, runs pre-checks, then fires the MR/pipeline
        handlers asynchronously before delegating to process_jobs (currently
        a no-op, see above).

        :param event: dict with webhook/fed-mes payload
        :param topic: meant to be a topic provided by messaging subsystem (fedmsg, mqqt)
        :param source: source of message
        """
        if topic:
            # let's pre-filter messages: we don't need to get debug logs from processing
            # messages when we know beforehand that we are not interested in messages for such topic
            topics = [
                getattr(handler, "topic", None)
                for handler in JobHandler.get_all_subclasses()
            ]
            if topic not in topics:
                logger.debug(f"{topic} not in {topics}")
                return []
        event_object = Parser.parse_event(event)
        if not (event_object and event_object.pre_check()):
            return []
        # CoprBuildEvent.get_project returns None when the build id is not known
        if not event_object.project:
            logger.warning(
                "Cannot obtain project from this event! "
                "Skipping private repository check!"
            )
        # DistGitMRHandler handler is (for now) run even the job is not configured in a package.
        if isinstance(event_object, MergeRequestGitlabEvent):
            DistGitMRHandler.get_signature(
                event=event_object,
                job=None,
            ).apply_async()
        if isinstance(event_object, PipelineGitlabEvent):
            PipelineHandler.get_signature(
                event=event_object,
                job=None,
            ).apply_async()
        return self.process_jobs(event_object)
|
import turtle

# Draw the five Olympic rings. The original script repeated the same five
# lines of drawing code once per ring; the rings are now data-driven.

# Radius shared by all rings.
RING_RADIUS = 100

# (pen colour, x, y) per ring, in the original drawing order.
RINGS = [
    ("blue", 0, 0),
    ("black", 220, 0),
    ("red", 440, 0),
    ("yellow", 110, -100),
    ("green", 330, -100),
]

t = turtle.Turtle()

# Screen configuration: fill the display.
screen = turtle.Screen()
screen.setup(width=1.0, height=1.0)

# Turtle configuration.
t.pensize(10)
t.pencolor("white")
t.shapesize(3, 3, 3)
t.fillcolor("blue")
t.shape("turtle")


def draw_ring(color, x, y):
    """Move pen-up to (x, y), then draw one ring in the given colour."""
    t.penup()
    t.pencolor(color)
    t.goto(x, y)
    t.pendown()
    t.circle(RING_RADIUS)


for color, x, y in RINGS:
    draw_ring(color, x, y)

turtle.done()
from Wikipediapagescrapper.pagescrapper import scrapWikipediaPage
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 23 00:23:23 2021
@author: Aditya Sarkar
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Compare initial vs sentiment-polarity scores per clustered hotel as grouped bars.
df = pd.read_csv("Kuala_Lumpur_cluster.csv")
names = df['Hotels'].values
x = np.arange(len(names))  # one bar-group position per hotel
w = 0.3  # bar width; the two series are offset by one width
plt.bar(x-w, df['score'].values, width=w, label='Initial Scores')
plt.bar(x, df['polarity_score'].values, width=w, label='Polarity Scores')
plt.xticks(x, names)
plt.ylim([0,10])  # fixed 0-10 axis for comparability
plt.tight_layout()
plt.xlabel('Kuala Lumpur Hotels Analysis')
# Legend placed below the axes so it does not overlap the bars.
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1), fancybox=True, ncol=5)
plt.savefig("Kuala_Lumpur_plot.png", bbox_inches="tight")
plt.show()
from .user import User, UserModel, UserSignUpModel
|
from Data.Scores import Scores
from Models.TradingViewList import TradingViewList
from Models.FileWriter import FileWriter
# Export the best-scoring oscillators to a TradingView watch-list file.
scores_dao = Scores()  # data-access object for oscillator scores
result = scores_dao.get_best_oscillators()
writer = FileWriter(
    file_dir = './tradingView_list.txt'  # output path for the watch list
)
tv_list = TradingViewList(result, writer)
tv_list.create()  # write the list via the FileWriter
print(result)
|
# generate the data depending on the problem structure
import pandas as pd
import os
def read_data(datafolder):
    """Load all supply-chain model inputs from ./data/<datafolder>/*.csv.

    Reads supplier/market locations, facility data, availability, demand,
    raw-material and transport costs, and builds the dictionaries (keyed by
    supplier/facility/market and time period) the optimization model expects.

    Returns a list in a fixed order that callers unpack positionally:
    [suppliers, xi, yi, time_periods, markets, xj, yj, centr_facilities,
     distr_facilities, facilities, cv, mc, a, d, RM, FIC, VIC, FOC, VOC,
     ft1, ft2, vt1, vt2, interest_factor]
    """
    curPath = os.path.join(os.path.abspath(os.path.curdir), 'data', datafolder)
    # suppliers: index is the supplier id; columns are x/y coordinates
    supplier_location = pd.read_csv(os.path.join(curPath, 'suppliers_location.csv'), index_col=0)
    suppliers = list(supplier_location.index)
    xi = {i: supplier_location.loc[i].values[0] for i in suppliers}
    yi = {i: supplier_location.loc[i].values[1] for i in suppliers}
    raw_material = pd.read_csv(os.path.join(curPath, 'raw_material_cost.csv'), index_col=0)
    # time periods: taken from the raw-material cost columns
    time_periods = raw_material.columns.values.tolist()
    # markets
    markets_location = pd.read_csv(os.path.join(curPath, 'markets_location.csv'), index_col=0)
    markets = list(markets_location.index)
    xj = {j: markets_location.loc[j].values[0] for j in markets}
    yj = {j: markets_location.loc[j].values[1] for j in markets}
    # facilities: one row per type ('centr'/'distr') with counts and cost data
    facilities_data = pd.read_csv(os.path.join(curPath, 'facilities_data.csv'), index_col=0)
    centr_facilities = ['cf' + str(n) for n in range(1, int(facilities_data.loc['centr']['number']) + 1)]
    distr_facilities = ['df' + str(n) for n in range(1, int(facilities_data.loc['distr']['number']) + 1)]
    facility_types = {'distr': distr_facilities, 'centr': centr_facilities}
    facilities = list(centr_facilities + distr_facilities)
    # per-facility parameters expanded from the per-type rows
    cv = {k: facilities_data.loc[n]['cv'] for n in facility_types.keys() for k in facilities if k in facility_types[n]}
    mc = {k: facilities_data.loc[n]['mc'] for n in facility_types.keys() for k in facilities if k in facility_types[n]}
    # availability and demand, keyed by (entity, time period)
    availability = pd.read_csv(os.path.join(curPath, 'availability.csv'), index_col=0)
    a = {(i, t): availability[t][i] for i in suppliers for t in time_periods}
    demand = pd.read_csv(os.path.join(curPath, 'demand.csv'), index_col=0)
    d = {(j, t): demand[t][j] for j in markets for t in time_periods}
    # cost of raw material
    RM = {(i, t): raw_material[t][i] for i in suppliers for t in time_periods}
    # facility costs (fixed/variable investment and operating), constant over time
    FIC = {(k, t): facilities_data.loc[n]['FIC'] for n in facility_types.keys() for k in facilities if
           k in facility_types[n] for t in time_periods}
    VIC = {(k, t): facilities_data.loc[n]['VIC'] for n in facility_types.keys() for k in facilities if
           k in facility_types[n] for t in time_periods}
    FOC = {(k, t): facilities_data.loc[n]['FOC'] for n in facility_types.keys() for k in facilities if
           k in facility_types[n] for t in time_periods}
    VOC = {(k, t): facilities_data.loc[n]['VOC'] for n in facility_types.keys() for k in facilities if
           k in facility_types[n] for t in time_periods}
    # transportation costs: single coefficients applied to every arc/period
    trans_costs = pd.read_csv(os.path.join(curPath, 'coeff_trans_costs.csv'), index_col=0, header=None)
    ft1 = {(i, k, t): trans_costs.loc['ft1'].values[0] for i in suppliers for k in facilities for t in time_periods}
    ft2 = {(k, j, t): trans_costs.loc['ft2'].values[0] for k in facilities for j in markets for t in time_periods}
    vt1 = {(i, k, t): trans_costs.loc['vt1'].values[0] for i in suppliers for k in facilities for t in time_periods}
    vt2 = {(k, j, t): trans_costs.loc['vt2'].values[0] for k in facilities for j in markets for t in time_periods}
    # interest rate: discount factor per period position
    interest_rate = 0.01
    interest_factor = {t: 1/(1+interest_rate)**(time_periods.index(t)) for t in time_periods}
    data = [suppliers, xi, yi, time_periods, markets, xj, yj, centr_facilities, distr_facilities, facilities, cv, mc,
            a, d, RM, FIC, VIC, FOC, VOC, ft1, ft2, vt1, vt2, interest_factor]
    return data
|
import requests
import json
# REFERENCE: https://developers.meethue.com/develop/get-started-2/
class HueController(object):
    """Convenience wrapper around the Philips Hue bridge REST API.

    Maps human-friendly light/group names (plus a few spoken aliases) to the
    numeric ids the bridge expects, and exposes on/off/brightness helpers.

    REFERENCE: https://developers.meethue.com/develop/get-started-2/
    """

    def __init__(self):
        # NOTE(review): hard-coded bridge IP and API username; consider moving
        # these to configuration or environment variables.
        self.user = '7wl8U1CnKZlk6kE8WHPzCQatw5VqWb0oiqjZFinR'
        self.api_base = f'http://192.168.1.4/api/{self.user}'
        self.group_map = {}   # group name -> bridge group id
        self.light_map = {}   # light name -> bridge light id
        # Spoken-alias -> canonical group name mapping (keys are the canonical names).
        self.group_aliases = {
            'Lamp': [
                'living room',
                'livingroom',
                'lamp'
            ],
            'Bedroom': [
                'bed room',
                'bedroom',
                'master bedroom',
                'master bed room'
            ],
            'Craft Room': [
                'office',
                'craftroom',
                'craft room'
            ]
        }
        self._init_states()

    def _init_states(self):
        """Populate the name->id maps from the bridge; exits on failure."""
        groups = self.get_groups()
        if groups.status_code != 200:
            print(f'Cannot reach Hue bridge at {self._build_url(["groups"])}')
            exit(1)
        for id, group in groups.json().items():
            self.group_map[group['name']] = id
        lights = self.get_lights()
        if lights.status_code != 200:
            print(f'Cannot reach Hue bridge at {self._build_url(["lights"])}')
            exit(1)
        for id, light in lights.json().items():
            self.light_map[light['name']] = id

    def _build_url(self, parts: list) -> str:
        """Join URL path parts onto the authenticated API base."""
        return '/'.join([self.api_base, *parts])

    def _clamp_brightness(self, bright: int) -> int:
        """Convert a 0-100 percentage to the bridge's 0-254 scale, clamped."""
        return max(0, min(int(254 * (bright / 100)), 254))

    def get_lights(self) -> requests.Response:
        """GET all lights known to the bridge."""
        return requests.get(
            url=self._build_url(['lights'])
        )

    def _get_light_id(self, name: str) -> str:
        """Resolve a light name to its bridge id; exits if unknown."""
        if name not in self.light_map.keys():
            print(f'ERROR: Cannot find Light named {name}')
            exit(1)
        return str(self.light_map[name])

    def get_light_by_name(self, name: str) -> requests.Response:
        """GET the state of a single light by its name."""
        return requests.get(
            url=self._build_url(['lights', self._get_light_id(name)])
        )

    def turn_on_light(self, id: str, bright: int = None) -> requests.Response:
        """Turn a light on, optionally setting brightness (percent 0-100)."""
        body = {'on': True}
        if bright is not None:
            # BUG FIX: was `body['bri': ...]`, which indexes the dict with a
            # slice (raising "unhashable type: 'slice'") instead of setting
            # the 'bri' key.
            body['bri'] = self._clamp_brightness(bright)
        return requests.put(
            url=self._build_url(['lights', id, 'state']),
            data=json.dumps(body)
        )

    def turn_off_light(self, id: str) -> requests.Response:
        """Turn a single light off."""
        return requests.put(
            url=self._build_url(['lights', id, 'state']),
            data=json.dumps({'on': False})
        )

    def _set_light_bright(self, id: str, bright: int) -> requests.Response:
        """Set a light's brightness; `bright` is already on the 0-254 scale."""
        return requests.put(
            url=self._build_url(['lights', id, 'state']),
            data=json.dumps({'bri': bright})
        )

    def get_groups(self) -> requests.Response:
        """GET all light groups known to the bridge."""
        return requests.get(
            url=self._build_url(['groups'])
        )

    def get_group_names(self) -> list:
        """Return the names of all groups; exits if the bridge is unreachable."""
        resp = self.get_groups()
        if resp.status_code != 200:
            print('Cannot reach Hue bridge to get Groups!')
            exit(1)
        return [group['name'] for group in resp.json().values()]

    def _get_group_id(self, name: str) -> str:
        """Resolve a group name or alias to its bridge id; exits if unknown."""
        group_name = self._group_name_from_alias(name)
        if group_name == '':
            print(f'ERROR: Cannot find Group named {name}')
            exit(1)
        return str(self.group_map[group_name])

    def _group_name_from_alias(self, alias: str) -> str:
        """Return the canonical group name for an alias, or '' if none matches."""
        for group, aliases in self.group_aliases.items():
            if alias == group.lower() or alias in aliases:
                return group
        return ''

    def get_group_by_name(self, name: str) -> requests.Response:
        """GET the state of a group by its (alias) name."""
        return requests.get(
            url=self._build_url(['groups', self._get_group_id(name)])
        )

    def turn_on_group(self, name: str, bright=None) -> None:
        """Turn on every light in a group at the given brightness (percent).

        Defaults to full brightness when `bright` is omitted.
        """
        # If we are setting the brightness, we should set all the lights
        # before turning them on, otherwise use previous brightness
        bright = self._clamp_brightness(bright if bright is not None else 100)
        group = self.get_group_by_name(name).json()
        if not group['state']['all_on']:
            # BUG FIX: `bright` is already converted to the bridge 0-254
            # scale above; clamping it a second time re-scaled mid-range
            # values (e.g. 50% became 254 here but 127 for the per-light
            # calls below), producing inconsistent brightness.
            body = {'on': True, 'bri': bright}
            requests.put(
                url=self._build_url(
                    ['groups', self._get_group_id(name), 'action']
                ),
                data=json.dumps(body)
            )
        for light_id in group['lights']:
            resp = self._set_light_bright(light_id, bright)
            if resp.status_code != 200:
                print(f'ERROR: Could not access Light {light_id}')

    def turn_off_group(self, name: str) -> requests.Response:
        """Turn every light in a group off."""
        return requests.put(
            url=self._build_url(
                ['groups', self._get_group_id(name), 'action']
            ),
            data=json.dumps({'on': False})
        )
|
from abc import ABC, abstractmethod
from dataclasses import field, dataclass
from typing import List
import numpy as np
from dataclasses_json import dataclass_json
from paiargparse import pai_dataclass
from tfaip.util.enum import StrEnum
from calamari_ocr.ocr.predict.params import (
Prediction,
PredictionPosition,
PredictionCharacter,
)
class CTCDecoderType(StrEnum):
    """Available CTC decoding algorithms (see create_ctc_decoder below)."""
    Default = "default"
    TokenPassing = "token_passing"
    WordBeamSearch = "word_beam_search"
@pai_dataclass
@dataclass
class CTCDecoderParams:
    """Parameters controlling CTC decoding."""

    type: CTCDecoderType = CTCDecoderType.Default  # which decoder to instantiate
    blank_index: int = 0  # label index reserved for the CTC blank
    min_p_threshold: float = 0  # minimum confidence for alternative characters
    # BUG FIX: `beam_width = 25` had no type annotation, so it was a plain
    # class attribute rather than a dataclass field — it could never be set
    # via the constructor, CLI, or serialization. Annotated to make it a
    # real field (keyword construction is unaffected; only positional
    # construction past min_p_threshold would shift, which dataclasses with
    # all-default fields are not normally built with).
    beam_width: int = 25
    non_word_chars: List[str] = field(default_factory=lambda: list("0123456789[]()_.:;!?{}-'\""))
    dictionary: List[str] = field(default_factory=list)  # word list for dictionary-based decoders
    word_separator: str = " "
def create_ctc_decoder(codec, params: CTCDecoderParams = None):
    """Factory: instantiate the CTC decoder selected by params.type.

    Backend imports are deferred so optional decoders are only loaded when
    actually requested. Raises NotImplementedError for unknown types.
    """
    params = params or CTCDecoderParams()
    if params.type == CTCDecoderType.Default:
        from .default_ctc_decoder import DefaultCTCDecoder

        return DefaultCTCDecoder(params, codec)
    elif params.type == CTCDecoderType.TokenPassing:
        from .token_passing_ctc_decoder import TokenPassingCTCDecoder

        return TokenPassingCTCDecoder(params, codec)
    elif params.type == CTCDecoderType.WordBeamSearch:
        from .ctcwordbeamsearchdecoder import WordBeamSearchCTCDecoder

        return WordBeamSearchCTCDecoder(params, codec)
    # BUG FIX: `raise NotImplemented` raised a TypeError (NotImplemented is a
    # comparison sentinel, not an exception class).
    raise NotImplementedError("Unknown CTC decoder type: {}".format(params.type))
class CTCDecoder(ABC):
    """Abstract base for CTC decoders: turns per-frame label probabilities into a Prediction."""

    def __init__(self, params, codec):
        super().__init__()
        self.params = params  # CTCDecoderParams controlling decoding
        self.codec = codec  # maps characters <-> integer labels

    @abstractmethod
    def decode(self, probabilities):
        """
        Decoding algorithm of the individual CTCDecoder. This abstract function is reimplemented
        by the DefaultCTCDecoder and the FuzzyCTCDecoder.

        Parameters
        ----------
        probabilities : array_like
            Prediction probabilities of the neural net to decode or shape (length x character probability).
            The blank index must be 0.

        Returns
        -------
        a Prediction object
        """
        return Prediction()

    def _prediction_from_string(self, probabilities, sentence):
        # Build a Prediction directly from an already-decoded string,
        # assigning every character a probability of 1.0.
        pred = Prediction()
        pred.labels[:] = self.codec.encode(sentence)
        pred.is_voted_result = False
        pred.logits = probabilities
        for c, l in zip(sentence, pred.labels):
            pred.positions.append(PredictionPosition(chars=[PredictionCharacter(label=l, char=c, probability=1.0)]))
        return pred

    def find_alternatives(self, probabilities, sentence, threshold) -> Prediction:
        """
        Find alternatives to the decoded sentence in the logits.
        E.g. if a 'c' is decoded in the range 2 to 4, this algorithm will add all characters in the interval [2, 4] to
        the output if the confidence of the character is higher than the threshold, respectively.

        Parameters
        ----------
        probabilities : array_like
            Prediction of the neural net to decode or shape (length x character probability).
            The blank index must be 0.
        sentence : list of tuple (character index, start pos, end pos)
            The decoded sentence (depends on the CTCDecoder).
            The position refer to the character position in the logits.
        threshold : float
            Minimum confidence for alternative characters to be listed.
        Returns
        -------
        a Prediction object
        """
        # find alternatives
        pred = Prediction()
        pred.labels[:] = [c for c, _, _ in sentence]
        pred.is_voted_result = False
        pred.logits = probabilities
        pred.avg_char_probability = 0
        for c, start, end in sentence:
            p = probabilities[start:end]
            p = np.max(p, axis=0)  # best per-label confidence over this character's frames
            pos = PredictionPosition(local_start=start, local_end=end - 1)
            pred.positions.append(pos)
            # Iterate labels in descending confidence; stop once below the
            # threshold, but always keep at least the single best candidate.
            for label in reversed(sorted(range(len(p)), key=lambda v: p[v])):
                if p[label] < threshold and len(pos.chars) > 0:
                    break
                else:
                    pos.chars.append(
                        PredictionCharacter(
                            label=label,
                            probability=p[label],
                        )
                    )
            if len(pos.chars) > 0:
                pred.avg_char_probability += pos.chars[0].probability
        # Average best-candidate confidence over positions (guard empty sentence).
        pred.avg_char_probability /= len(pred.positions) if len(pred.positions) > 0 else 1
        return pred
|
import numpy as np
from sympy import mpmath as mp
from matplotlib import pyplot as plt
def singular_surface_plot(f, mn=-1., mx=1., res=500, threshold=2., lip=.1):
    """ Plots the absolute value of a function as a surface plot """
    # NOTE(review): unimplemented stub. Presumably intended to sample f on a
    # res x res grid over [mn, mx]^2 in the complex plane, clip |f| at
    # `threshold`, and draw a matplotlib surface; the role of `lip` is not
    # evident from this file — confirm intent before implementing.
    pass
def partial_fractions(p, q):
    """ Finds the partial fraction representation of the rational
    function 'p' / 'q' where 'q' is assumed to not have any repeated
    roots. 'p' and 'q' are both assumed to be numpy poly1d objects.
    Returns two arrays. One containing the coefficients for
    each term in the partial fraction expansion, and another containing
    the corresponding roots of the denominators of each term. """
    # Implemented (was a `pass` stub). For a simple root r of q, the residue
    # of p/q at r is p(r) / q'(r), so
    #     p(x)/q(x) == sum_k coeffs[k] / (x - roots[k]).
    roots = q.roots
    coeffs = p(roots) / q.deriv()(roots)
    return coeffs, roots
def cpv(p, q, tol=1E-8):
    """ Evaluates the cauchy principal value of the integral over the
    real numbers of 'p' / 'q'. 'p' and 'q' are both assumed to be numpy
    poly1d objects. 'q' is expected to have a degree that is
    at least two higher than the degree of 'p'. Roots of 'q' with
    imaginary part of magnitude less than 'tol' are treated as if they
    had an imaginary part of 0. """
    # Implemented (was a `pass` stub) via the residue theorem: close the
    # contour in the upper half plane (valid since deg(q) >= deg(p) + 2 makes
    # the arc contribution vanish). Poles strictly above the axis contribute
    # 2*pi*i times their residue; simple poles on the axis contribute pi*i.
    # NOTE(review): like partial_fractions above, residues are computed as
    # p(r)/q'(r), which assumes q has no repeated roots.
    roots = q.roots
    residues = p(roots) / q.deriv()(roots)
    total = 0.0j
    for root, res in zip(roots, residues):
        if abs(np.imag(root)) < tol:
            total += 1j * np.pi * res
        elif np.imag(root) > 0:
            total += 2j * np.pi * res
    # The principal value of a real integral is real; discard rounding noise.
    return total.real
def count_roots(p):
    """ Counts the number of roots of the polynomial object 'p' on the
    interior of the unit ball using an integral. """
    # Implemented (was a `pass` stub) via the argument principle:
    #     (1 / (2*pi*i)) * integral over |z|=1 of p'(z)/p(z) dz
    # equals the number of zeros inside the unit circle, counted with
    # multiplicity. Parametrizing z = e^{i*theta}, the uniform-grid sum is
    # the periodic trapezoid rule, which converges very fast for smooth
    # integrands.
    # NOTE(review): assumes p has no root on (or extremely close to) the
    # unit circle itself.
    samples = 2048
    thetas = np.linspace(0.0, 2.0 * np.pi, samples, endpoint=False)
    z = np.exp(1j * thetas)
    integrand = p.deriv()(z) / p(z) * 1j * z
    integral = integrand.sum() * (2.0 * np.pi / samples)
    return int(round((integral / (2j * np.pi)).real))
|
from argparse import ArgumentParser
import os
from inference import exec_inference
def main():
    """Parse the command-line options and launch the inference run."""
    arg_parser = ArgumentParser()
    arg_parser.add_argument("-m", "--model-name", dest="model_name", default="resnet18", type=str)
    arg_parser.add_argument("-bs", "--batch-size", dest="batch_size", default=128, type=int)
    arg_parser.add_argument("-p", "--parallel-exec", dest="parallel_exec", default=1, type=int)
    # Hand the parsed namespace straight to the inference driver.
    exec_inference(arg_parser.parse_args())
# Script entry point: run inference with CLI-provided options.
if __name__ == "__main__":
    main()
|
from textwrap import dedent
from tests import check_as_expected
ROOT = 'superhelp.helpers.dict_help.'
def test_misc():
    """Check the dict helpers fire the expected number of times per snippet.

    Each entry pairs a code snippet with the expected count of messages from
    each helper. Cases cover: no dict at all, a single dict literal, two
    dicts, mixed key types, a dict built inside a loop, and dict() called on
    a list of tuples (including a mixed-key and an empty variant).
    """
    test_conf = [
        (
            dedent("""\
                pet = 'cat'
                """),
            {
                ROOT + 'dict_overview': 0,
                ROOT + 'mixed_key_types': 0,
            }
        ),
        (
            dedent("""\
                capitals = {
                    'NZ': 'Wellington',
                    'Australia': 'Canberra',
                    'Japan': 'Tokyo',
                }
                """),
            {
                ROOT + 'dict_overview': 1,
                ROOT + 'mixed_key_types': 0,
            }
        ),
        (
            dedent("""\
                capitals = {
                    'NZ': 'Wellington',
                    'Australia': 'Canberra',
                    'Japan': 'Tokyo',
                }
                """),
            {
                ROOT + 'dict_overview': 1,
                ROOT + 'mixed_key_types': 0,
            }
        ),
        (
            dedent("""\
                capitals = {
                    'NZ': 'Wellington',
                    'Australia': 'Canberra',
                    'Japan': 'Tokyo',
                }
                capitals2 = {
                    'NZ': 'Wellington',
                    'Australia': 'Canberra',
                    'Japan': 'Tokyo',
                }
                """),
            {
                ROOT + 'dict_overview': 2,
                ROOT + 'mixed_key_types': 0,
            }
        ),
        (
            dedent("""\
                mixed = {
                    'a': 'string',
                    1: 'integer',
                }
                """),
            {
                ROOT + 'dict_overview': 1,
                ROOT + 'mixed_key_types': 1,
            }
        ),
        (
            dedent("""\
                for i in range(2):
                    mixed = {
                        'a': 'string',
                        1: 'integer',
                    }
                """),
            {
                ROOT + 'dict_overview': 1,
                ROOT + 'mixed_key_types': 1,
            }
        ),
        (
            dedent("""\
                name = dict([('NZ', 'Wellington'), ('Australia', 'Canberra')])
                """),
            {
                ROOT + 'dict_overview': 1,
                ROOT + 'mixed_key_types': 0,
            }
        ),
        (
            dedent("""\
                name = dict([('NZ', 'Wellington'), (1, 'Canberra')])
                """),
            {
                ROOT + 'dict_overview': 1,
                ROOT + 'mixed_key_types': 1,
            }
        ),
        (
            dedent("""\
                name = dict([])
                """),
            {
                ROOT + 'dict_overview': 1,
                ROOT + 'mixed_key_types': 0,
            }
        ),
    ]
    # Helpers must behave the same whether or not the snippets are executed
    check_as_expected(test_conf, execute_code=True)
    check_as_expected(test_conf, execute_code=False)
# test_misc()
|
import requests
import itertools
import os
import names
import random
import string
# Base URL of the local API server; all requests share one cookie session
url = 'http://localhost:5000/'
session = requests.session()
# os.system('rm ./db.sqlite')

# Pools of values from which random user profiles are drawn
colleges = ["Christ Church", "Exeter", "Magdalen", "St John's", "Jesus", "Wadham", "Univ", "Trinity", "Balliol"]
departments = ["American Institute",
               "Art",
               "Classics",
               "English Language and Literature",
               "History",
               "History of Art",
               "Linguistics, Philology & Phonetics",
               "Medieval and Modern Languages",
               "Music",
               "Oriental Studies",
               "Philosophy",
               "Theology and Religion",
               "Chemistry",
               "Computer Science",
               "e-Research Centre",
               "Earth Sciences",
               "Engineering Science",
               "Life Sciences Interface Doctoral Training Centre",
               "Materials",
               "Mathematics",
               "Physics",
               "Plant Sciences",
               "Statistics",
               "Zoology",
               "Biochemistry",
               "Clinical Medicine",
               "Clinical Neurosciences",
               "Experimental Psychology",
               "Medicine",
               "Oncology",
               "Orthopaedics, Rheumatology and Musculoskeletal Sciences",
               "Paediatrics",
               "Pathology",
               "Pharmacology",
               "Physiology, Anatomy & Genetics",
               "Population Health",
               "Primary Care Health Sciences",
               "Psychiatry",
               "Surgical Sciences",
               "Women's & Reproductive Health",
               "Anthropology and Museum Ethnography",
               "Archaeology",
               "Business",
               "Economics",
               "Education",
               "Geography and the Environment",
               "Global and Area Studies",
               "Government",
               "International Development",
               "Internet Institute",
               "Law",
               "Oxford Martin School",
               "Politics and International Relations",
               "Social Policy and Intervention",
               "Sociology",
               "Continuing Education"]
# Repeated entries bias the draw (male/female more likely than the others)
genders = ["male", "male", "male", "male", "male", "female", "female", "female", "female", "female", "non-binary", "prefer not to disclose"]
pronouns = ["he/him", "she/her", "they/them"]
hobbies = ["football", "tennis", "basketball", "swimming", "chess", "eating", "swimming", "cooking", "CS", "programming", "coding", "gamming" ]

# How many users to create and how many publish/match/accept actions to run
u_number = 25
publish_number = 40
match_number = 20
accept_number = 5

# Randomly generated per-user attributes, index-aligned across all lists
u_ids = []
u_names = [names.get_full_name() for i in range(u_number)]
u_college = [random.choice(colleges) for i in range(u_number)]
u_department = [random.choice(departments) for i in range(u_number)]
u_email = [str(random.randint(100000, 1000000)) + "@ox.ac.uk" for i in range(u_number)]
u_gender = [random.choice(genders) for i in range(u_number)]
u_pronoun = [random.choice(pronouns) for i in range(u_number)]
u_year = [random.randint(2020, 2024) for i in range(u_number)]
u_phone = [str(random.randint(1000000000, 10000000000)) for i in range(u_number)]
u_password = [''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=10)) for i in range(u_number)]
def generate_users (u_number):
    """Register ``u_number`` random users via the API and record their ids.

    Uses the index-aligned u_* lists built at module level; appends each
    returned user_id to the global ``u_ids``.
    """
    for i in range(u_number):
        result = session.post(url+'auth/register', json={
            'name': u_names[i],
            'college': u_college[i],
            'department': u_department[i],
            'email': u_email[i],
            'year': u_year[i],
            'phone_number': u_phone[i],
            'gender': u_gender[i],
            'pronouns': u_pronoun[i],
            'description': 'I like ' + random.choice(hobbies),
            'password': u_password[i],
        })
        print(result.text)
        # print(":", result.json()["email"], result.json()["password"])
        u_ids.append(result.json()["user_id"])
    # NOTE(review): assumes registration leaves the session logged in, so we
    # log out once after the batch — confirm against the server's behaviour
    user_logout()
def user_login(uid):
    """Log in as generated user ``uid`` and print the server's reply."""
    credentials = {
        'email': u_email[uid],
        'password': u_password[uid],
    }
    response = session.post(url + 'auth/login', json=credentials)
    print(response.text)
def user_logout():
    """End the current session and print the server's reply."""
    response = session.get(url + 'auth/logout')
    print(response.text)
# Candidate locations and activities for published hangouts
loc = ["Christ Church Meadow", "University Park", "Boatyard"]
act = ["walk", "run", "dog walking", "dove feeding"]
def publish_hangout(uid, if_requirement=False):
    """Publish a hangout on behalf of the currently logged-in user.

    ``uid`` is accepted for call-site compatibility but is not sent to the
    endpoint. ``if_requirement`` selects a fixed late-evening time slot
    instead of a random one; all matching conditions stay wildcards.
    """
    if if_requirement:
        chosen_time = "2020-11-15 11 p.m."
    else:
        chosen_time = random.choice([
            "2020-11-08 09", "2020-11-08 11", "2020-11-16 09",
            "2020-11-20 17", "2020-11-17 11", "2020-11-13 11",
            "2020-11-14 04", "2020-11-08 10",
        ])
    payload = {
        'time': chosen_time,
        'location': random.choice(loc),
        'activity': random.choice(act),
        'cond_name': "*",
        'cond_college': "*",
        'cond_department': "*",
        'cond_gender': "*",
        'cond_year': 0,
    }
    print(session.post(url + 'auth/publish', json=payload).text)
def get_feeds():
    """Return the list of hangout feeds visible to the logged-in user.

    Calls the ``auth/my_feed`` endpoint and extracts the ``feeds`` field.
    """
    response = session.post(url + 'auth/my_feed', json={})
    # Parse the JSON body once (the original parsed it twice, once into an
    # unused local and again to index into it).
    return response.json()["feeds"]
def match(hid):
    """Ask to take (match with) the hangout identified by ``hid``."""
    response = session.post(url + 'auth/take', json={'hid': hid})
    print(response.text)
def get_cands():
    """Return the logged-in user's matched hangouts.

    Calls ``auth/my_hangouts`` filtered to the 'matched' type and extracts
    the ``hangouts`` field.
    """
    response = session.post(url + 'auth/my_hangouts', json={
        'hangout_type': 'matched'
    })
    # Parse the JSON body once (the original parsed it twice, once into an
    # unused local and again to index into it).
    return response.json()['hangouts']
def accept(hid):
    """Accept the matched hangout identified by ``hid``."""
    response = session.post(url + 'auth/accept', json={'hid': hid})
    print(response.text)
# Seed the database: create users, publish hangouts, then match and accept
generate_users (u_number)

print("begin publishing")
for i in range(publish_number):
    # A random user logs in and publishes one hangout
    uid = random.randint(0, u_number - 1)
    user_login(uid)
    publish_hangout (random.randint(0, u_number - 1))
    user_logout()

print("begin matching")
for i in range(match_number):
    # A random user takes a random hangout from their feed (if any)
    uid = random.randint(0, u_number - 1)
    user_login(uid)
    feeds = get_feeds()
    print("user: ", uid, "len: ", len(feeds))
    if len(feeds) == 0:
        # NOTE(review): this skips user_logout(), leaving the session
        # logged in for the next iteration — confirm this is intended
        continue
    match(random.choice(feeds)['hangout_id'])
    # print("id::", random.choice(feeds)['hangout_id'])
    user_logout()

print("begin choosing")
for i in range(accept_number ):
    # A random user accepts one of their matched hangouts (if any)
    uid = random.randint(0, u_number - 1)
    user_login(uid)
    cand = get_cands()
    if len(cand) > 0:
        accept(random.choice(cand)['hangout_id'])
    user_logout()
|
import numpy as np
import scipy.optimize
from util import logistic, log_likelihood, avg_log_likelihood, log_determinant, transform_to_rbf
# class RBFWrapper(object):
# def __init__(self, base_model, rbf_width, radial_basis=None):
# self.model = base_model
# self.rbf_width = rbf_width
# self.radial_basis = radial_basis
#
# def transform_to_rbf(self, x, radial_basis=None):
# if radial_basis is None:
# assert self.radial_basis is not None
# radial_basis = self.radial_basis
# return transform_to_rbf(x, radial_basis, self.rbf_width)
#
# def predict(self, x):
# return self.model.predict(self.transform_to_rbf(x))
class LogisticClassifier(object):
    """Plain logistic-regression classifier trained by gradient ascent.

    Weights start at zero; ``update_weights`` performs one gradient-ascent
    step on the data log-likelihood.
    """

    def __init__(self, input_size):
        self.input_size = input_size
        self.weights = np.zeros([input_size], dtype=np.float64)
        self.num_steps = 0

    def update_weights(self, x, y, lr):
        """Take one gradient-ascent step on the log-likelihood.

        :param x: design matrix, one example per row
        :param y: binary labels (0/1)
        :param lr: learning rate
        """
        # Removed the dead bare `return` statements the original carried at
        # the end of this method and __init__.
        grad_log_lik = x.T.dot(y - logistic(x.dot(self.weights)))
        self.weights += lr * grad_log_lik

    def compute_avg_ll(self, x, y):
        """
        Compute the avg. log likelihood of the parameters given input x and labels y.
        """
        output_prob = self.predict(x)
        return avg_log_likelihood(y_true=y, y_pred=output_prob)

    def predict(self, x):
        """Return P(y=1 | x) for each row of ``x``."""
        return logistic(np.dot(x, self.weights))

    def predict_with_expanded(self, x, expand_func):
        """Predict after applying a feature-expansion function to ``x``."""
        return self.predict(expand_func(x))

    def hard_predict(self, x):
        """Return hard 0/1 labels using a 0.5 probability threshold."""
        return np.where(self.predict(x) > .5, 1, 0)
class LaplaceLogisticClassifier(object):
    """
    Bayesian Logistic Classifier with Laplace Approximation

    The weight posterior is approximated by a Gaussian centred at the MAP
    estimate with covariance given by the inverse Hessian of the negative
    log-posterior. An isotropic Gaussian prior N(prior_mean, prior_var * I)
    is placed on the weights.
    """
    def __init__(self, input_size, prior_mean=0., prior_var=1.):
        self.input_size = input_size
        self.prior_mean = prior_mean
        self.prior_var = prior_var
        # Populated by fit_map / fit_laplace_approx
        self.weights_map = None
        self.inv_covar = None
        self.covar = None
        # self.evidence = None
        # self.log_evidence = None
        return
    def fit_map(self, x, y, x_init=None):
        """
        Compute the weight values for the Maximum-a-posteriori of the weight posterior
        """
        if x_init is None:
            x_init = np.zeros([self.input_size], dtype=np.float64)
        # Minimise the negative (truncated) log-posterior with its analytic
        # gradient; L-BFGS-B handles the smooth unconstrained problem well.
        def neg_log_posterior_func(weights):
            return -self.log_posterior_trunc(weights, x, y)
        def neg_log_posterior_jacobian(weights):
            return -self.log_posterior_jacobian(weights, x, y)
        res = scipy.optimize.minimize(neg_log_posterior_func, x_init, method='L-BFGS-B',
                                      jac=neg_log_posterior_jacobian)
        if res['success']:
            self.weights_map = res['x']
        else:
            raise Exception('Unsuccessful optimisation:\n' + str(res['message']))
        # res = scipy.optimize.fmin_l_bfgs_b(neg_log_posterior_func, x0=x_init, fprime=neg_log_posterior_jacobian)
        # self.weights_map = res[0]
        return
    def log_posterior_trunc(self, weights: np.ndarray, x: np.ndarray, y: np.ndarray):
        """
        Compute the log-posterior excluding the log of Gaussian normalising constant on the prior (i.e. only
        keep the terms dependent on the weights). Used for optimisation purposes
        """
        ll_term = np.sum(logistic_log_likelihood(weights, x, y))
        prior_term = - 0.5 * np.sum((weights - self.prior_mean)**2) / self.prior_var
        return ll_term + prior_term
    def log_unnorm_posterior(self, weights: np.ndarray, x: np.ndarray, y: np.ndarray):
        """
        Compute the log-unnormalised-posterior (includes the Gaussian
        normalising constant of the prior, unlike log_posterior_trunc)
        """
        ll_term = np.sum(logistic_log_likelihood(weights, x, y))
        prior_term = -.5 * np.sum((weights - self.prior_mean)**2) / self.prior_var - \
                     .5 * self.input_size * np.log((2 * np.pi * self.prior_var))
        return ll_term + prior_term
    def log_posterior_jacobian(self, weights: np.ndarray, x: np.ndarray, y: np.ndarray):
        """
        Compute the grad of the log-posterior
        """
        grad_prior_term = - (weights - self.prior_mean) / self.prior_var
        grad_ll_term = logistic_ll_jacobian(weights, x, y)
        return grad_prior_term + grad_ll_term
    def calc_laplace_covariance(self, x):
        """
        Use the current MAP estimate to fit the covariance to the laplace approximation to the posterior.
        Note that this does not depend on the true labels
        """
        # Negative Hessian of the log-likelihood plus the prior precision
        inv_covar = -logistic_ll_hessian(self.weights_map, x) + np.eye(self.input_size) / self.prior_var
        self.inv_covar = inv_covar
        # Make sure the matrix is positive definite todo
        return inv_covar
    def fit_laplace_approx(self, x, y):
        """Fit MAP (if needed), then the Gaussian covariance and evidence."""
        if self.weights_map is None:
            self.fit_map(x, y)
        self.calc_laplace_covariance(x)
        self.covar = np.linalg.inv(self.inv_covar)
        self.calc_evidence(x, y)
    def calc_evidence(self, x, y):
        """
        The approximate normalising constant from Laplace approx.
        Returns (log_evidence, evidence) and stores both on the instance.
        """
        if self.inv_covar is None:
            raise ReferenceError("self.inv_covar is None. The inverse of covariance has not been calculated yet, but is"
                                 " needed for calc_evidence.")
        log_prob_of_data = np.sum(logistic_log_likelihood(self.weights_map, x, y)) - \
                           .5 * np.sum((self.weights_map - self.prior_mean) ** 2) / self.prior_var - \
                           .5 * self.input_size * np.log(self.prior_var) - \
                           .5 * log_determinant(self.inv_covar)
        prob_of_data = np.exp(log_prob_of_data)
        self.log_evidence = log_prob_of_data
        self.evidence = prob_of_data
        return log_prob_of_data, prob_of_data
    def bayesian_predict(self, x):
        """Predictive probabilities using the probit approximation to the
        logistic-Gaussian integral."""
        pred_var = np.sum(x * np.dot(x, self.covar), axis=1)
        pred_mean = np.dot(x, self.weights_map)
        kappa = (1 + np.pi * pred_var / 8)**(-0.5)  # as defined in Bishop's book chapter 4
        bayes_predictions = logistic(pred_mean * kappa)
        return bayes_predictions
    def predict(self, x):
        return self.bayesian_predict(x)
    def hard_bayes_predict(self, x):
        # Threshold the Bayesian predictive probability at 0.5
        return np.where(self.bayesian_predict(x) > .5, 1, 0)
# class RBFLaplaceLogisticClassifier(LaplaceLogisticClassifier):
# def __init__(self, rbf_width, radial_basis, prior_mean=0., prior_var=1.):
# input_size = radial_basis.shape[0] + 1
# self.rbf_width = rbf_width
# self.radial_basis = radial_basis
# super().__init__(input_size, prior_mean=prior_mean, prior_var=prior_var)
#
# def transform_data(self, x):
# return transform_to_rbf(x, self.radial_basis, self.rbf_width, add_bias_term=True)
#
# def fit_map(self, x, y, x_init=None):
# super().fit_map(self.transform_data(x), y, x_init)
#
# def fit_laplace_approx(self, x, y):
# super().fit_laplace_approx(self.transform_data(x), y)
#
# def predict(self, x):
# super().predict(self.transform_data(x))
#
# def bayesian_predict(self, x):
# super().bayesian_predict(self.transform_data())
#
# def calc_evidence(self, x, y):
# super().calc_evidence(self.transform_data(x), y)
def logistic_log_likelihood(weights, x, y):
    """Per-example log-likelihood of labels ``y`` under a logistic model."""
    probs = logistic(x.dot(weights))
    return log_likelihood(y_true=y, y_pred=probs)
def logistic_ll_jacobian(weights, x, y):
    """Gradient of the logistic log-likelihood with respect to the weights."""
    residual = y - logistic(x.dot(weights))
    return np.dot(x.T, residual)
def logistic_ll_hessian(weights, x):
    """Hessian of the logistic log-likelihood with respect to the weights.

    Note the Hessian does not depend on the true labels.
    """
    output_probs = logistic(np.dot(x, weights))
    # Vectorised form of -sum_i p_i (1 - p_i) * outer(x_i, x_i): a single
    # einsum replaces the O(n) Python loop of outer products.
    scale = output_probs * (1 - output_probs)
    return -np.einsum('i,ij,ik->jk', scale, x, x)
# return -np.sum(output_probs * (1 - output_probs) * np.matmul(x[:, :, None], x[:, None, :]), axis=0)
|
from common import loghandler
class logger():
    '''Log system that writes log items to stdout and/or a file.

    stdout (bool): whether to write log to stdout
    writer (StreamWriter): file writer (None for no file output)
    showdate (bool): whether to include the date in the time field
    minseverity (str): minimum log severity level to be written
    '''
    def __init__(self, stdout=True, writer=None, showdate=False,
                 minseverity="INFO"):
        self.stdout = stdout
        self.showdate = showdate
        self.writer = writer
        self.minseverity = minseverity

    def _outputitem(self, item):
        # Drop items below the configured severity threshold
        if loghandler.severity_gt(self.minseverity, item.severity):
            return  # log item is less severe than minseverity
        if self.stdout:
            item.Printlogitem(self.showdate)
        if self.writer:
            item.Writelogitem(self.writer, self.showdate)

    def _log(self, source, severity, desc):
        # Shared path for the severity-specific helpers below; the four
        # public methods previously repeated this body verbatim.
        self._outputitem(loghandler.logitem(source, severity, desc))

    def log_info_message(self, source, desc):
        self._log(source, 'INFO', desc)

    def log_warning_message(self, source, desc):
        self._log(source, 'WARNING', desc)

    def log_severe_message(self, source, desc):
        self._log(source, 'SEVERE', desc)

    def log_fatal_message(self, source, desc):
        self._log(source, 'FATAL', desc)
|
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from license_manager_simulator.config import settings
# Engine bound to the configured database URL
engine = create_engine(settings.DATABASE_URL)
# Session factory; future=True opts in to SQLAlchemy 2.0-style behaviour
session = sessionmaker(autocommit=False, autoflush=False, bind=engine, future=True)
# Declarative base class that all ORM models inherit from
Base = declarative_base()
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import print_function, unicode_literals
import json
import os
import os.path
import re
import unicodedata
# Unique identifier for the workflow
WORKFLOW_UID = 'com.calebevans.youversionsuggest'

# Path to the user's home directory
HOME_DIR_PATH = os.path.expanduser('~')

# Path to the directory where this workflow stores non-volatile local data
LOCAL_DATA_DIR_PATH = os.path.join(
    HOME_DIR_PATH, 'Library', 'Application Support', 'Alfred',
    'Workflow Data', WORKFLOW_UID)

# Path to the directory containing data files that are part of the packaged
# workflow
PACKAGED_CODE_DIR_PATH = os.path.join(os.getcwd(), 'yvs')

# The template used to build the URL for a Bible reference
REF_URL_TEMPLATE = 'https://www.bible.com/bible/{ref}'
# Creates the directory (and any nonexistent parent directories) where this
# workflow stores non-volatile local data
def create_local_data_dir():
    """Create the local data directory tree if it does not already exist."""
    try:
        os.makedirs(LOCAL_DATA_DIR_PATH)
    except OSError:
        # Raised when the directory already exists; makedirs(exist_ok=True)
        # is unavailable under Python 2, which this file still supports
        pass
# Retrieves bible data object (books, versions, etc.) for the given language
def get_bible(language_id):
    """Load the Bible data object (books, versions, etc.) for a language."""
    file_name = 'bible-{}.json'.format(language_id)
    data_path = os.path.join(
        PACKAGED_CODE_DIR_PATH, 'data', 'bible', file_name)
    with open(data_path, 'r') as data_file:
        return json.load(data_file)
# Retrieves metadata for every book of the Bible, including chapter counts
def get_book_metadata():
    """Load metadata for every book of the Bible, including chapter counts."""
    metadata_path = os.path.join(
        PACKAGED_CODE_DIR_PATH, 'data', 'bible', 'book-metadata.json')
    with open(metadata_path, 'r') as metadata_file:
        return json.load(metadata_file)
# Retrieves name of first book whose id matches the given id
def get_book(books, book_id):
    """Return the name of the first book whose id matches, else None."""
    return next(
        (book['name'] for book in books if book['id'] == book_id), None)
# Retrieves first version object whose id matches the given id
def get_version(versions, version_id):
    """Return the first version object whose id matches, else None."""
    return next(
        (version for version in versions if version['id'] == version_id),
        None)
# Retrieves a list of all supported versions for the given language
def get_versions(language_id):
    """Return the list of all supported versions for the given language."""
    return get_bible(language_id)['versions']
# Retrieves a list of all supported languages
def get_languages():
    """Load the list of all supported languages."""
    path = os.path.join(
        PACKAGED_CODE_DIR_PATH, 'data', 'bible', 'languages.json')
    with open(path, 'r') as fh:
        return json.load(fh)
# Build the object for a single result list feedback item
def get_result_list_feedback_item(result):
    """Build the Alfred feedback item for a single result.

    The input dict is shallow-copied so the caller's object is untouched;
    missing copy/largetype text and the ctrl modifier default to the title.
    """
    item = result.copy()
    title = result['title']
    # Text copied to clipboard (cmd-c) and shown via Large Type
    text = result.get('text', {}).copy()
    text.setdefault('copy', title)
    text.setdefault('largetype', title)
    item['text'] = text
    # Use different args when different modifiers are pressed
    mods = result.get('mods', {}).copy()
    mods.setdefault('ctrl', {'arg': title})
    item['mods'] = mods
    # Icon shown next to result text
    item['icon'] = {'path': 'icon.png'}
    return item
# Constructs an Alfred JSON string from the given result list
def get_result_list_feedback_str(results):
    """Serialise a result list into Alfred's JSON feedback format."""
    feedback_items = [get_result_list_feedback_item(r) for r in results]
    return json.dumps({'items': feedback_items})
# Functions for accessing/manipulating mutable preferences
# Retrieves the path to the workflow's default user preferences file
def get_default_user_prefs_path():
    """Return the path to the bundled default-preferences file."""
    return os.path.join(PACKAGED_CODE_DIR_PATH, 'preferences', 'defaults.json')
# Retrieves the default values for all workflow preferences
def get_default_user_prefs():
    """Load the default values for all workflow preferences."""
    with open(get_default_user_prefs_path(), 'r') as fh:
        return json.load(fh)
# Retrieves the path to the workflow's user preferences file
def get_user_prefs_path():
    """Return the path to the workflow's user preferences file."""
    return os.path.join(LOCAL_DATA_DIR_PATH, 'preferences.json')
# Overrwrites (or creates) user preferences using the given preferences object
def set_user_prefs(user_prefs):
    """Overwrite (or create) the user preferences file."""
    # The data directory (where preferences live) must exist first
    create_local_data_dir()
    with open(get_user_prefs_path(), 'w') as fh:
        json.dump(user_prefs, fh, indent=2, separators=(',', ': '))
# Extends user preferences with any missing keys
def extend_user_prefs(user_prefs, default_user_prefs):
    """Synchronise user preferences with the current set of defaults.

    Adds any preference keys missing from ``user_prefs`` and removes keys
    that no longer exist in ``default_user_prefs``. Mutates and returns
    ``user_prefs``.
    """
    # Add any missing preferences
    for pref_key in default_user_prefs:
        if pref_key not in user_prefs:
            user_prefs[pref_key] = default_user_prefs[pref_key]
    # Remove any obsolete preferences. Iterate over a snapshot of the keys:
    # deleting entries while iterating the live key view raises
    # RuntimeError on Python 3 (on Python 2, .keys() returned a list).
    for pref_key in list(user_prefs.keys()):
        if pref_key not in default_user_prefs:
            del user_prefs[pref_key]
    return user_prefs
# Retrieves map of user preferences
def get_user_prefs():
    """Load user preferences, creating them from defaults when absent."""
    defaults = get_default_user_prefs()
    try:
        with open(get_user_prefs_path(), 'r') as fh:
            stored_prefs = json.load(fh)
    except IOError:
        # No preferences file yet; seed it with the defaults
        set_user_prefs(defaults)
        return defaults
    return extend_user_prefs(stored_prefs, defaults)
# Query-related functions
# Normalizes the format of the query string
def normalize_query_str(query_str):
    """Normalise a query string: NFC Unicode form, lowercase, and
    alphanumeric words separated by single spaces."""
    normalized = unicodedata.normalize('NFC', query_str).lower()
    # Turn every non-alphanumeric character (and underscore) into a space
    normalized = re.sub(r'[\W_]', ' ', normalized, flags=re.UNICODE)
    # Trim the ends and collapse internal whitespace runs to single spaces
    return re.sub(r'\s+', ' ', normalized.strip())
# Parses the given reference UID into a dictionary representing that reference
def get_ref(ref_uid, user_prefs):
    """Parse a reference UID (e.g. '111/jhn.3.16-17') into a dict.

    The UID format is '<version>/<book_id>.<chapter>[.<verse>[-<endverse>]]'.
    The book and version display names are resolved from the Bible data for
    the currently-set language in ``user_prefs``.
    """
    patt = r'^{version}/{book_id}\.{chapter}(?:\.{verse}{endverse})?$'.format(
        version=r'(\d+)',
        book_id=r'(\d?[a-z]+)',
        chapter=r'(\d+)',
        verse=r'(\d+)',
        endverse=r'(?:-(\d+))?')
    # NOTE(review): assumes ref_uid always matches; a malformed UID would
    # raise AttributeError on the group() calls below
    ref_uid_match = re.match(patt, ref_uid)
    ref = {
        'uid': ref_uid,
        'book_id': ref_uid_match.group(2),
        'version_id': int(ref_uid_match.group(1)),
        'chapter': int(ref_uid_match.group(3))
    }
    # Include book name using book ID and currently-set language
    bible = get_bible(user_prefs['language'])
    book_name = get_book(bible['books'], ref['book_id'])
    ref['book'] = book_name
    # Include verse number if it exists
    verse_match = ref_uid_match.group(4)
    if verse_match:
        ref['verse'] = int(verse_match)
    # Include end verse number if it exists
    endverse_match = ref_uid_match.group(5)
    if endverse_match:
        ref['endverse'] = int(endverse_match)
    # Include full version name (acronym) if it exists
    version_name = get_version(bible['versions'], ref['version_id'])['name']
    ref['version'] = version_name
    return ref
# Retrieves the basic reference name without the version abbreviation
def get_basic_ref_name(ref):
    """Build the reference name without the version abbreviation,
    e.g. 'Luke 2:3-5'."""
    name = '{0} {1}'.format(ref['book'], ref['chapter'])
    if 'verse' in ref:
        name += ':{0}'.format(ref['verse'])
    if 'endverse' in ref:
        name += '-{0}'.format(ref['endverse'])
    return name
# Retrieves the full reference name with the version abbreviation
def get_full_ref_name(ref):
    """Return the reference name suffixed with its version abbreviation,
    e.g. 'Luke 2:3 (NIV)'."""
    basic_name = get_basic_ref_name(ref)
    return '{0} ({1})'.format(basic_name, ref['version'])
# Builds the URL used to view the reference with the given UID
def get_ref_url(ref_uid):
    """Build the bible.com URL used to view the given reference UID."""
    uppercased_uid = ref_uid.upper()
    return REF_URL_TEMPLATE.format(ref=uppercased_uid)
# Normalizes format of reference content by removing superfluous whitespace
def normalize_ref_content(ref_content):
    """Normalise fetched reference content by removing superfluous
    whitespace."""
    # Single spaces only within a line
    content = re.sub(r' {2,}', ' ', ref_content)
    # At most one blank line between paragraphs
    content = re.sub(r'\n{3,}', '\n\n', content)
    # Drop outer whitespace, then stray spaces hugging each newline
    return re.sub(r' ?\n ?', '\n', content.strip())
|
import os
import hashlib
from scrapy.pipelines.files import FilesPipeline
from scrapy.utils.python import to_bytes
from six.moves.urllib.parse import urlparse
"""Pipeline to handle URL containing query parameters.
This pipeline is a temporary parser to remove existing query parameters in a URL string.
Example:
- The URL to download a gazette from Rio de Janeiro is http://doweb.rio.rj.gov.br/ler_pdf.php?download=ok&edi_id=3651
- Scrapy uses URL's sha1 to store the file locally
- However, Scrapy also uses the query parameter as file extension
Resulting filename: '4f15e18052b51dd8dffad1cc243279f40a2e21c3.php?download=ok&edi_id=3651'
- GazetteFilesPipeline ignores everything after file extension ('?download=ok&edi_id=3651')
and resulting in the following filename: '4f15e18052b51dd8dffad1cc243279f40a2e21c3.php'
This pipeline is temporary and could be deleted as soon as https://github.com/scrapy/scrapy/pull/2809 gets merged
For more details, see https://github.com/okfn-brasil/diario-oficial/issues/15
"""
class GazetteFilesPipeline(FilesPipeline):
    def file_path(self, request, response=None, info=None):
        """Return the storage path for a downloaded gazette file.

        The name is the SHA1 of the full URL; the extension is recomputed
        from the parsed URL path so query-string parameters do not leak
        into the filename (see the module docstring).
        """
        url = request.url
        media_guid = hashlib.sha1(
            to_bytes(url)
        ).hexdigest()  # the filename is the URL's sha1
        media_ext = os.path.splitext(url)[1]
        # NOTE(review): the extension keeps its leading '.', so isalnum()
        # is False even for clean URLs and this branch effectively always
        # recomputes the extension from the parsed path — harmless, but
        # confirm whether the check was meant to inspect media_ext[1:]
        if not media_ext.isalnum():
            media_ext = os.path.splitext(urlparse(url).path)[
                1
            ]  # remove everything after the file extension (like query param)
        return "full/%s%s" % (media_guid, media_ext)
|
from blaze.mahimahi.server.filestore import check_cacheability
from blaze.mahimahi.server.filestore import CACHE_CONTROL_HEADER, EXPIRES_HEADER, PRAGMA_HEADER, LAST_MODIFIED_HEADER
class TestCheckCacheability:
    """Unit tests for ``check_cacheability`` header inspection."""

    def test_no_headers(self):
        # A response with no caching-related headers is not cacheable
        assert not check_cacheability({})

    def test_not_cacheable(self):
        # Header sets that must be rejected: max-age=0 (anywhere in the
        # directive list), no-store / no-cache, Pragma: no-cache, Expires: 0,
        # and unparseable max-age values with no fallback
        tests = [
            {CACHE_CONTROL_HEADER: "junk=123, max-age=0, junk4=1"},
            {CACHE_CONTROL_HEADER: "junk=123, junk4=1, max-age=0"},
            {CACHE_CONTROL_HEADER: "no-store, junk=123"},
            {CACHE_CONTROL_HEADER: "junk=123, no-cache"},
            {CACHE_CONTROL_HEADER: "junk=123, no-cache, max-age=123"},
            {PRAGMA_HEADER: "no-cache"},
            {EXPIRES_HEADER: "0"},
            {EXPIRES_HEADER: "0", CACHE_CONTROL_HEADER: "max-age=0"},
            {CACHE_CONTROL_HEADER: "max-age=asdf"},
            {CACHE_CONTROL_HEADER: "max-age=asdf", EXPIRES_HEADER: "0"},
        ]
        for test in tests:
            assert not check_cacheability(test), f"failed: {test}"

    def test_cacheable(self):
        # Header sets that must be accepted: positive max-age, a usable
        # Expires value (even alongside a bad max-age), or Last-Modified
        tests = [
            {CACHE_CONTROL_HEADER: "max-age=123"},
            {CACHE_CONTROL_HEADER: "max-age=asdf", EXPIRES_HEADER: "1600"},
            {EXPIRES_HEADER: "1600"},
            {LAST_MODIFIED_HEADER: "today"},
            {LAST_MODIFIED_HEADER: "today", CACHE_CONTROL_HEADER: "max-age=asdf"},
        ]
        for test in tests:
            assert check_cacheability(test), f"failed: {test}"
|
from datetime import datetime
from piccolo.apps.user.tables import BaseUser
from piccolo.columns import Boolean, ForeignKey, Timestamp, Varchar
from piccolo.table import Table
class Task(Table):
    """
    An example table.
    """
    # Short human-readable title of the task
    name = Varchar()
    # Whether the task has been finished
    completed = Boolean(default=False)
    # NOTE(review): no default is set, so this column is not auto-populated
    # on insert — confirm callers supply it
    created_at = Timestamp()
    # Owner of the task
    task_user = ForeignKey(BaseUser)
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["Catalog"]
import os
import requests
import pandas as pd
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
class Catalog(object):
    """Lazy loader for the K2 (EPIC) target catalog.

    Downloads the catalog from the Exoplanet Archive API on first access
    and caches it in a local HDF5 file at ``filename``.
    """

    url = ("http://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/"
           "nph-nstedAPI?table=k2targets&select=*")

    def __init__(self, filename):
        self._df = None
        self.name = "epic"
        self.filename = filename

    @property
    def df(self):
        """The catalog as a pandas DataFrame (downloaded/loaded on demand)."""
        if self._df is None:
            self.download()
            self._df = pd.read_hdf(self.filename, self.name)
        return self._df

    def download(self, clobber=False):
        """Fetch the catalog and save it as HDF5.

        Does nothing if the target file already exists unless ``clobber``
        is True. Raises ``requests.HTTPError`` if the API request fails.
        """
        if os.path.exists(self.filename) and not clobber:
            return

        # Request the table.
        r = requests.get(self.url)
        if r.status_code != requests.codes.ok:
            # BUG FIX: this was misspelled 'raise_for_stataus', which would
            # have raised AttributeError instead of the intended HTTPError.
            r.raise_for_status()

        # Load the contents using pandas.
        self._df = pd.read_csv(StringIO(r.content))

        # Save it to an HDF5 file, creating parent directories as needed.
        try:
            os.makedirs(os.path.dirname(os.path.abspath(self.filename)))
        except os.error:
            pass
        self._df.to_hdf(self.filename, self.name, format="t")
|
from loguru import logger
from .Models import DataBase
from .Models.User import User
import settings
#from Models.Admin import Admin
#from Models.Moderator import Moderator
class UserManager():
    """Facade over the user table: registers users on first sight and
    provides a decorator that injects the calling user into handlers."""

    @logger.catch
    def __init__(self):
        # Initialise the DB connection once; sessions are opened per call
        DataBase.Init(settings.DB_CONN_STR)
        self._session = None

    def RequirePermissin(self, **kwargs):
        # NOTE(review): the misspelled name ('Permissin') is kept because
        # external callers depend on it; kwargs are currently unused
        return self.Wraps

    def Wraps(self, func):
        """Decorator: opens a session, resolves (creating if needed) the
        sender's User row, and passes it to ``func`` after the message."""
        def wrapper(message):
            self._session = DataBase.GetSession()
            func(
                message,
                self._addandorgetuser(
                    message.from_user.id,
                    message.from_user.username,
                    message.from_user.first_name,
                    message.from_user.last_name
                )
            )
            self._session.close()
        return wrapper

    @logger.catch
    def AddUser(self, Tg_id, UserName, FirstName, LastName):
        """Ensure a user row exists for the given Telegram identity."""
        self._session = DataBase.GetSession()
        self._addandorgetuser(Tg_id, UserName, FirstName, LastName)
        self._session.close()

    @logger.catch
    def _addandorgetuser(self, Tg_id, UserName, FirstName, LastName):
        """Insert the user if unseen and return a User object.

        NOTE(review): when the user already exists this still returns the
        freshly constructed (unsaved) User instance rather than the stored
        row — confirm callers only rely on the identity fields.
        """
        u = User(Tg_id, UserName, FirstName, LastName)
        newuser = False
        if self._session.query(User).filter_by(Tg_id=Tg_id).first() is None:
            self._session.add(u)
            self._session.commit()
            newuser = True
        logger.debug(f"User <{u}> was requesed ({'Alredy exists' if not newuser else 'Has been created'})")
        #session.close()
        return u
|
def get_all_py_path(root):
    """Recursively collect paths of all non-hidden ``.py`` files under ``root``.

    :param root: directory to walk
    :return: list of file paths (one per .py file found)
    """
    import os  # local import: this module does not import os at top level

    results = []
    for dirpath, dirnames, filenames in os.walk(root):
        for filename in filenames:
            # Skip hidden files and anything that is not a Python source file
            if filename.startswith('.') or not filename.endswith('.py'):
                continue
            # os.path.join is portable; the original hard-coded '\\', which
            # produced broken paths on non-Windows systems, and its suffix
            # check used fragile manual slicing
            results.append(os.path.join(dirpath, filename))
    return results
def get_all_py_content(file_paths):
    """Read and return the text content of each file in ``file_paths``.

    Undecodable bytes are ignored, matching the original behaviour; the
    returned list parallels ``file_paths``.
    """
    codes = []
    for path in file_paths:
        # 'with' guarantees the handle is closed even if read() raises;
        # open read-only ('r') — the original used 'r+' (read-write) for a
        # pure read, needlessly requiring write permission
        with open(path, 'r', errors='ignore') as f:
            codes.append(f.read())
    return codes
|
# Package metadata consumed by setup tooling and runtime introspection
__title__ = 'FuelSDKWrapper'
__version__ = '1.3.4'
__author__ = 'Seb Angel'
__license__ = 'MIT'
|
import os
import sys
DIRNAME = os.path.dirname(__file__)
# Make the project root importable before pulling in fabdeploy
sys.path.insert(0, os.path.dirname(os.path.dirname(DIRNAME)))

# Monkey-patching must happen before the fabric/fabdeploy APIs are imported
from fabdeploy import monkey; monkey.patch_all()
from fabric.api import *
from fabdeploy.api import *; setup_fabdeploy()
@task
def execute():
    # NOTE: Python 2 print statement — this fabric script predates Python 3
    print env.conf.db.execute
|
def init_actions_(service, args):
    """Return the dependency mapping between this service's actions.

    Looks at ACTION_DEPS in this module for an example of what is expected;
    by default the 'test' action simply depends on 'install'.
    """
    # Default wiring for simple actions
    dependencies = {'test': ['install']}
    return dependencies
def test(job):
    """
    Test recurring actions with hanging jobs

    Deploys a blueprint whose recurring action hangs, waits past its
    configured timeout, and verifies that no jobs for the hanging action
    were recorded in that window. The outcome is written into
    ``job.service.model.data.result``.
    """
    import sys
    import os
    import time
    import threading
    RESULT_OK = 'OK : %s'
    RESULT_FAILED = 'FAILED : %s'
    RESULT_ERROR = 'ERROR : %s %%s' % job.service.name
    model = job.service.model
    model.data.result = RESULT_OK % job.service.name
    failures = []
    repos = []
    try:
        # The hanging job should block the queue, so zero jobs are expected
        expected_nr_of_jobs = 0
        curdir = os.getcwd()
        j.atyourservice.reposDiscover()
        repo = j.atyourservice.repoGet(j.sal.fs.joinPaths(j.dirs.codeDir, 'github/jumpscale/jumpscale_core8/tests/sample_repo_recurring'))
        repos.append(repo)
        bp_path = j.sal.fs.joinPaths(repo.path, 'blueprints', 'test_recurring_actions_hanging_jobs.yaml')
        repo.blueprintExecute(path=bp_path)
        # find the service and retrieve the timeout value
        srv = repo.serviceGet('test_recurring_actions_1', 'hanging')
        timeout = srv.model.data.timeout
        # Run 'ays run' in a background daemon thread so we can observe it hang
        thread = threading.Thread(target=job.service.executor.execute, args=("ays run --ask", ), daemon=True)
        start_time = time.time()
        os.chdir(repo.path)
        thread.start()
        time.sleep((timeout * 60) + 60)  # add one minute to the configured timeout
        end_time = time.time()
        nr_of_jobs = len(j.core.jobcontroller.db.jobs.find(actor='test_recurring_actions_1', service='hanging',
                                                           action='execute_hanging_job', fromEpoch=start_time,
                                                           toEpoch=end_time))
        if nr_of_jobs != expected_nr_of_jobs:
            failures.append('Wrong number of jobs found. Expected [%s] found [%s]' % (expected_nr_of_jobs, nr_of_jobs))
        if failures:
            model.data.result = RESULT_FAILED % '\n'.join(failures)
    except:
        # NOTE(review): bare except is deliberate — any error is recorded
        # in the result instead of crashing the test runner
        model.data.result = RESULT_ERROR % str(sys.exc_info()[:2])
    finally:
        # Always persist the result and tear down any repos we created
        job.service.save()
        if repos:
            for repo in repos:
                repo.destroy()
|
# -*- coding: utf-8 -*-
"""Includes class to calculate the network-based features."""
import logging
import multiprocessing as mp
import numpy as np
import pandas as pd
from ppi_network_annotation.model.network import Network
from ppi_network_annotation.model.neighborhood_network import NeighborhoodNetwork
from igraph import Vertex, VertexSeq
from scipy import sparse
from sklearn.preprocessing import normalize
from tqdm import tqdm
# Module-level logger for the node-scoring feature extraction
logger = logging.getLogger(__name__)
# Limit printed float precision for debug output
np.set_printoptions(precision=3)
class NodeScorer:
"""Class for calculating features using interaction and differential expression information."""
    def __init__(self, network: Network):
        """Construct the object.

        :param network: The PPI network with differential gene expression annotation.
        """
        self.ppi_network = network
        # Collapse loops/parallel edges; combine_edges=min keeps the minimum
        # attribute value when parallel edges are merged
        self.ppi_network.graph.simplify(combine_edges=min)
        self.neighborhood_network = NeighborhoodNetwork(network)
def score_nodes(self, diff_type: str) -> pd.DataFrame:
"""Score nodes using all network measures and write to a file.
:param feature_path: Path to write the file.
:param diff_type: Differential expression type to be chosen by the user; all, down, or up.
"""
logger.info("In extract_features()")
neighborhood_scores = self.score_neighborhood()
interconnectivity2_scores = self.score_interconnectivity(diff_type, "second-degree")
random_walk_scores = self.score_by_random_walk(diff_type)
network_prop_scores = self.score_by_network_propagation(diff_type)
local_radiality_scores = self.score_local_radiality(diff_type)
print(local_radiality_scores)
df = pd.DataFrame({
"GeneID": self.ppi_network.graph.vs["name"],
"Neighborhood": neighborhood_scores,
"Interconnectivity": interconnectivity2_scores,
"RandomWalk": random_walk_scores,
"NetworkProp": network_prop_scores,
"LocalRadiality": local_radiality_scores
})
#
# logger.info('Writing network to %s', feature_path)
# df.to_csv(feature_path,
# encoding="utf-8",
# sep="\t",
# index=False)
return df
def score_neighborhood(self) -> list:
"""Score all nodes using neighborhood scoring algorithm.
:return list: A list of scores, sorted by node index.
"""
logger.info("In neighborhood_scoring()")
return list(map(self._neighborhood, self.ppi_network.graph.vs))
def _neighborhood(self, node: Vertex) -> float:
"""Score a node based on its and its neighbours' log fold change.
:param Vertex node: Node to be scored.
:return float: Score of the node.
"""
node_fc = abs(node["l2fc"])
sum_fc = 0
for n in node.neighbors():
sum_fc += abs(n["l2fc"])
if len(node.neighbors()) > 0:
return 0.5 * node_fc + 0.5 * sum_fc / len(node.neighbors())
else:
return 0
def score_interconnectivity(self, diff_type: str = "all",
neighbor_type: str = "direct") -> list:
"""Score all nodes based on interconnectivity algorithm.
:param str diff_type: Differential expression type chosen by the user; all, down, or up.
:param str neighbor_type: The degree of neighborhood relationship; direct or second-degree.
:return list: A list of scores, sorted by node index.
"""
logger.info("In interconnectivity_nodes()")
icn_mat = self._interconnectivity_edges(diff_type, neighbor_type)
diff_expr = self.ppi_network.get_differentially_expressed_genes(diff_type)
icn = np.sum(icn_mat[diff_expr.indices, :], axis=0) / len(diff_expr)
return list(icn)
def _interconnectivity_edges(self, diff_type: str = "all",
neighbor_type: str = "direct") -> np.ndarray:
"""Score pairs of nodes based on their shared neighborhood.
:param str diff_type: Differential expression type chosen by the user; all, down, or up.
:param str neighbor_type: The degree of neighborhood relationship; direct or second-degree.
:return np.ndarray: A matrix of scores for pairs.
"""
key = self._get_diff_expr_key(diff_type)
nodes = list(self.ppi_network.graph.vs)
degrees = self.ppi_network.graph.degree(nodes)
icn_mat = np.zeros([len(nodes), len(nodes)], dtype=float)
diff_expressed = self._get_diff_expr_vertices(diff_type).indices
edges = self.ppi_network.graph.es.select(_within=diff_expressed)
for edge in tqdm(edges, desc="Interconnectivity"):
icn_score, source, target = self._interconnectivity_edge(degrees, edge, key,
neighbor_type)
icn_mat[source.index, target.index] = icn_score
icn_mat[target.index, source.index] = icn_score
return icn_mat
def _interconnectivity_edge(self, degrees, edge, key, neighbor_type) -> tuple:
"""Calculate the inteconnectivity score of one edge.
:param degrees: Degrees of all nodes.
:param edge: The edge for which the interconnectivity score will be calculated.
:param key: Differential expression type, up_regulated, down_regulated or diff_expressed.
:param neighbor_type: The degree of neighborhood relationship; direct or second-degree.
:return: Interconnectivity score of the edge, source and target vertices of the edge
"""
source = self.ppi_network.graph.vs.find(edge.source)
target = self.ppi_network.graph.vs.find(edge.target)
icn_score = 0
if edge != -1 and (source[key] or target[key]):
overlap = self.neighborhood_network.get_neighborhood_overlap(source, target,
neighbor_type)
mult_degrees = degrees[source.index] * degrees[target.index]
if mult_degrees > 0:
icn_score = (2 + len(overlap)) / np.sqrt(mult_degrees)
return icn_score, source, target
def score_local_radiality(self, diff_type: str = "all") -> list:
self.diff_expressed = self._get_diff_expr_vertices(diff_type).indices
try:
pool = mp.Pool()
scores = pool.map(self._local_radiality, self.ppi_network.graph.vs)
except:
pass
finally:
pool.close()
return scores
def _local_radiality(self, v):
shortest_paths = self.ppi_network.graph.get_shortest_paths(v, to=self.diff_expressed)
lengths = [len(path) for path in shortest_paths]
return sum(lengths) / len(self.diff_expressed)
def score_by_random_walk(self, diff_type: str = "all") -> list:
"""Score nodes using random walk algorithm (Koehler et al).
:param str diff_type: Differential expression type chosen by the user; all, down, or up.
:return list: List of scores, sorted by node index.
"""
logger.info("In random_walk()")
self._random_walk_init(diff_type)
adj = sparse.coo_matrix(
np.array(self.ppi_network.graph.get_adjacency().data, dtype="float64")
)
adj = normalize(adj, norm="l1", axis=0) # column normalized
return self._walk_randomly(adj, "random_walk_score", 0.5)
def _random_walk_init(self, diff_type: str = "all") -> None:
"""Initialize the graph for random walk algorithm.
By setting attribute "random_walk_score" to 1/no_of_diff_expressed
for differentially expressed genes.
:param str diff_type: Differential expression type chosen by the user; all, down, or up.
"""
self.ppi_network.graph.vs["random_walk_score"] = 0
if diff_type == "up":
prob = 1 / len(self.ppi_network.graph.vs.select(up_regulated_eq=True))
self.ppi_network.graph.vs.select(up_regulated_eq=True)["random_walk_score"] = prob
elif diff_type == "down":
prob = 1 / len(self.ppi_network.graph.vs.select(down_regulated_eq=True))
self.ppi_network.graph.vs.select(down_regulated_eq=True)["random_walk_score"] = prob
else:
prob = 1 / len(self.ppi_network.graph.vs.select(diff_expressed_eq=True))
self.ppi_network.graph.vs.select(diff_expressed_eq=True)["random_walk_score"] = prob
def score_by_network_propagation(self, diff_type: str = "all") -> list:
"""Score nodes using network propagation algorithm.
:param str diff_type: Differential expression type chosen by the user; all, down, or up.
:return list: A list of scores, sorted by node index.
"""
logger.info("In propagate_network()")
self._propagate_network_init(diff_type)
adj = sparse.dok_matrix(
np.array(self.ppi_network.graph.get_adjacency().data, dtype="float64")
)
# normalized by the degrees of source and target nodes
adj = self._normalize_by_degrees(adj)
return self._walk_randomly(adj, "network_prop_score", 0.5)
def _propagate_network_init(self, diff_type: str = "all") -> None:
"""Initialize the graph for network propagation algorithm.
By setting attribute "network_prop_score" to 1 for differentially
expressed genes.
:param str diff_type: Differential expression type chosen by the user; all, down, or up.
"""
self.ppi_network.graph.vs["network_prop_score"] = 0
vertices = self.ppi_network.graph.vs
if diff_type == "up":
vertices.select(up_regulated_eq=True)["network_prop_score"] = 1
elif diff_type == "down":
vertices.select(down_regulated_eq=True)["network_prop_score"] = 1
else:
vertices.select(diff_expressed_eq=True)["network_prop_score"] = 1
def _normalize_by_degrees(self, adj: sparse.dok_matrix) -> sparse.dok_matrix:
"""Normalize an adjacency matrix based on the node degrees(Vanunu et al).
:param adj: Adjacency matrix to be normalized.
:return: Normalized adjacency matrix.
"""
row_sums = np.sum(adj, axis=0)
dia = row_sums + 1
norm_adj = sparse.dok_matrix(np.zeros(adj.shape))
for key in adj.keys():
norm_adj[key] = adj[key] / np.sqrt(dia[0, key[0]] * dia[0, key[1]])
return norm_adj
def _walk_randomly(self, adj, score_type: str, alpha: float = 0.5) -> list:
""" Randomly walk on the network while updating the visitation probabilities.
:param adj: Normalized adjacency matrix.
:param score_type: One of random_walk_score, diffusion_score, or network_prop_score.
:param alpha: Probability of restarting the walk.
:return: Vector of updated visitation probabilities.
"""
# initialize for first round
p0 = self.ppi_network.graph.vs[score_type]
pt1 = p0
pt2 = self._update_visitation_probabilities(p0, pt1, adj, alpha)
while self._l1_norm(pt1, pt2) > 10 ** -6:
pt1 = pt2
pt2 = self._update_visitation_probabilities(p0, pt1, adj, alpha)
return list(pt2)
def _update_visitation_probabilities(self, p0, p1, adj, alpha: float = 0.5) -> np.ndarray:
"""Update the visitation probabilities.
:param p0: scores at time point 0.
:param p1: scores at time point t.
:param alpha: Weighting factor.
:return: p2: scores at time point t+1.
"""
p1 = np.array(p1, dtype="float64")
p0 = np.array(p0, dtype="float64")
p2 = (1 - alpha) * adj.dot(p1) + alpha * p0
return p2
def _l1_norm(self, v1: np.ndarray, v2: np.ndarray) -> float:
"""Calculate the L1 norm of two vectors.
:param v1: Vector 1.
:param v2: Vector 2.
:return: L1 norm of v1 and v2.
"""
return sum(
abs(a - b)
for a, b in zip(v1, v2)
)
def _get_diff_expr_vertices(self, diff_type: str) -> VertexSeq:
""" Get the vertices associated with differentially expressed genes.
:param str diff_type: Differential expression type chosen by the user; all, down, or up.
:return: Set of vertices associated with differentially expressed genes.
"""
if diff_type == "up":
return self.ppi_network.graph.vs.select(up_regulated_eq=True)
if diff_type == "down":
return self.ppi_network.graph.vs.select(down_regulated_eq=True)
return self.ppi_network.graph.vs.select(diff_expressed_eq=True)
def _get_diff_expr_key(self, diff_type: str) -> str:
"""Get the network key of different types of differentially expressed genes.
:param str diff_type: Differential expression type chosen by the user; all, down, or up.
:return: Network key of the inputted diff_type.
"""
if diff_type == "up":
return "up_regulated"
if diff_type == "down":
return "down_regulated"
return "diff_expressed"
|
# -*- coding: utf-8 -*-
"""
LaTeX Lexer
~~~~~~~~~~~
This module contains all classes for lexing LaTeX code, as well as
general purpose base classes for incremental LaTeX decoders and
encoders, which could be useful in case you are writing your own
custom LaTeX codec.
.. autoclass:: Token(name, text)
.. autoclass:: LatexLexer
:show-inheritance:
:members:
.. autoclass:: LatexIncrementalLexer
:show-inheritance:
:members:
.. autoclass:: LatexIncrementalDecoder
:show-inheritance:
:members:
.. autoclass:: LatexIncrementalEncoder
:show-inheritance:
:members:
"""
# Copyright (c) 2003, 2008 David Eppstein
# Copyright (c) 2011-2020 Matthias C. M. Troffaes
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import codecs
import collections
import re
from six import add_metaclass, binary_type, string_types
import unicodedata
Token = collections.namedtuple("Token", "name text")
# implementation note: we derive from IncrementalDecoder because this
# class serves excellently as a base class for incremental decoders,
# but of course we don't decode yet until later
class MetaRegexpLexer(type):
    """Metaclass for :class:`RegexpLexer`.

    When a lexer class is created, all of its token regular
    expressions are joined into a single alternation of named
    groups and the compiled pattern is stored on the class as
    ``regexp``.
    """

    def __init__(cls, name, bases, dct):
        super(MetaRegexpLexer, cls).__init__(name, bases, dct)
        alternatives = [
            u"(?P<{0}>{1})".format(token_name, token_regexp)
            for token_name, token_regexp in cls.tokens
        ]
        # DOTALL so '.' in token patterns also matches newlines
        cls.regexp = re.compile(u"|".join(alternatives), re.DOTALL)
@add_metaclass(MetaRegexpLexer)
class RegexpLexer(codecs.IncrementalDecoder):
    """Abstract base class for regexp based lexers."""

    emptytoken = Token(u"unknown", u"")
    """The empty token."""

    tokens = ()
    """Tuple containing all token regular expressions."""

    def __init__(self, errors='strict'):
        """Initialize the codec."""
        self.errors = errors
        self.reset()

    def reset(self):
        """Reset state: clear the pending (possibly incomplete) token."""
        self.raw_buffer = self.emptytoken

    def getstate(self):
        """Get state."""
        return (self.raw_buffer.text, 0)

    def setstate(self, state):
        """Set state. The *state* must correspond to the return value
        of a previous :meth:`getstate` call.
        """
        buffered_text, _ = state
        self.raw_buffer = Token('unknown', buffered_text)

    def get_raw_tokens(self, chars, final=False):
        """Yield tokens without any further processing. Tokens are one of:

        - ``\\<word>``: a control word (i.e. a command)
        - ``\\<symbol>``: a control symbol (i.e. \\^ etc.)
        - ``#<n>``: a parameter
        - a series of byte characters
        """
        # prepend any text buffered from the previous call
        if self.raw_buffer.text:
            chars = self.raw_buffer.text + chars
            self.raw_buffer = self.emptytoken
        for match in self.regexp.finditer(chars):
            # the last token is always held back in the buffer, because
            # it might be incomplete until more input arrives
            if self.raw_buffer.text:
                yield self.raw_buffer
            self.raw_buffer = Token(match.lastgroup, match.group(0))
        if final:
            for pending in self.flush_raw_tokens():
                yield pending

    def flush_raw_tokens(self):
        """Flush the raw token buffer."""
        if self.raw_buffer.text:
            yield self.raw_buffer
            self.raw_buffer = self.emptytoken
class LatexLexer(RegexpLexer):
    """A very simple lexer for tex/latex.

    The class only provides the token table; all lexing machinery is
    inherited from :class:`RegexpLexer`. Token order matters: earlier
    patterns win when alternatives overlap.
    """
    # implementation note: every token **must** be decodable by inputenc
    tokens = (
        # match newlines and percent first, to ensure comments match correctly
        (u'control_symbol_x2', r'[\\][\\]|[\\]%'),
        # comment: for ease, and for speed, we handle it as a token
        (u'comment', r'%[^\n]*'),
        # control tokens
        # in latex, some control tokens skip following whitespace
        # ('control-word' and 'control-symbol')
        # others do not ('control-symbol-x')
        # XXX TBT says no control symbols skip whitespace (except '\ ')
        # XXX but tests reveal otherwise?
        (u'control_word', r'[\\][a-zA-Z]+'),
        (u'control_symbol', r'[\\][~' r"'" r'"` =^!.]'),
        # TODO should only match ascii
        (u'control_symbol_x', r'[\\][^a-zA-Z]'),
        # parameter tokens
        # also support a lone hash so we can lex things like '#a'
        (u'parameter', r'\#[0-9]|\#'),
        # any remaining characters; for ease we also handle space and
        # newline as tokens
        # XXX TBT does not mention \t to be a space character as well
        # XXX but tests reveal otherwise?
        (u'space', r' |\t'),
        (u'newline', r'\n'),
        # '$$' before '$' so display-math shifts are not split in two
        (u'mathshift', r'[$][$]|[$]'),
        # note: some chars joined together to make it easier to detect
        # symbols that have a special function (i.e. --, ---, etc.)
        (u'chars',
         r'---|--|-|[`][`]'
         r"|['][']"
         r'|[?][`]|[!][`]'
         # separate chars because brackets are optional
         # e.g. fran\\c cais = fran\\c{c}ais in latex
         # so only way to detect \\c acting on c only is this way
         r'|(?![ %#$\n\t\\]).'),
        # trailing garbage which we cannot decode otherwise
        # (such as a lone '\' at the end of a buffer)
        # is never emitted, but used internally by the buffer
        (u'unknown', r'.'),
    )
    """List of token names, and the regular expressions they match."""
class LatexIncrementalLexer(LatexLexer):
    """A very simple incremental lexer for tex/latex code. Roughly
    follows the state machine described in Tex By Topic, Chapter 2.

    The generated tokens satisfy:

    * no newline characters: paragraphs are separated by '\\par'
    * spaces following control tokens are compressed
    """
    # canned tokens the state machine substitutes for raw input
    partoken = Token(u"control_word", u"\\par")
    spacetoken = Token(u"space", u" ")
    replacetoken = Token(u"chars", u"\ufffd")
    curlylefttoken = Token(u"chars", u"{")
    curlyrighttoken = Token(u"chars", u"}")

    def reset(self):
        """Reset lexer state (also clears the inherited raw token buffer)."""
        super(LatexIncrementalLexer, self).reset()
        # three possible states:
        # newline (N), skipping spaces (S), and middle of line (M)
        self.state = 'N'
        # inline math mode?
        self.inline_math = False

    def getstate(self):
        """Return (raw buffer, integer-encoded state): the low two bits
        encode N/S/M, bit 2 encodes inline math mode.
        """
        # state 'M' is most common, so let that be zero
        return (
            self.raw_buffer,
            {'M': 0, 'N': 1, 'S': 2}[self.state] |
            (4 if self.inline_math else 0)
        )

    def setstate(self, state):
        """Restore a state captured by :meth:`getstate`."""
        self.raw_buffer = state[0]
        self.state = {0: 'M', 1: 'N', 2: 'S'}[state[1] & 3]
        self.inline_math = bool(state[1] & 4)

    def get_tokens(self, chars, final=False):
        """Yield tokens while maintaining a state. Also skip
        whitespace after control words and (some) control symbols.
        Replaces newlines by spaces and \\par commands depending on
        the context.
        """
        # current position relative to the start of chars in the sequence
        # of bytes that have been decoded
        pos = -len(self.raw_buffer.text)
        for token in self.get_raw_tokens(chars, final=final):
            pos = pos + len(token.text)
            assert pos >= 0  # first token includes at least self.raw_buffer
            if token.name == u'newline':
                if self.state == 'N':
                    # if state was 'N', generate new paragraph
                    yield self.partoken
                elif self.state == 'S':
                    # switch to 'N' state, do not generate a space
                    self.state = 'N'
                elif self.state == 'M':
                    # switch to 'N' state, generate a space
                    self.state = 'N'
                    yield self.spacetoken
                else:
                    raise AssertionError(
                        "unknown tex state {0!r}".format(self.state))
            elif token.name == u'space':
                if self.state == 'N':
                    # remain in 'N' state, no space token generated
                    pass
                elif self.state == 'S':
                    # remain in 'S' state, no space token generated
                    pass
                elif self.state == 'M':
                    # in M mode, generate the space,
                    # but switch to space skip mode
                    self.state = 'S'
                    yield token
                else:
                    raise AssertionError(
                        "unknown state {0!r}".format(self.state))
            elif token.name == u'mathshift':
                # '$' (or '$$') toggles math mode
                self.inline_math = not self.inline_math
                self.state = 'M'
                yield token
            elif token.name == u'parameter':
                self.state = 'M'
                yield token
            elif token.name == u'control_word':
                # go to space skip mode
                self.state = 'S'
                yield token
            elif token.name == u'control_symbol':
                # go to space skip mode
                self.state = 'S'
                yield token
            elif (token.name == u'control_symbol_x'
                  or token.name == u'control_symbol_x2'):
                # don't skip following space, so go to M mode
                self.state = 'M'
                yield token
            elif token.name == u'comment':
                # no token is generated
                # note: comment does not include the newline
                self.state = 'S'
            elif token.name == 'chars':
                self.state = 'M'
                yield token
            elif token.name == u'unknown':
                if self.errors == 'strict':
                    # current position within chars
                    # this is the position right after the unknown token
                    raise UnicodeDecodeError(
                        "latex",  # codec
                        chars.encode('utf8'),  # problematic input
                        pos - len(token.text),  # start of problematic token
                        pos,  # end of it
                        "unknown token {0!r}".format(token.text))
                elif self.errors == 'ignore':
                    # do nothing
                    pass
                elif self.errors == 'replace':
                    yield self.replacetoken
                else:
                    raise NotImplementedError(
                        "error mode {0!r} not supported".format(self.errors))
            else:
                raise AssertionError(
                    "unknown token name {0!r}".format(token.name))
class LatexIncrementalDecoder(LatexIncrementalLexer):
    """Simple incremental decoder. Transforms lexed LaTeX tokens into
    unicode.

    To customize decoding, subclass and override
    :meth:`get_unicode_tokens`.
    """

    inputenc = "ascii"
    """Input encoding. **Must** extend ascii."""

    binary_mode = True
    """Whether this lexer processes binary data (bytes) or text data
    (unicode).
    """

    def __init__(self, errors='strict'):
        super(LatexIncrementalDecoder, self).__init__(errors)
        # incremental decoder for the input encoding, used to turn
        # incoming bytes into text before lexing
        self.decoder = codecs.getincrementaldecoder(self.inputenc)(errors)

    def decode_token(self, token):
        """Return the decoded token text.

        .. note::

           A space is appended to every control word so that decoded
           token texts can be joined without gluing a control word to
           the following token. For example, ``u'\\hello'`` followed by
           ``u'world'`` correctly yields ``u'\\hello world'`` (LaTeX
           eats the space after a control word); without the extra
           space it would wrongly become ``u'\\helloworld'``.
        """
        if token.name == u'control_word':
            return token.text + u' '
        return token.text

    def get_unicode_tokens(self, chars, final=False):
        """Decode every token. Override to
        process the tokens in some other way (for example, for token
        translation).
        """
        for tok in self.get_tokens(chars, final=final):
            yield self.decode_token(tok)

    def decode(self, bytes_, final=False):
        """Decode LaTeX *bytes_* into a unicode string.

        This implementation calls :meth:`get_unicode_tokens` and joins
        the resulting unicode strings together.
        """
        if not self.binary_mode:
            chars = bytes_
        else:
            try:
                # cast to bytes explicitly: in python 3 the token text
                # can be a memoryview, which has no decode method
                chars = self.decoder.decode(binary_type(bytes_), final=final)
            except UnicodeDecodeError as e:
                # the codec API requires a ValueError in this case
                raise ValueError(e)
        return u''.join(self.get_unicode_tokens(chars, final=final))
class LatexIncrementalEncoder(codecs.IncrementalEncoder):
    """Simple incremental encoder for LaTeX. Transforms unicode into
    :class:`bytes`.

    To customize decoding, subclass and override
    :meth:`get_latex_bytes`.
    """

    inputenc = "ascii"
    """Input encoding. **Must** extend ascii."""

    binary_mode = True
    """Whether this lexer processes binary data (bytes) or text data
    (unicode).
    """

    def __init__(self, errors='strict'):
        """Initialize the codec."""
        self.errors = errors
        self.reset()

    def reset(self):
        """Reset state: clear the pending-character buffer."""
        self.buffer = u""

    def getstate(self):
        """Get state."""
        return self.buffer

    def setstate(self, state):
        """Set state. The *state* must correspond to the return value
        of a previous :meth:`getstate` call.
        """
        self.buffer = state

    def get_unicode_tokens(self, unicode_, final=False):
        """Split unicode into tokens so that every token starts with a
        non-combining character.
        """
        if not isinstance(unicode_, string_types):
            raise TypeError(
                "expected unicode for encode input, but got {0} instead"
                .format(unicode_.__class__.__name__))
        for char in unicode_:
            # a non-combining character starts a new token: emit what
            # has accumulated so far before buffering this character
            if not unicodedata.combining(char):
                for pending in self.flush_unicode_tokens():
                    yield pending
            self.buffer += char
        if final:
            for pending in self.flush_unicode_tokens():
                yield pending

    def flush_unicode_tokens(self):
        """Flush the buffer."""
        if self.buffer:
            yield self.buffer
            self.buffer = u""

    def get_latex_chars(self, unicode_, final=False):
        """Encode every character. Override to
        process the unicode in some other way (for example, for character
        translation).
        """
        for token in self.get_unicode_tokens(unicode_, final=final):
            yield token

    def encode(self, unicode_, final=False):
        """Encode the *unicode_* string into LaTeX :class:`bytes`.

        This implementation calls :meth:`get_latex_chars` and joins
        the resulting :class:`bytes` together.
        """
        chars = u''.join(self.get_latex_chars(unicode_, final=final))
        if not self.binary_mode:
            return chars
        try:
            return chars.encode(self.inputenc, self.errors)
        except UnicodeEncodeError as e:
            # the codec API requires a ValueError in this case
            raise ValueError(e)
class UnicodeLatexIncrementalDecoder(LatexIncrementalDecoder):
    """Incremental LaTeX decoder operating on text (unicode) input:
    with ``binary_mode`` off, :meth:`decode` skips the byte-decoding step.
    """
    binary_mode = False
class UnicodeLatexIncrementalEncoder(LatexIncrementalEncoder):
    """Incremental LaTeX encoder producing text (unicode) output:
    with ``binary_mode`` off, :meth:`encode` skips the byte-encoding step.
    """
    binary_mode = False
|
import paho.mqtt.client as mqtt #pip3 install paho-mqtt
import argparse
import os
import sys
import math
import time
import ntplib #pip3 install ntplib
from queue import Queue
DEST_FILE_PATH = 'ReceivedFiles/'
REPORT_FILE_PATH = 'Report/'
class Client:
    """MQTT client that publishes or subscribes to file payloads and
    records per-transfer timing statistics into CSV report files.
    """

    def __init__(self, bkr_addr, bkr_port, file=None, topic=None, qos=0, count=0, type=None):
        """Create a client.

        Two historical call forms are supported:

        * ``Client(addr, port, file, topic, qos, count, type)``
        * ``Client(addr, port, type)`` -- legacy short form

        (The original class defined ``__init__`` twice; the second
        definition silently replaced the first, so the 7-argument form
        used by ``__main__`` raised TypeError. Both forms are merged here.)

        :param bkr_addr: MQTT broker host name or IP address.
        :param bkr_port: MQTT broker port number (int).
        :param file: Path of the file to transfer (7-argument form only).
        :param topic: Topic to publish/subscribe on.
        :param qos: MQTT quality-of-service level (0, 1 or 2).
        :param count: Number of file copies to transfer.
        :param type: Role label, e.g. 'publisher' or 'subscriber'.
        """
        if type is None and topic is None and file is not None:
            # legacy 3-argument form: the third positional argument was `type`
            type, file = file, None
        self.bkr_addr = bkr_addr
        self.bkr_port = bkr_port
        self.file = file
        self.topic = topic
        self.qos = qos
        self.count = count
        self.type = type
        self.client = None
        self.time_offset = 0  # clock offset (seconds) relative to the NTP server

    # Referenced code from Google example found at:
    # https://www.programcreek.com/python/example/91316/ntplib.NTPClient
    def SyncClockToNtp(self, retries, server):
        """Measure the local clock offset against an NTP server.

        :param retries: Number of additional attempts after the first.
        :param server: NTP server host name.
        """
        attempts = 0
        ntp_client = ntplib.NTPClient()
        response = None
        while True:
            try:
                response = ntp_client.request(server, version=3)
            except Exception:
                print('NTP client request error')
            if response or attempts >= retries:
                break
            time.sleep(3)
            attempts += 1
        # Guard: the original dereferenced response.offset unconditionally
        # and crashed with AttributeError when every request failed.
        if response is not None:
            self.time_offset = response.offset
        print(self.time_offset)

    def connect(self):
        """Connect to the MQTT broker and start the network loop.

        Exits the process if the broker cannot be reached.
        """
        # Sync system clock offset to NTP server
        self.SyncClockToNtp(2, "time.google.com")
        # Define MQTT Client and connect to Broker
        self.client = mqtt.Client(client_id="MQTT_FT_"+self.type+"_"+str(time.time()), clean_session=False)
        self.client.on_connect = self.on_connect
        self.client.on_message = self.on_message
        self.client.on_subscribe = self.on_subscribe
        self.client.on_publish = self.on_publish
        # Add some helper parameters to client object
        self.client.is_connected_flag = False
        self.client.is_subscribed_flag = False
        self.client.publish_is_complete_flag = False
        self.message_queue = Queue()
        try:
            self.client.connect(self.bkr_addr, self.bkr_port)
            self.client.loop_start()
            while not self.client.is_connected_flag:
                continue
        except Exception:
            # fix: bkr_port is an int -- the original concatenated it
            # directly to a str and raised TypeError in this error path
            print("Could not connect to MQTT broker: " + self.bkr_addr + ":" + str(self.bkr_port))
            sys.exit()

    # The callback for when the client receives a CONNACK response from the server.
    def on_connect(self, client, userdata, flags, rc):
        print("Connected with result code "+str(rc))
        client.is_connected_flag = True

    # The callback for when the broker acknowledges a subscription.
    def on_subscribe(self, client, userdata, mid, granted_qos):
        print("Subscribed with QOS: " + str(granted_qos[0]))
        client.is_subscribed_flag = True

    # The callback for when a PUBLISH message is received from the server.
    def on_message(self, client, userdata, msg):
        """Record the on-the-wire size/time of a received message and queue its payload."""
        payload_length = len(msg.payload)
        # Estimate the full MQTT packet size:
        # Fixed Header
        header_size = 1  # Control Byte
        # "remaining length" is a variable-length field (7 bits per byte)
        if payload_length < 2 ** 7:
            header_size += 1  # Packet Length < 128: 1 byte
        elif payload_length < 2 ** 14:
            header_size += 2  # Packet Length >= 128 < 16383: 2 bytes
        elif payload_length < 2 ** 21:
            header_size += 3  # Packet Length >= 16384 < 2097151: 3 bytes
        else:
            header_size += 4  # Packet Length >= 2097151 < 268435455: 4 bytes
        # Variable Header
        header_size += 2  # Fixed topic length field
        header_size += len(self.topic)  # Topic
        header_size += 2  # Message id field
        fo_stats.write(str(time.time()+self.time_offset) + "," + str(payload_length + header_size) + "\n")
        # Write the file later to allow the function to return
        self.message_queue.put(msg.payload)
        client.num_times_to_loop -= 1
        print("Received File Copy (%d of %d)" % (self.count - client.num_times_to_loop, self.count))

    def publish_data(self, payload):
        """Publish *payload* on the configured topic and log the send timestamp."""
        if self.file.split("/")[0] != 'Report':
            fo_stats.write(str(time.time()+self.time_offset)+",\n")
        self.client.publish_is_complete_flag = False
        self.client.publish(self.topic, payload, self.qos)

    def on_publish(self, client, userdata, result):
        # callback: broker acknowledged the publish
        client.publish_is_complete_flag = True

    def disconnect(self):
        """Disconnect from the broker and stop the network loop."""
        self.client.disconnect()
        self.client.loop_stop()

    def publish(self, file, topic, qos, count):
        """Publish *count* copies of *file* on *topic* at the given *qos*,
        logging a send timestamp per copy into a report CSV.
        """
        self.file = file
        self.topic = topic
        self.qos = qos
        self.count = count
        self.client.num_times_to_loop = count
        global fo_stats
        filename = REPORT_FILE_PATH + self.file.split("/")[-1] + "_" + "publish" + "_qos_" + str(qos) + "_stats.csv"
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        fo_stats = open(filename, "w")
        try:
            with open(self.file, "rb") as f:
                file_size = os.path.getsize(self.file)
                while self.client.num_times_to_loop:
                    print("Sent File Copy (%d of %d)" % (self.count-self.client.num_times_to_loop+1, self.count))
                    f.seek(0)
                    self.publish_data(f.read(file_size))
                    # busy-wait until the broker acknowledges the publish
                    while not self.client.publish_is_complete_flag:
                        continue
                    self.client.num_times_to_loop -= 1
        except FileNotFoundError:
            print("Cannot open file: " + self.file)
            sys.exit()

    def subscribe(self, file, topic, qos, count):
        """Receive *count* copies of a file on *topic* and write each to *file*,
        logging a receive timestamp/size per copy into a report CSV.
        """
        self.file = file
        self.topic = topic
        self.qos = qos
        self.count = count
        self.client.num_times_to_loop = count
        global fo_stats
        filename = REPORT_FILE_PATH + self.file.split("/")[-1] + "_" + "subscriber" + "_qos_" + str(qos) + "_stats.csv"
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        fo_stats = open(filename, "w")
        # Subscribing to topic and waiting for SUBACK
        print("Subscribing to topic: " + self.topic + ", QOS:" + str(self.qos))
        self.client.subscribe(self.topic, self.qos)
        while not self.client.is_subscribed_flag:
            continue
        # Loop until we've received and written the required number of file copies
        # (num_times_to_loop is decremented by on_message; Queue.get blocks)
        while (self.client.num_times_to_loop > 0) or (not self.message_queue.empty()):
            message_payload = self.message_queue.get()
            if message_payload is not None:
                fout = open(self.file, "wb")
                fout.write(message_payload)
                fout.close()
            continue

    def subscribe_report(self, file):
        """Subscribe to the report topic *file* at QoS 2 and wait for SUBACK."""
        self.file = file
        self.client.subscribe(file, 2)
        while not self.client.is_subscribed_flag:
            continue

    def publish_report(self, file):
        """Publish the whole report file *file* on the configured topic."""
        self.file = file
        try:
            with open(self.file, "rb") as f:
                file_size = os.path.getsize(self.file)
                f.seek(0)
                self.publish_data(f.read(file_size))
        except FileNotFoundError:
            print("Cannot open file: " + self.file)
            sys.exit()
if __name__ == "__main__":
    RUN_AS_PUBLISHER = True  # flip to False to run the subscriber side
    if RUN_AS_PUBLISHER:
        # publisher: send 10 copies of the '10MB' file on 'topic1' at QoS 1
        client = Client('192.168.1.9', 1883, '10MB', 'topic1', 1, 10, 'publisher')
        client.connect()
        # fix: publish() requires (file, topic, qos, count); the original
        # called it with no arguments and raised TypeError
        client.publish('10MB', 'topic1', 1, 10)
    else:
        # subscriber: receive 10 copies and write them to the file 'op'
        client = Client('192.168.1.9', 1883, 'op', 'topic1', 1, 10, 'subscriber')
        client.connect()
        # fix: subscribe() likewise requires (file, topic, qos, count)
        client.subscribe('op', 'topic1', 1, 10)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@Author : yanyongyu
@Date : 2020-05-19 22:29:14
@LastEditors : yanyongyu
@LastEditTime : 2020-05-26 14:28:58
@Description : None
@GitHub : https://github.com/yanyongyu
"""
__author__ = "yanyongyu"
import random
from typing import List
import pygame
import numpy as np
from .tetris import Tetris
from .tetris import ITetris, TTetris, LTetris, JTetris, OTetris, ZTetris1, ZTetris2
class Matrix(pygame.sprite.Sprite):
    """Tetris playfield sprite.

    The internal grid is 25x16: a 20x10 visible playfield surrounded by
    sentinel walls (3 columns left/right, 3 rows at the bottom) plus 2
    hidden spawn rows on top, so collision checks never index out of
    bounds.

    Attributes:
        matrix (numpy.ndarray): grid of 0/1 cells, walls included (25x16)
        filled_rect (pygame.Surface): 18x18 tile drawn for a filled cell
        unfilled_rect (pygame.Surface): 18x18 tile drawn for an empty cell
        image (pygame.Surface): rendered playfield surface
        rect (pygame.Rect): rect of ``image``
        bag (List[Tetris]): remaining pieces of the current 7-bag
        current (Tetris): currently falling tetromino
        next (Tetris): upcoming tetromino
        clearing (bool): True while the line-clear animation is playing
        clear_delay (int): frame counter driving the clear animation
        clear_lines (numpy.ndarray): bool mask of full rows to clear
        clear_rects (List[pygame.Surface]): clear-animation frames
    """

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        # Fixed: the np.int / np.bool aliases were deprecated and then
        # removed in NumPy 1.24 — use the builtin dtypes instead.
        self.matrix = np.zeros((25, 16), dtype=int)
        # Sentinel walls: 3 columns on each side, 3 rows at the bottom.
        self.matrix[:, :3] = 1
        self.matrix[:, -3:] = 1
        self.matrix[-3:, :] = 1
        # Pre-render the two 18x18 cell tiles (one 20x20 board cell each).
        self.unfilled_rect = pygame.Surface((18, 18)).convert_alpha()
        self.filled_rect = pygame.Surface((18, 18)).convert_alpha()
        for i in range(20):
            for j in range(20):
                # Border ring plus inner square give the classic LCD look.
                if i < 2 or i > 15 or j < 2 or j > 15 or (3 < i < 14 and
                                                         3 < j < 14):
                    self.unfilled_rect.set_at((i, j), (135, 147, 114, 255))
                    self.filled_rect.set_at((i, j), (0, 0, 0, 255))
                else:
                    self.unfilled_rect.set_at((i, j), (135, 147, 114, 0))
                    self.filled_rect.set_at((i, j), (0, 0, 0, 0))
        # Frames of the line-clear animation: a shrinking filled square.
        self.clear_rects = []
        for index in range(8):
            surface = self.unfilled_rect.copy()
            for i in range(2 + index, 16 - index):
                for j in range(2 + index, 16 - index):
                    surface.set_at((i, j), (0, 0, 0, 255))
            self.clear_rects.append(surface)
        self.bag = self.fill_bag()
        self.current = self.bag.pop(random.randint(0, len(self.bag) - 1))
        self.next = self.bag.pop(random.randint(0, len(self.bag) - 1))
        self.clearing = False
        self.clear_delay = 0
        self.clear_lines = np.zeros((25,), dtype=bool)
        self.update()

    def update(self):
        """Re-render ``image`` from the matrix plus the falling piece."""
        x = self.current.x + 3
        y = self.current.y + 2
        shape = self.current.matrix.shape
        # Overlay the falling piece on a copy so the board stays untouched.
        matrix_ = self.matrix.copy()
        matrix_[y:y + shape[0],
                x:x + shape[1]] += self.current.matrixs[self.current.index]
        self.image = pygame.Surface((198, 398)).convert_alpha()
        self.image.fill((158, 173, 134, 0))
        if self.clearing:
            for i in range(10):
                for j in range(20):
                    if self.clear_lines[j + 2]:
                        self.image.blit(self.clear_rects[self.clear_delay // 2],
                                        (i * 20, j * 20))
                    else:
                        self.image.blit(
                            self.filled_rect if matrix_[j + 2, i + 3] else
                            self.unfilled_rect, (i * 20, j * 20))
            # Advance the animation; each frame is shown for 2 ticks.
            self.clear_delay = (self.clear_delay + 1) % (2 *
                                                         len(self.clear_rects))
            if self.clear_delay == 0:
                # Animation finished: actually remove the full rows.
                self.after_clear()
        else:
            for i in range(10):
                for j in range(20):
                    self.image.blit(
                        self.filled_rect if matrix_[j + 2, i +
                                                    3] else self.unfilled_rect,
                        (i * 20, j * 20))
        self.rect = self.image.get_rect()

    def random_startline(self, start_line: int = 0):
        """Fill the bottom ``start_line`` playfield rows with random garbage."""
        self.matrix[-3 - start_line:-3,
                    3:-3] += np.random.randint(0, 2, (start_line, 10))

    def check_collision(self) -> bool:
        """Return True if the falling piece overlaps walls or settled cells."""
        x = self.current.x + 3
        y = self.current.y + 2
        shape = self.current.matrix.shape
        matrix_ = self.matrix.copy()
        matrix_[y:y + shape[0],
                x:x + shape[1]] += self.current.matrixs[self.current.index]
        # Any cell > 1 means the piece was stamped onto a non-empty cell.
        return np.any(matrix_ > 1)

    def check_clear(self) -> int:
        """Mark completely filled rows for clearing; return their count."""
        self.clear_lines = np.all(self.matrix, axis=1)
        # Only the 20 visible playfield rows count (wall rows are always full).
        self.clearing = np.any(self.clear_lines[2:-3])
        return sum(self.clear_lines[2:-3])

    def after_clear(self):
        """Remove the marked rows and shift the stack above them down."""
        self.clearing = False
        for index, line in enumerate(self.clear_lines[2:-3]):
            if line:
                # Delete the full row, re-insert an empty row on top, and
                # restore the wall columns of the new top row.
                tmp = np.delete(self.matrix, index + 2, 0)
                self.matrix = np.insert(tmp, 0, 1, axis=0)
                self.matrix[0, 3:-3] = 0

    def check_gameover(self) -> bool:
        """Return True when settled blocks reach the hidden spawn rows."""
        return np.any(self.matrix[:2, 3:-3] > 0)

    def add_tetris(self):
        """Stamp the falling piece permanently into the matrix."""
        x = self.current.x + 3
        y = self.current.y + 2
        shape = self.current.matrix.shape
        self.matrix[y:y + shape[0],
                    x:x + shape[1]] += self.current.matrixs[self.current.index]

    def next_tetris(self):
        """Promote the preview piece and draw a new one from the bag."""
        self.current = self.next
        self.next = self.bag.pop(random.randint(0, len(self.bag) - 1))
        if not self.bag:
            self.bag = self.fill_bag()

    def fill_bag(self) -> List[Tetris]:
        """7bag: one fresh instance of each of the seven tetrominoes."""
        return [
            ITetris(3, -2),
            TTetris(3, -2),
            LTetris(3, -2),
            JTetris(3, -2),
            OTetris(4, -2),
            ZTetris1(3, -2),
            ZTetris2(3, -2)
        ]
|
from transaction import *
from user import *
from extract_pk import extract_pk
# Load the persisted transactions and users, then dump every transaction
# together with the public key of its sender.
ts = load_transactions()
us = load_users()

# Map each user's name to the public key extracted from their stored key.
keys = {}
for user in us:
    keys[user.nam] = extract_pk(user.key)
print(keys)

for transaction in ts:
    print(transaction)
    print(transaction.to_dict())
    sender_key = keys[transaction.fro]
    print(sender_key)
|
import json
import logging
from modules.settings import settings
from modules.rabbitmq import channel
def log(level, msg, *args, **kwargs):
    """Publish one log record to the configured RabbitMQ log-event queue."""
    payload = {
        'level': level,
        'msg': msg,
        'args': args,
        'kwargs': kwargs,
    }
    channel.basic_publish(
        exchange='',
        routing_key=settings['log-event-queue'],
        body=json.dumps(payload),
    )
def debug(msg, *args, **kwargs):
    """Publish a DEBUG-level log event."""
    log(logging.DEBUG, msg, *args, **kwargs)
def info(msg, *args, **kwargs):
    """Publish an INFO-level log event."""
    log(logging.INFO, msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
    """Publish a WARNING-level log event."""
    log(logging.WARNING, msg, *args, **kwargs)
def error(msg, *args, **kwargs):
    """Publish an ERROR-level log event."""
    log(logging.ERROR, msg, *args, **kwargs)
def critical(msg, *args, **kwargs):
    """Publish a CRITICAL-level log event."""
    log(logging.CRITICAL, msg, *args, **kwargs)
|
import urllib3
from django import forms
from service_catalog.models import Instance
from service_catalog.models.instance import InstanceState
from Squest.utils.squest_model_form import SquestModelForm
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class InstanceForm(SquestModelForm):
    """Form exposing every Instance model field, with ``state`` rendered
    as a required drop-down limited to the valid InstanceState choices."""
    state = forms.ChoiceField(
        choices=InstanceState.choices,
        required=True,
        widget=forms.Select())

    class Meta:
        model = Instance
        fields = "__all__"
|
import pygame
from pygame.locals import *
from OpenGL.GL import *
# Minimal pygame + OpenGL bootstrap: open a double-buffered GL window
# and pump events until the user closes it.
pygame.init()

screen_width = 500
screen_height = 500  # fixed typo: was "screen_heigth"

screen = pygame.display.set_mode((screen_width, screen_height), DOUBLEBUF | OPENGL)
pygame.display.set_caption("OpenGL in Python")

done = False
# Removed the unused `white` pygame.Color — it was never referenced.
while not done:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
    # Clear both buffers each frame, then swap front/back buffers.
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    pygame.display.flip()

pygame.quit()
## IMPORTS ##
from base.equipment.dice import Dice
from base.equipment.card import Card
from base.equipment.board import Board
from random import shuffle
## CLASS ##
class Equipments():
    """
    Define an Equipments class: the stacks of cards, the dices and the
    physical board used by a game.
    """

    def __init__(self, cards=None, dices=None, board=None):
        """
        Default constructor that instanciates an Equipments class

        Args:
            cards: A list of Card objects list, we can have multiple type of cards in different stacks (default: one empty stack)
            dices: A list of Dice objects (default: empty list)
            board: A Board object (default: None)

        Returns:
            The new object that was instanciated

        Raises:
            None
        """
        # Fixed: the previous defaults ([[]] and []) were mutable literals
        # shared by every instance built with the defaults, so addCard /
        # addDice on one instance corrupted all the others. Use None
        # sentinels and build fresh lists per call instead.
        self.setCards([[]] if cards is None else cards)
        self.setDices([] if dices is None else dices)
        self.setBoard(board)

    def __str__(self):
        """
        Define the string represention of an Equipments object

        Returns:
            A string that represents the Equipments object
        """
        result = "Equipments:\nCards -> [\n"
        for stack in self._cards:
            result += "\t[\n"
            for card in stack:
                result += "\t\t"+str(card)+",\n"
            result += "\t]\n"
        if self._dices is not None:
            result += "], \nDices -> [\n"
            for dice in self._dices:
                result += "\t"+str(dice)+",\n"
        if self._board is not None:
            result += "], \nBoard -> \""+str(self._board)+"\""
        return result

    def getCards(self):
        """
        Get the cards

        Returns:
            A list that contains all Card objects (separated in stacks)
        """
        return self._cards

    def setCards(self, newCards):
        """
        Set the cards

        Args:
            newCards: A list that contains all Card objects (separated in different stack)

        Raises:
            TypeError: Raise a TypeError if "cards" is not a list of list of Card objects
        """
        if isinstance(newCards, list):
            for stack in newCards:
                if isinstance(stack, list):
                    for card in stack:
                        if not isinstance(card, Card):
                            raise TypeError('"cards" attribute for an Equipments object must be a list of Card objects list')
                else:
                    raise TypeError('"cards" attribute for an Equipments object must be a list of Card objects list')
            self._cards = newCards
        else:
            raise TypeError('"cards" attribute for an Equipments object must be a list')

    def addCard(self, i, card):
        """
        Add a new card in the list of cards at the index i

        Args:
            card: the new Card object to add
            i: Integer that represents the index of the concerned list

        Raises:
            TypeError: Raise a TypeError if "card" is not a Card object
        """
        if isinstance(card, Card):
            self._cards[i].append(card)
        else:
            raise TypeError('"card" parameter must be a Card object')

    def getDices(self):
        """
        Get the dices

        Returns:
            A list that contains all Dice objects (or None)
        """
        return self._dices

    def setDices(self, newDices):
        """
        Set the dices

        Args:
            newDices: A list that contains all Dice objects, or None

        Raises:
            TypeError: Raise a TypeError if "dices" is not a list of Dice objects
        """
        if isinstance(newDices, list) or newDices is None:
            if newDices is None:
                self._dices = None
            else:
                for dice in newDices:
                    if not isinstance(dice, Dice):
                        raise TypeError('"dices" attribute for an Equipments object must be a list of Dice objects')
                self._dices = newDices
        else:
            raise TypeError('"dices" attribute for an Equipments object must be a list')

    def addDice(self, newDice):
        """
        Add a new dice in the list of dices

        Args:
            newDice: the new Dice object to add

        Raises:
            TypeError: Raise a TypeError if "newDice" is not a Dice object
        """
        # Fixed: the previous body referenced the undefined name "dice",
        # which raised NameError on every call.
        if isinstance(newDice, Dice):
            self._dices.append(newDice)
        else:
            raise TypeError('"dice" parameter must be a Dice object')

    def getBoard(self):
        """
        Get the board

        Returns:
            the Board object (or None)
        """
        return self._board

    def setBoard(self, newBoard):
        """
        Set the board

        Args:
            newBoard: the new Board object, or None

        Raises:
            TypeError: Raise a TypeError if "board" is not a Board object
        """
        if isinstance(newBoard, Board) or newBoard is None:
            self._board = newBoard
        else:
            raise TypeError('"board" attribute for an Equipments object must be a Board object')

    def shuffleTheCards(self, i):
        """
        Shuffle the stack of cards at index i in place

        Args:
            i: Integer that represents the index of the concerned list
        """
        shuffle(self._cards[i])

    def pushCard(self, i, newCard):
        """
        Push a card onto the stack at index i

        Args:
            newCard: The Card object to push
            i: Integer that represents the index of the concerned list

        Raises:
            TypeError: Raise a TypeError if "newCard" is not a Card object
        """
        if isinstance(newCard, Card):
            # Fixed: lists have no .push() and the index i was ignored
            # (the original called self._cards.push(newCard), which raised
            # AttributeError). Append to the stack at index i, mirroring
            # popCard below.
            self._cards[i].append(newCard)
        else:
            raise TypeError('"newCard" attribute must be a Card object')

    def popCard(self, i):
        """
        Pop a card from the stack at index i

        Args:
            i: Integer that represents the index of the concerned list

        Returns:
            The popped Card, or None when i is out of range
        """
        if i < len(self._cards):
            return self._cards[i].pop()
        else:
            return None
|
from django.core.serializers.json import Serializer
class LeaderboardApiSerializer(Serializer):
    """Serializer that flattens a leaderboard entry to its Twitter fields.

    Strips Django's default envelope keys (model/pk/fields) and exposes
    the Twitter attributes at the top level of each serialized object.
    """

    def get_dump_object(self, obj):
        data = super().get_dump_object(obj)
        # Drop the default serializer envelope.
        for envelope_key in ('model', 'pk', 'fields'):
            del data[envelope_key]
        # Expose the Twitter attributes directly.
        for attr in ('twitter_id', 'twitter_name', 'twitter_username',
                     'twitter_follower_count'):
            data[attr] = getattr(obj, attr)
        return data


leaderboard_api_serializer = LeaderboardApiSerializer()
|
import asyncio
import pyfirmata
from pyfirmata import util
from grow.models import Lampada, Relay
# Hardware setup (runs at import time): connect to the Arduino over serial,
# start the firmata Iterator thread so pin reads stay fresh, and register
# analog pin 0 as input (used below as an LM35 temperature sensor).
board = pyfirmata.Arduino('/dev/cu.usbmodem14301')
it = pyfirmata.util.Iterator(board)
it.start()
sensor_lm35 = board.get_pin('a:0:i')
async def smart_irrigation(pin: int, tempo: int):
    """Drive digital *pin* high for *tempo* seconds, then switch it off."""
    # Make sure the pin starts low before the watering cycle.
    board.digital[pin].write(0)
    state = 'on'
    print(F'A porta {pin} está ativa por {tempo} segundos')
    # The flag flips after one pass, so this loop body runs exactly once.
    while state == 'on':
        board.digital[pin].write(1)
        await asyncio.sleep(tempo)
        board.digital[pin].write(0)
        print('off')
        state = 'off'
    print('Done')
async def smart_irrigation_v2(pin: int, tempo: int):
    """Switch *pin* on, count down *tempo* seconds with feedback, switch off."""
    print(F'A porta {pin} está ativa ... {tempo}')
    board.digital[pin].write(1)
    # Visual countdown: one shrinking bar of '/' per remaining second.
    for remaining in range(tempo, 0, -1):
        print('/' * remaining)
        await asyncio.sleep(1)
    board.digital[pin].write(0)
    print('Done')
async def on(pin: int):
    """Set digital *pin* high (turn the connected device on)."""
    board.digital[pin].write(1)
async def off(pin: int):
    """Set digital *pin* low (turn the connected device off)."""
    board.digital[pin].write(0)
def _read_pin_states(devices):
    """Read the current digital state of each device's board pin.

    Args:
        devices: iterable of model objects exposing ``name`` and ``pin``.

    Returns:
        List of ``{'name', 'pin', 'status'}`` dicts; devices whose pin
        cannot be read (bad index / not initialised) are skipped.
    """
    states = []
    for device in devices:
        try:
            status = board.digital[device.pin].read()
        except (AttributeError, IndexError):
            # Pin is not configured on this board; skip the device.
            print('não existe')
        else:
            states.append({'name': device.name, 'pin': device.pin,
                           'status': status})
    return states


def get_values():
    """Collect lamp/relay pin states and the LM35 temperature reading.

    Returns:
        dict with 'lampadas' and 'reles' (lists of pin-state dicts) and
        'sensores' (scaled temperature reading).
    """
    # Refactored: the two copy-pasted read loops now share _read_pin_states.
    temperatura = sensor_lm35.read()
    # sensor_lm35 = board.analog[0].read()
    lampadas_status = _read_pin_states(Lampada.objects.all())
    reles_status = _read_pin_states(Relay.objects.all())
    print(lampadas_status)
    print(reles_status)
    # assumes read() is firmata's normalized 0-1 analog value and
    # 0.48828125 * 1000 converts it to the LM35 temperature scale —
    # TODO confirm the calibration factor.
    print(temperatura * 0.48828125 * 1000)
    return {'lampadas': lampadas_status,
            'reles': reles_status,
            'sensores': temperatura * 0.48828125 * 1000}
|
from rest_framework import serializers
from .models import Client
from .models import Transaction
class ClientSerializer(serializers.ModelSerializer):
    """Serialize every field of the Client model."""
    class Meta:
        model = Client
        fields = '__all__'
class TransactionSerializer(serializers.ModelSerializer):
    """Serialize every field of the Transaction model."""
    class Meta:
        model = Transaction
        fields = '__all__'
|
# From: http://espn.go.com/nfl/schedule
import logging
import sys
import datetime
import urllib.request
from django.core.management.base import BaseCommand
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.deletion import ProtectedError
from bs4 import BeautifulSoup
from footballseason.models import Game, Team
from footballseason import fb_utils
import footballseason.management.commands.espn_common as espn_common
LOG = logging.getLogger(__name__)
# Call from CLI via: $ python manage.py generate_schedule
class Command(BaseCommand):
    """Scrape ESPN's weekly schedule pages and sync Game rows for a season."""

    season = fb_utils.get_season()
    week_list = range(1, fb_utils.NUM_WEEKS + 1)

    def add_games_from_one_week(self, season, week):
        """Scrape one week's schedule page; create, update and prune games.

        Args:
            season: season year used in the ESPN URL and Game rows.
            week: 1-based week number.
        """
        url = "http://espn.go.com/nfl/schedule/_/year/{0}/week/{1}".format(season, week)
        LOG.info("url: {0}".format(url))
        with urllib.request.urlopen(url) as response:
            html = response.read()
        soup = BeautifulSoup(html, 'html.parser')
        all_game_ids = []
        # Each table is a day (e.g. Thursday, Sunday, Monday)
        for table in soup.findAll("table", class_="schedule"):
            odd = table.find_all('tr', {"class": "odd"})
            even = table.find_all('tr', {"class": "even"})
            all_results = odd + even
            for row in all_results:
                # Each row is one game
                game_time_data = row.find('td', {"data-behavior": "date_time"})
                if game_time_data is None:
                    # bye teams
                    continue
                game_time = game_time_data['data-date']
                # Example: 2016-12-16T01:25Z
                game_datetime = datetime.datetime.strptime(game_time, "%Y-%m-%dT%H:%MZ")
                # Make gametime timezone aware (ESPN publishes UTC times)
                game_datetime = game_datetime.replace(tzinfo=datetime.timezone.utc)
                team_names = []
                for team_abbr in row.find_all('abbr'):
                    team_names.append(espn_common.espn_team_names[team_abbr.contents[0].lower()])
                try:
                    away = Team.objects.get(team_name=team_names[0])
                    home = Team.objects.get(team_name=team_names[1])
                except ObjectDoesNotExist:
                    # Could not find team, this shouldn't happen.
                    # Fixed log message typo ("Count not" -> "Could not") and
                    # raised the level to error since we abort right after.
                    LOG.error("Could not find either team {0} or {1}, unable to add game to schedule".format(team_names[0], team_names[1]))
                    sys.exit(1)
                try:
                    obj = Game.objects.get(season=season, week=week, home_team=home, away_team=away)
                except Game.DoesNotExist:
                    obj = Game(season=season, week=week, home_team=home, away_team=away, game_time=game_datetime)
                    LOG.info("Adding: {0}".format(obj))
                else:
                    if obj.game_time != game_datetime:
                        obj.game_time = game_datetime
                        LOG.info(f"{obj} was already on the schedule, updating gametime and saving")
                finally:
                    # Save in both paths so updates and creations persist.
                    obj.save()
                    all_game_ids.append(obj.id)
        # Any existing game for this week not seen on the page was removed.
        removed_games = Game.objects.filter(season=season, week=week).exclude(id__in=all_game_ids)
        if removed_games:
            try:
                LOG.info(f"Removing {removed_games.count()} games: {list(removed_games)}")
                removed_games.delete()
            except ProtectedError:
                LOG.info("Unable to remove games with picks")

    def add_all_games(self):
        """Scrape and sync every week of the season."""
        for week in self.week_list:
            LOG.info("Processing week {0}".format(week))
            self.add_games_from_one_week(self.season, week)

    def handle(self, *args, **options):
        """Django management-command entry point."""
        self.add_all_games()
|
"""
A simple Multi-User Dungeon (MUD) game. Players can talk to each other, examine
their surroundings and move between rooms.
Some ideas for things to try adding:
* More rooms to explore
* An 'emote' command e.g. 'emote laughs out loud' -> 'Mark laughs out loud'
* A 'whisper' command for talking to individual players
* A 'shout' command for yelling to players in all rooms
* Items to look at in rooms e.g. 'look fireplace' -> 'You see a roaring, glowing fire'
* Items to pick up e.g. 'take rock' -> 'You pick up the rock'
* Monsters to fight
* Loot to collect
* Saving players accounts between sessions
* A password login
* A shop from which to buy items
author: Mark Frimston - mfrimston@gmail.com
"""
import time
import json
# import the MUD server class
from mudserver import MudServer
# structure defining the rooms in the game. Try adding more rooms to the game!
with open('world.json') as json_data:
    rooms = json.load(json_data)
# structure where players are saved
with open('players.json') as json_data:
    players = json.load(json_data)
# players currently connected, keyed by the server-assigned client id
active_players = {}
# start the server
mud = MudServer()
# main game loop. We loop forever (i.e. until the program is terminated)


def save_players():
    """Snapshot every connected player's state into the persistent
    'players' dict (written to players.json by the 'stop' command).

    Fixed: this function used to be defined *below* the infinite loop,
    so the save_players() calls inside the loop raised NameError.
    """
    for pid in active_players:
        t_name = active_players[pid]["name"]
        t_room = active_players[pid]["room"]
        t_inv = active_players[pid]["inventory"]
        players[t_name] = {"room": t_room, "inventory": t_inv}


while True:
    # pause for 1/5 of a second on each loop, so that we don't constantly
    # use 100% CPU time
    time.sleep(0.2)
    # 'update' must be called in the loop to keep the game running and give
    # us up-to-date information
    mud.update()
    # go through any newly connected players
    for id in mud.get_new_players():
        # add the new player to the dictionary, noting that they've not been
        # named yet.
        # The dictionary key is the player's id number. Start them off in the
        # 'Tavern' room.
        # Try adding more player stats - level, gold, inventory, etc
        active_players[id] = {
            "name": "temp",
            "room": "temp",
            "inventory": []
        }
        # send the new player a prompt for their name
        mud.send_message(id, "What is your name?")
    # go through any recently disconnected players
    for id in mud.get_disconnected_players():
        # if for any reason the player isn't in the player map, skip them and
        # move on to the next one
        if id not in active_players:
            continue
        # go through all the players in the game
        for pid in active_players:
            # send each player a message to tell them about the disconnected player
            mud.send_message(pid, "%s quit the game" % active_players[id]["name"])
        # remove the player's entry in the player dictionary
        del active_players[id]
    # go through any new commands sent from players
    for id, command, params in mud.get_commands():
        # if for any reason the player isn't in the player map, skip them and
        # move on to the next one
        if id not in active_players:
            continue
        # if the player hasn't given their name yet, use this first command as their name
        if active_players[id]["name"] == "temp":
            active_players[id]["name"] = command
            active_players[id]["room"] = "Tavern"
            # returning players resume their saved room and inventory
            try:
                if players[command] is not None:
                    t_room = players[command]["room"]
                    t_inv = players[command]["inventory"]
                    active_players[id] = {"name": command, "room": t_room, "inventory": t_inv}
            except KeyError:
                pass
            # go through all the players in the game
            for pid, pl in active_players.items():
                if active_players[id]["name"] != "temp":
                    # send each player a message to tell them about the new player
                    mud.send_message(pid, "%s entered the game" % active_players[id]["name"])
            # send the new player a welcome message
            mud.send_message(id, "Welcome to the game, %s. Type 'help' for a list of commands."
                                 " Have fun!" % active_players[id]["name"])
            # send the new player the description of their current room
            mud.send_message(id, rooms[active_players[id]["room"]]["description"])
        # each of the possible commands is handled below. Try adding new commands
        # to the game!
        else:
            # Fixed: the original discarded the result of command.lower(),
            # accidentally leaving command matching case-sensitive.
            command = command.lower()
            # 'help' command
            if command == "help":
                # send the player back the list of possible commands
                mud.send_message(id, "Commands:")
                mud.send_message(id, " say <message> - Says something out loud, e.g. 'say Hello'")
                mud.send_message(id, " look - Examines the surroundings, e.g. 'look'")
                mud.send_message(id, " go <exit> - Moves through the exit specified, e.g. 'go outside'")
                mud.send_message(id, " room <name> <'Description.'> <exit> - Creates a room with the given "
                                     "description and an exit to and from the given location.")
            # 'say' command
            elif command == "say":
                # go through every player in the game
                for pid, pl in active_players.items():
                    # if they're in the same room as the player
                    if active_players[pid]["room"] == active_players[id]["room"]:
                        # send them a message telling them what the player said
                        mud.send_message(pid, "%s says: %s" % (active_players[id]["name"], ' '.join(params)))
            # 'look' command
            elif command == "look":
                # store the player's current room
                rm = rooms[active_players[id]["room"]]
                # send the player back the description of their current room
                mud.send_message(id, rm["description"])
                playershere = []
                # go through every player in the game
                for pid, pl in active_players.items():
                    # if they're in the same room as the player
                    if active_players[pid]["room"] == active_players[id]["room"]:
                        # add their name to the list
                        if active_players[pid]["name"] is not None:
                            playershere.append(active_players[pid]["name"])
                print(len(playershere))
                # send player a message containing the list of players in the room
                if playershere is not None:
                    mud.send_message(id, "Players here: %s" % ", ".join(playershere))
                # send player a message containing the list of exits from this room
                mud.send_message(id, "Exits are: %s" % ", ".join(rm["exits"]))
            # 'go' command
            elif command == "go":
                # store the exit name
                ex = ' '.join(params)
                # store the player's current room
                rm = rooms[active_players[id]["room"]]
                # if the specified exit is found in the room's exits list
                if ex in rm["exits"]:
                    # go through all the players in the game
                    for pid, pl in active_players.items():
                        # if player is in the same room and isn't the player sending the command
                        if active_players[pid]["room"] == active_players[id]["room"] and pid != id:
                            # send them a message telling them that the player left the room
                            mud.send_message(pid, "%s left via exit '%s'" % (active_players[id]["name"], ex))
                    # update the player's current room to the one the exit leads to
                    active_players[id]["room"] = rm["exits"][ex]
                    rm = rooms[active_players[id]["room"]]
                    # go through all the players in the game
                    for pid, pl in active_players.items():
                        # if player is in the same (new) room and isn't the player sending the command
                        if active_players[pid]["room"] == active_players[id]["room"] and pid != id:
                            # send them a message telling them that the player entered the room
                            mud.send_message(pid, "%s arrived via exit '%s'" % (active_players[id]["name"], ex))
                    # send the player a message telling them where they are now
                    mud.send_message(id, "You arrive at '%s'" % active_players[id]["room"])
                # the specified exit wasn't found in the current room
                else:
                    # send back an 'unknown exit' message
                    mud.send_message(id, "Unknown exit '%s'" % ex)
            # 'room' command: create a new room linked to an existing one
            elif command == "room":
                rooms[params[0]] = {"description": params[1], "exits": {''.join(params[2]).lower(): params[2]}}
                rooms[params[2]]["exits"][params[0]] = params[0]
                with open('world.json', 'w') as outfile:
                    json.dump(rooms, outfile, sort_keys=True, indent=4, ensure_ascii=False)
            # 'stop' command: persist state and shut the server down
            elif command == "stop":
                # go through all the players in the game
                for pid, pl in active_players.items():
                    # warn every player about the shutdown
                    mud.send_message(pid, "The server is shutting down! Bye!")
                save_players()
                with open('players.json', 'w') as outfile:
                    json.dump(players, outfile, sort_keys=True, indent=4, ensure_ascii=False)
                mud.shutdown()
            # 'give' command: put an item into the player's own inventory
            elif command == "give":
                it = ' '.join(params)
                active_players[id]["inventory"].append(it)
            # some other, unrecognised command
            else:
                # send back an 'unknown command' message
                mud.send_message(id, "Unknown command '%s'" % command)
    # keep the persistent player map in sync every tick
    save_players()
|
"""
MIT License
This example is based on https://github.com/eriklindernoren/Keras-GAN
Copyright (c) 2017 Erik Linder-Norén
Copyright (c) 2019 Ivan Vasilev
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import matplotlib.pyplot as plt
import numpy as np
import tensorflow
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import \
BatchNormalization, Input, Dense
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import RMSprop
def build_generator(latent_input: Input):
    """
    Build generator FC network

    :param latent_input: the latent input
    :return: a Model mapping latent vectors to flattened 28*28 images
    """
    model = Sequential([
        # start with a fully connected layer to upsample the 1d latent vector
        # the input_shape is the same as latent_input (excluding the mini-batch)
        Dense(64, input_shape=latent_input.shape[1:]),
        BatchNormalization(),
        LeakyReLU(0.2),
        Dense(128),
        BatchNormalization(),
        LeakyReLU(0.2),
        Dense(256),
        BatchNormalization(),
        LeakyReLU(0.2),
        Dense(512),
        BatchNormalization(),
        LeakyReLU(0.2),
        Dense(28 * 28, activation='tanh'),
    ])

    model.summary()

    # this is forward phase
    generated = model(latent_input)

    # build model from the input and output
    # Fixed: the original returned Model(z, generated), relying on the
    # *global* variable `z` instead of the latent_input parameter — it
    # only worked because the sole caller passed z.
    return Model(latent_input, generated)
def build_critic():
    """
    Build critic FC network
    """
    critic_net = Sequential([
        Dense(512, input_shape=(28 * 28,)),
        LeakyReLU(0.2),
        Dense(256),
        LeakyReLU(0.2),
        Dense(1),
    ])
    critic_net.summary()

    # run a flattened image through the network to build the graph
    flat_image = Input(shape=(28 * 28,))
    validity = critic_net(flat_image)

    return Model(flat_image, validity)
def train(generator, critic, combined, steps, batch_size, n_critic, clip_value):
    """
    Train the GAN model
    :param generator: generator model
    :param critic: the critic model
    :param combined: stacked generator and critic
        we'll use the combined network when we train the generator
    :param steps: number of alternating steps for training
    :param batch_size: size of the minibatch
    :param n_critic: how many critic training steps for one generator step
    :param clip_value: clip value for the critic weights
    """

    # Load the dataset
    (x_train, _), _ = mnist.load_data()

    # Rescale in [-1, 1] interval
    x_train = (x_train.astype(np.float32) - 127.5) / 127.5

    # We use FC networks, so we flatten the array
    x_train = x_train.reshape(x_train.shape[0], 28 * 28)

    # Discriminator ground truths
    # WGAN label convention: +1 for real samples, -1 for generated ones
    real = np.ones((batch_size, 1))
    fake = -np.ones((batch_size, 1))

    latent_dim = generator.input_shape[1]

    # Train for number of steps
    for step in range(steps):
        # Train the critic first for n_critic steps
        for _ in range(n_critic):
            # Select a random batch of images
            real_images = x_train[np.random.randint(0, x_train.shape[0], batch_size)]

            # Sample noise as generator input
            noise = np.random.normal(0, 1, (batch_size, latent_dim))

            # Generate a batch of new images
            generated_images = generator.predict(noise)

            # Train the critic
            # train_on_batch returns [loss, accuracy] because the critic was
            # compiled with metrics=['accuracy']
            critic_real_loss = critic.train_on_batch(real_images, real)
            critic_fake_loss = critic.train_on_batch(generated_images, fake)
            critic_loss = 0.5 * np.add(critic_real_loss, critic_fake_loss)

            # Clip critic weights into [-clip_value, clip_value]
            # (the WGAN weight-clipping step)
            for l in critic.layers:
                weights = l.get_weights()
                weights = [np.clip(w, -clip_value, clip_value) for w in weights]
                l.set_weights(weights)

        # Train the generator
        # Note that we use the "valid" labels for the generated images
        # That's because we try to maximize the discriminator loss
        # NOTE: this reuses the `noise` batch from the last critic iteration
        generator_loss = combined.train_on_batch(noise, real)

        # Display progress
        # critic_loss[0] is the loss component; index [1] would be accuracy
        print("%d [Critic loss: %.4f%%] [Generator loss: %.4f%%]" %
              (step, critic_loss[0], generator_loss))
def plot_generated_images(generator):
    """
    Display a nxn 2D manifold of digits
    :param generator: the generator
    """
    n = 10
    digit_size = 28
    latent_dim = generator.input_shape[1]

    # sample n*n latent vectors and generate one image per vector
    samples = np.random.normal(0, 1, (n * n, latent_dim))
    images = generator.predict(samples)

    # paste each 28x28 digit into one big (n*28, n*28) canvas
    canvas = np.zeros((digit_size * n, digit_size * n))
    for row in range(n):
        for col in range(n):
            digit = np.reshape(images[row * n + col], (28, 28))
            canvas[row * digit_size:(row + 1) * digit_size,
                   col * digit_size:(col + 1) * digit_size] = digit

    # plot the results
    plt.figure(figsize=(6, 5))
    plt.axis('off')
    plt.imshow(canvas, cmap='Greys_r')
    plt.show()
def wasserstein_loss(y_true, y_pred):
    """The Wasserstein loss implementation"""
    elementwise = y_true * y_pred
    return tensorflow.keras.backend.mean(elementwise)
if __name__ == '__main__':
    print("Wasserstein GAN for new MNIST images with TF/Keras")

    latent_dim = 100

    # Build the generator
    # Generator input z
    z = Input(shape=(latent_dim,))
    generator = build_generator(z)

    generated_image = generator(z)

    # we'll use RMSprop optimizer
    # NOTE(review): `lr` is the legacy argument name; recent TF/Keras
    # releases expect `learning_rate` — confirm against the pinned version.
    optimizer = RMSprop(lr=0.00005)

    # Build and compile the discriminator
    critic = build_critic()
    critic.compile(optimizer, wasserstein_loss,
                   metrics=['accuracy'])

    # The discriminator takes generated image as input and determines validity
    real_or_fake = critic(generated_image)

    # Only train the generator for the combined model
    critic.trainable = False

    # Stack the generator and discriminator in a combined model
    # Trains the generator to deceive the discriminator
    combined = Model(z, real_or_fake)
    combined.compile(loss=wasserstein_loss, optimizer=optimizer)

    # train the GAN system
    train(generator, critic, combined,
          steps=40000, batch_size=100, n_critic=5, clip_value=0.01)

    # display some random generated images
    plot_generated_images(generator)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-03-28 14:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.18. Adds the
    # Conference.initial_common_countries M2M field and re-points
    # ConferenceRegistration.country at countries.Country (CASCADE delete).

    dependencies = [
        ('countries', '0002_europecountry'),
        ('confreg', '0045_mail_volunteer_checkin'),
    ]

    operations = [
        migrations.AddField(
            model_name='conference',
            name='initial_common_countries',
            field=models.ManyToManyField(blank=True, help_text='Initial set of common countries', to='countries.Country'),
        ),
        migrations.AlterField(
            model_name='conferenceregistration',
            name='country',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='countries.Country', verbose_name='Country'),
        ),
    ]
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import sys
import time
import telepot
class TelegramBot(telepot.Bot):
    """Telepot bot that greets users in private chats (or when mentioned)
    and answers inline queries by echoing the query back as an article."""

    _answerer = None  # telepot.helper.Answerer used for inline queries
    _bot_data = None  # this bot's own profile, as returned by getMe()

    def __init__(self, *args, **kwargs):
        super(TelegramBot, self).__init__(*args, **kwargs)
        self._answerer = telepot.helper.Answerer(self)
        self._bot_data = self.getMe()

    def on_chat_message(self, msg):
        """Reply to private messages, or to messages mentioning the bot."""
        content_type, chat_type, chat_id = telepot.glance(msg)
        #print('Chat Message:', content_type, chat_type, chat_id)
        # NOTE(review): msg['text'] raises KeyError for non-text content
        # (stickers, photos, ...) in group chats — confirm this is intended.
        if chat_type == "private" or msg['text'].lower().find(
                self._bot_data['username'].lower()) > -1:
            reply = "Faaala {}, seu arrombado!".format(msg['from']['first_name'])
            self.sendMessage(chat_id, reply)

    def on_callback_query(self, msg):
        """Callback queries are parsed but currently ignored."""
        query_id, from_id, query_data = telepot.glance(msg, flavor='callback_query')
        #print('Callback Query:', query_id, from_id, query_data)

    def on_inline_query(self, msg):
        """Answer inline queries with one article echoing the query text."""
        query_id, from_id, query_string = telepot.glance(msg, flavor='inline_query')
        #print('Inline Query:', query_id, from_id, query_string)
        def compute_answer():
            # Compose your own answers
            articles = [{'type': 'article',
                    'id': 'abc', 'title': query_string, 'message_text': query_string}]
            return articles
        self._answerer.answer(msg, compute_answer)

    def on_chosen_inline_result(self, msg):
        """Chosen inline results are parsed but currently ignored."""
        result_id, from_id, query_string = telepot.glance(msg, flavor='chosen_inline_result')
        #print('Chosen Inline Result:', result_id, from_id, query_string)
# Bot Instantiation and execution
TOKEN = "TOKEN-HERE"  # placeholder: set a real BotFather token before running
topDaBalada = TelegramBot(TOKEN)
topDaBalada.message_loop()
print("listening...")
# Keep the main thread alive; telepot handles updates on its own thread.
while True:
    time.sleep(5)
# outputMessage = botData['first_name'] + " na área."
# print(outputMessage);
|
from functools import wraps
from models.users import UserModal
import datetime
import jwt
# from api.bucket import app
from api.bucket import *
def get_token():
    """Return the Authorization header value (None when absent); on an
    unexpected failure the exception object itself is returned."""
    try:
        return request.headers.get('Authorization')
    except Exception as exc:
        # log error
        return exc
def method_to_be_returned(func, *args, **kwargs):
    """Invoke *func*, forwarding any received arguments; on failure build
    a 500 JSON response instead of propagating the exception."""
    try:
        if args or kwargs:
            return func(*args, **kwargs)
        return func()
    except Exception as exc:
        response = jsonify({'Failed with Exception': exc})
        response.status_code = 500
        return response
def validate_content_type(f):
    """Decorator: reject requests whose Content-Type is not application/json."""
    @wraps(f)
    def decorated_method(*args, **kwargs):
        # log errors here
        if request.headers.get('content-type') == 'application/json':
            return method_to_be_returned(f, *args, **kwargs)
        response = jsonify({
            'Error': 'Content-Type not specified as application/json'
        })
        response.status_code = 400
        return response
    return decorated_method
def validate_token(f):
    """Decorator: require a valid JWT in the Authorization header.

    Responds 401 when the token is missing or does not resolve to a known
    user, 500 on unexpected decode errors; otherwise calls the wrapped view.
    """
    @wraps(f)
    def decorated_method(*args, **kwargs):
        token = get_token()
        if token is None:
            response = jsonify({
                'Error': 'There is no access token'
            })
            response.status_code = 401
            return response
        try:
            user_id = decode_auth_token(token)
            user = UserModal.get_user_by_id(user_id)
            if user is None:
                response = jsonify({
                    'status': 'mismatching or wrong token'
                })
                response.status_code = 401
                return response
        except Exception as exc:
            # str(exc): exception objects are not JSON-serializable.
            response = jsonify({
                'Failed with exception': str(exc)
            })
            response.status_code = 500
            # Bug fix: this response was previously built but never returned,
            # so the wrapped view still ran after a decode/lookup failure.
            return response
        return method_to_be_returned(f, *args, **kwargs)
    return decorated_method
def encode_auth_token(user_id):
    """Create a signed JWT for ``user_id`` that expires in one day.

    :param user_id: value stored in the token's ``sub`` claim
    :return: the encoded token, or the exception object on failure

    NOTE(review): the signing key is the *literal string*
    "app.config.get('SECRET_KEY')" — the quotes make it a constant, not the
    app's configured secret. decode_auth_token uses the same literal, so the
    pair is self-consistent, but this looks unintended; confirm.
    """
    try:
        payload = {
            'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1),
            'iat': datetime.datetime.utcnow(),
            'sub': user_id
        }
        auth_token = jwt.encode(
            payload,
            "app.config.get('SECRET_KEY')",
            algorithm='HS256')
        return auth_token
    except Exception as exc:
        # log errors here
        return exc
def decode_auth_token(token):
    """Decode a JWT produced by encode_auth_token and return its ``sub`` claim.

    :param token: the encoded JWT string
    :return: the user id on success, or an explanatory message string when
        the token is expired or invalid
    """
    try:
        # The key literal mirrors encode_auth_token (see the NOTE there).
        # algorithms must be pinned explicitly — required by PyJWT >= 2 and
        # good practice on older versions (prevents algorithm confusion).
        payload = jwt.decode(
            token, "app.config.get('SECRET_KEY')", algorithms=['HS256'])
        return payload['sub']
    except jwt.ExpiredSignatureError:
        # jwt.ExpiredSignature is a deprecated alias removed in PyJWT 2.x.
        return 'Token expired please login again'
    except jwt.InvalidTokenError:
        return 'Invalid token. Please login again \n'
from robosuite.models.grippers import GripperTester, PR2Gripper
def test_pr2():
    """Smoke-test the PR2 gripper with rendering disabled (CI entry point)."""
    pr2_tester(False)
def pr2_tester(render,
               total_iters=1,
               test_y=True):
    """Run the PR2 gripper through the GripperTester harness.

    :param render: whether to open a viewer window
    :param total_iters: number of test iterations to run
    :param test_y: whether to exercise motion along the y axis
    """
    harness = GripperTester(
        gripper=PR2Gripper(),
        pos="0 0 0.3",
        quat="0 0 1 0",
        gripper_low_pos=-0.02,
        gripper_high_pos=0.05,
        render=render,
    )
    harness.start_simulation()
    harness.loop(total_iters=total_iters, test_y=test_y)
# Manual invocation: render for 20 iterations, skip the y-axis test.
if __name__ == "__main__":
    pr2_tester(True, 20, False)
|
#!/usr/bin/env python3
import time
import argparse
import numpy as np
from collections import deque
import gym
import gym_minigrid
# from scores.score_logger import ScoreLogger
import random
from gym_minigrid.wrappers import *
from gym_minigrid.window import Window
def redraw(img):
    """Show a frame in the window, re-rendering the full grid unless the
    partially observable agent view was requested on the command line."""
    frame = img
    if not args.agent_view:
        frame = env.render('rgb_array', tile_size=args.tile_size)
    window.show_img(frame)
def reset():
    """Reset the environment (re-seeding when --seed was given) and redraw."""
    if args.seed != -1:
        env.seed(args.seed)
    obs = env.reset()
    if hasattr(env, 'mission'):
        print('Mission: %s' % env.mission)
        window.set_caption(env.mission)
    redraw(obs)
# Maps action names (as stored in decision-tree leaves) to the integer
# action ids expected by env.step().
ACTION_IDX = {
    'left' : 0,
    'right' : 1,
    'forward' : 2,
    'pickup' : 3,
    'drop' : 4,
    'toggle' : 5,
    'enter' : 6
}
def step(action):
    """Take one env step, then run the decision-tree Q-learning loop to done.

    Converts each observation into RDF triples (via preprocess/rdf), groups
    them into ``{predicate: [objects]}``, routes them through the global
    decision tree ``dTree`` to get an action, and records every transition
    with ``dTree.remember`` for Q-updates.  Every 300 inner steps the tree is
    told to explore with a random action.  Uses globals: env, dTree, window.
    """
    # NOTE: local `step` counter shadows this function's own name — harmless
    # here because the function is not called recursively.
    step = 0
    obs, reward, done, info = env.step(action)
    print('step=%s, reward=%.2f' % (env.step_count, reward))
    # new code
    # Build the initial RDF state from the (transposed) image observation.
    o = obs["image"]
    o = o.transpose()
    # print(o)
    o = preprocess(o)
    input_state = rdf(o)
    print("***********************input state*********************************** ")
    print(input_state)
    # Group triples by predicate: {'visible': ['key', ...], ...}
    input_rdf = {}
    for s, p, o in input_state:
        if p in list(input_rdf.keys()):
            input_rdf[p].append(o)
        else:
            # l = [[s, o]]
            input_rdf[p] = [o]
    print(input_rdf)
    leaf_state = []
    dTree.root.reward = reward
    state = dTree.root.traverse(input_rdf, leaf_state)
    # action = state_node.assertAction
    while not done:
        # key_handler.key = action
        redraw(obs)
        step += 1
        # get the action at current state
        print("Action to Take: ", state.assertAction)
        action = state.assertAction
        #get the next state
        obs_next, reward, done, info = env.step(ACTION_IDX[action])
        print('step=%s, reward=%.2f' % (env.step_count, reward))
        # Penalize termination by negating the final reward.
        reward = reward if not done else -reward
        o = obs_next["image"]
        o = o.transpose()
        o = preprocess(o)
        input_state = rdf(o)
        print(input_state)
        input_rdf = {}
        for s, p, o in input_state:
            if p in list(input_rdf.keys()):
                input_rdf[p].append(o)
            else:
                # l = [[s, o]]
                input_rdf[p] = [o]
        print(input_rdf)
        leaf_state = []  # default
        state.reward = reward
        # next_state = dTree.root.traverse(input_state, leaf_state)
        next_state = dTree.root.traverse(input_rdf, leaf_state)
        # print("Before Q-update: ", next_state.assertAction)
        #
        # Periodic exploration: force a random action on the next update.
        if (step % 300) == 0:
            dTree.randFlag = True
        dTree.remember(state, ACTION_IDX[action], reward, next_state, done)
        state = next_state
        obs = obs_next
        # action = state_node.assertAction
        # if action is None:
        #     action = random.randint(0, len(ACTION_TO_IDX)-1)
        if done:
            print("done")
            break;
    # old code
    if done:
        print('done!')
        reset()
    else:
        redraw(obs)
# Object-type ids used by the raw gym-minigrid observation encoding.
OBJECT_TO_IDX_OLD = {
    'unseen': 0,
    'empty': 1,
    'wall': 2,
    'floor': 3,
    'door': 4,
    'key': 5,
    'ball': 6,
    'box': 7,
    'goal': 8,
    'lava': 9,
    'agent': 10
}
# Compact ids for the only object types the decision tree reasons about.
OBJECT_TO_IDX_NEW = {
    'agent': 0,
    'key': 1,
    'door': 2,
    'goal': 3
}
def preprocess(old_state):
    """Compress a transposed minigrid observation into a (3, 4, 4) encoding.

    Channel 0 flags which tracked objects are visible, channel 1 which object
    the agent is carrying, channel 2 whether the door is locked.  Only row 0
    of each channel is ever written; rdf() reads only that row.
    """
    dim = len(OBJECT_TO_IDX_NEW)
    old_names = list(OBJECT_TO_IDX_OLD.keys())
    old_ids = list(OBJECT_TO_IDX_OLD.values())
    obs = np.zeros((3, dim, dim), dtype=int)
    visible = np.zeros((dim, dim), dtype=int)
    carrying = np.zeros((dim, dim), dtype=int)
    locked = np.zeros((dim, dim), dtype=int)
    # Channel 0: a tracked object appears anywhere in the view.
    for name, new_id in OBJECT_TO_IDX_NEW.items():
        if np.where(old_state[0] == OBJECT_TO_IDX_OLD[name])[0].size > 0:
            visible[0][new_id] = 1
    obs[0] = visible
    # Channel 1: what the agent carries.  Cell [6][3] of the transposed view
    # is presumably the agent's own tile ('empty' == 1 means hands free) —
    # TODO confirm against the wrapper's observation layout.
    agent_cell = old_state[0][6][3]
    if agent_cell != 1:
        carried = old_names[old_ids.index(agent_cell)]
        carrying[0][OBJECT_TO_IDX_NEW[carried]] = 1
    obs[1] = carrying
    # Channel 2: a value of 2 in the state plane encodes a locked door.
    if np.where(old_state[2] == 2)[0].size > 0:
        locked[0][OBJECT_TO_IDX_NEW['door']] = 1
    obs[2] = locked
    return obs
def rdf(o):
    """Turn a preprocess() observation into (subject, predicate, object) triples."""
    names = list(OBJECT_TO_IDX_NEW.keys())
    ids = list(OBJECT_TO_IDX_NEW.values())
    triples = []
    # Row 0 of each channel holds the flags written by preprocess().
    for predicate, plane in (("visible", o[0]), ("carrying", o[1]), ("locked", o[2])):
        for idx in np.where(plane[0] == 1)[0]:
            # ids[idx] == idx here (values mirror positions); this keeps the
            # original double lookup intact.
            triples.append(("agent", predicate, names[ids[idx]]))
    return triples
def key_handler(event):
    """Keyboard callback: escape closes, backspace resets, everything else
    maps to an environment action (unknown keys are ignored)."""
    print('pressed', event.key)
    key = event.key
    if key == 'escape':
        window.close()
        return
    if key == 'backspace':
        reset()
        return
    # Spacebar maps to toggle; remaining keys map 1:1 to env actions.
    bindings = {
        'left': env.actions.left,
        'right': env.actions.right,
        'up': env.actions.forward,
        ' ': env.actions.toggle,
        'pageup': env.actions.pickup,
        'pagedown': env.actions.drop,
        'enter': env.actions.done,
    }
    if key in bindings:
        step(bindings[key])
# Inverse action map: integer action id -> action name.
ACTION_TO_IDX = {
    0 : 'left',
    1 : 'right',
    2 : 'forward',
    3 : 'pickup',
    4 : 'drop',
    5 : 'toggle',
    6 : 'enter'
}
# Q-learning hyperparameters and replay-memory sizing.
DISCOUNT_FACTOR = 0.3
LEARNING_RATE = 0.001
MEMORY_SIZE = 1000
BATCH_SIZE = 20
ROOT_FLAG = False
class Tree:
    """Decision tree over RDF predicates with Q-learning at the leaves."""
    def __init__(self):
        self.root = None
        # self.old_state = None #store the leafnode
        self.memory = deque(maxlen=MEMORY_SIZE)  # replay buffer of transitions
        self.isFit = False
        self.randFlag = False  # when True, the next update takes a random action
        self.cnt = 0
    def remember(self, state, action, reward, next_state, done):
        """Store a transition and run a Q-update on the visited leaf.

        NOTE(review): cnt starts at 0 (< 50), so randFlag is cleared on the
        first call after it is set — only one random action is taken per
        exploration burst; confirm this is intended.
        """
        self.memory.append((state, action, reward, next_state, done))
        state.q_update(next_state, action, self.randFlag)
        if self.randFlag and self.cnt < 50:
            self.randFlag = False
            self.cnt = 0
        elif self.randFlag:
            self.cnt += 1
class TreeNode:
    """A node of the decision tree.

    Test nodes (nodeType 0) hold a (predicate, obj) pair and route lookups
    to their yes/no children; leaf nodes (nodeType 1) hold a Q-value vector
    over actions and the currently asserted action.
    """
    def __init__(self, predicate, obj, n=0, assertAction=""):
        self.nodeType = n  # 0 -> test node, 1 -> leaf node
        self.parent = None
        self.yes = None
        self.no = None
        self.reward = 0
        self.learning_rate = LEARNING_RATE
        self.discount_fact = DISCOUNT_FACTOR
        self.last_state = []
        self.assertactn = " "
        # for test nodes
        if self.nodeType == 0:
            self.predicate = predicate
            self.obj = obj
        # for leaf nodes
        else:
            self.expression = []
            self.assertAction = assertAction
            self.Q_val = 50
            # Optimistic initial Q-value for every action.
            self.Q_val_list = list(50 for i in range(len(ACTION_TO_IDX)))
    def insert(self, side, val, assertAction=" "):
        """Attach a child on ``side`` ('yes'/'no'): a test node when
        ``val == [predicate, obj]``, a leaf asserting ``assertAction`` when
        ``val`` is empty."""
        if len(val) != 0:
            if side == "yes":
                self.yes = TreeNode(val[0], val[1])
                self.yes.parent = self
            else:
                self.no = TreeNode(val[0], val[1])
                self.no.parent = self
        else:
            if side == "yes":
                self.yes = TreeNode("", "", 1, assertAction)
                self.yes.parent = self
            else:
                self.no = TreeNode("", "", 1, assertAction)
                # Bug fix: this branch used to assign self.yes.parent, leaving
                # the new leaf unparented (and raising AttributeError whenever
                # self.yes had not been created yet).
                self.no.parent = self
    def print(self):
        """Pre-order debug dump of the subtree."""
        if self.nodeType == 0:
            print("Test node ", self.predicate, self.obj)
        else:
            print("Leaf Node ", self.assertAction, self.expression, self.Q_val)
        if self.yes:
            self.yes.print()
        if self.no:
            self.no.print()
    def q_update(self, next_state, action, flag):
        """One tabular Q-learning step for ``action`` at this leaf.

        With ``flag`` set, skip the update and assert a random action
        (exploration); otherwise move Q[action] towards
        reward + gamma * max(next_state Q) and assert a greedy action,
        breaking ties at random.
        """
        print("-------Inside Q-update function--------")
        if flag:
            action = random.randint(0, len(ACTION_TO_IDX) - 1)
            self.assertAction = ACTION_TO_IDX[action]
            return
        self.Q_val_list[action] = self.Q_val_list[action] + self.learning_rate * (
            self.reward + self.discount_fact * max(next_state.Q_val_list) - self.Q_val_list[action]
        )
        m = max(self.Q_val_list)
        l = [i for i, j in enumerate(self.Q_val_list) if j == m]
        print("set of max value action : ", l)
        if len(l) > 1:
            action = random.choice(l)
        else:
            action = l[0]
        self.Q_val = self.Q_val_list[action]
        self.assertAction = ACTION_TO_IDX[action]
    def get_action(self):
        """Assert a uniformly random action if none has been chosen yet."""
        if self.assertAction == " ":
            action = random.randint(0, len(ACTION_TO_IDX)-1)
            self.assertAction = ACTION_TO_IDX[action]
    def traverse(self, predList, state_exp):
        """Route ``predList`` ({predicate: [objects]}) down to a leaf.

        Satisfied [predicate, obj] tests accumulate in ``state_exp``; the
        reached leaf (after get_action()) is returned.
        NOTE(review): when the predicate is absent the search prefers the
        'yes' child — that looks inverted for a failed test — and ``node``
        can be unbound when a matching child is missing; confirm before
        relying on those paths.
        """
        if self.nodeType == 1:
            self.expression = state_exp
            print("LEAF NODE FOUND, @ State ", self.expression)
            self.get_action()
            print("Q-Value State Action Pair: ", self.Q_val, self.assertAction)
            return self
        if self.predicate in predList.keys():
            if self.obj in predList[self.predicate]:
                p = self.predicate
                for i in range(len(predList[p])):
                    if predList[p][i] == self.obj:
                        o = predList[p][i]
                        if self.yes:
                            state_exp.append([p, o])
                            node = self.yes.traverse(predList, state_exp)
            else:
                if self.no:
                    node = self.no.traverse(predList, state_exp)
        else:
            if self.yes:
                node = self.yes.traverse(predList, state_exp)
            else:
                node = self.no.traverse(predList, state_exp)
        return node
def create_tree():
    """Hand-build the fixed decision tree used by the agent.

    The root tests 'visible key'; its yes-chain tests 'carrying key',
    'visible door' and 'locked door', branching into 'visible door' /
    'visible goal'; every other branch ends in a leaf whose asserted
    action starts unset (" ").  Prints the tree and returns the Tree.
    """
    dtree = Tree()
    dtree.root = TreeNode("visible", "key")
    # root_node = TreeNode("visible", "key")
    dtree.root.insert("yes", ["carrying", "key"])
    dtree.root.insert("no", [], " ")
    left = dtree.root.yes
    left.insert("yes", ["visible", "door"])
    left.insert("no", [], " ")
    left = left.yes
    left.insert("yes", ["locked", "door"])
    left.insert("no", [], " ")
    left = left.yes
    left.insert("yes", [], " ")
    left.insert("no", ["visible", "door"])
    right = left.no
    right.insert("yes", ["visible", "goal"])
    right.insert("no", [], " ")
    left = right.yes
    left.insert("yes", [], " ")
    left.insert("no", [], " ")
    # root_node.print()
    dtree.root.print()
    # return root_node
    return dtree
# Command-line options for the manual-control / decision-tree driver.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--env",
    help="gym environment to load",
    default='MiniGrid-MultiRoom-N6-v0'
)
parser.add_argument(
    "--seed",
    type=int,
    help="random seed to generate the environment with",
    default=-1
)
parser.add_argument(
    "--tile_size",
    type=int,
    help="size at which to render tiles",
    default=32
)
parser.add_argument(
    '--agent_view',
    default=False,
    help="draw the agent sees (partially observable view)",
    action='store_true'
)
args = parser.parse_args()
env = gym.make(args.env)
# Global decision tree shared by step() and the key handler.
dTree = create_tree()
if args.agent_view:
    env = RGBImgPartialObsWrapper(env)
    env = ImgObsWrapper(env)
window = Window('gym_minigrid - ' + args.env)
window.reg_key_handler(key_handler)
reset()
# Blocking event loop
window.show(block=True)
|
"""Saves constants used for event handling"""
class Enum(tuple):
    """Helper class to define an enum in python.

    Instantiate with a sequence of names; attribute access returns the
    name's position, e.g. ``Enum(['A', 'B']).B == 1``.  ``tuple.index``
    performs the lookup, so an unknown name raises ValueError rather than
    AttributeError.
    """
    __getattr__ = tuple.index
# Input / system events dispatched through the event loop; the Enum maps
# each name to its position in this tuple.
EVENTTYPES = Enum(
    ['NOEVENT', 'KEYUP', 'KEYDOWN', 'KEYLEFT', 'KEYRIGHT', 'KEYENTER',
    'KEYBACK', 'KEYPRESS', 'KEYMENU1', 'KEYMENU2', 'KEYMENU3', 'NFCSCANNED',
    'ESCAPE', 'ERROR'])
# NOTE(review): units are not shown here — presumably milliliters; confirm.
DEFAULT_GLASS_SIZE = 50
SCAN_RATE_LIMIT = 2 * 60  # 2 minutes (value in seconds)
|
#!/opt/local/bin/python3.7
# ------------------------------------------------------------------------------------------------ #
# fig-cbb_n2_to_amino_acids.py
# Calculates and exports electrical-to-fuel and fuel-mass efficiencies for the CBB N2-to-amino-acids scenarios
#
# Farshid Salimijazi and Buz Barstow
# Last updated by Buz Barstow on 2019-10-25
# ------------------------------------------------------------------------------------------------ #
from rewiredcarbon.scenario import ImportScenarioTable, CalculateScenarioEfficiencies, \
Plot_Efficiency_Bargraph, Generate_EfficienciesDict_Keys_Sorted_by_Efficiency, \
Export_Efficiency_Bargraph
from rewiredcarbon.utils import ensure_dir
# Input scenario table and output locations for the efficiency exports.
scenarioTableFileName = 'input/fig-cbb_n2_to_amino_acids.csv'
outputFilenameEff = 'output/fig-cbb-n2_to_amino_acids/fig-cbb_n2_to_amino_acids_eff.csv'
outputFilenameFuelMassEff = 'output/fig-cbb-n2_to_amino_acids/fig-cbb_n2_to_amino_acids_fuel_mass.csv'
# Create the output directories if they do not yet exist.
ensure_dir(outputFilenameEff)
ensure_dir(outputFilenameFuelMassEff)
scenarioDict = ImportScenarioTable(scenarioTableFileName)
efficienciesDict = CalculateScenarioEfficiencies(scenarioDict)
# keysArray = \
# Generate_EfficienciesDict_Keys_Sorted_by_Efficiency(efficienciesDict, 'effTotalElectricalToFuel')
# Plot scenarios in table order (the sorted ordering above is disabled).
keysArray = list(efficienciesDict.keys())
Plot_Efficiency_Bargraph(efficienciesDict, 'effTotalElectricalToFuel', \
'effTotalElectricalToFuel_lowerError', 'effTotalElectricalToFuel_upperError', keysToPlot=keysArray)
Plot_Efficiency_Bargraph(efficienciesDict, 'effTotalElectricalFuelMassEfficiency', \
'effTotalElectricalFuelMassEfficiency_lowerError', \
'effTotalElectricalFuelMassEfficiency_upperError', keysToPlot=keysArray)
Export_Efficiency_Bargraph(outputFilenameEff, efficienciesDict, scenarioDict, \
'effTotalElectricalToFuel', 'effTotalElectricalToFuel_lowerError', \
'effTotalElectricalToFuel_upperError', keysToPlot=keysArray)
Export_Efficiency_Bargraph(outputFilenameFuelMassEff, efficienciesDict, scenarioDict, \
'effTotalElectricalFuelMassEfficiency', 'effTotalElectricalFuelMassEfficiency_lowerError', \
'effTotalElectricalFuelMassEfficiency_upperError', keysToPlot=keysArray)
|
import os, sys
import itertools as it
outFilePath = sys.argv[1]
parameterDict = {}
for parameter in sys.argv[2:]:
parameterName = parameter.split("=")[0]
parameterOptions = parameter.split("=")[1].split(",")
parameterDict[parameterName] = parameterOptions
parameterNames = sorted(parameterDict.keys())
outLines = [parameterNames]
combinations = [dict(zip(parameterNames, prod)) for prod in it.product(*(parameterDict[varName] for varName in parameterNames))]
for combo in combinations:
outLines.append([combo[parameterName] for parameterName in parameterNames])
outFile = open(outFilePath, 'w')
for outLine in outLines:
outFile.write("\t".join(outLine) + "\n")
outFile.close()
|
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
from imitation.policies.base import HardCodedPolicy
from .lq_game import LQFeedbackGame, AffineStrategy
import imitating_games
class FeedbackGameEnv(gym.Env):
    """
    A generic, stateful wrapper that creates a `gym.Env` from a game that makes the player with
    index `protagonist_index` the protagonist, converting the problem to a single-player problem.
    """
    def __init__(self):
        game = imitating_games.GuidanceFeedbackGame()
        self.stage_strategies = game.solve()
        # Player 0 is hard-coded as the protagonist for now.
        self.protagonist = game.players[0]#game.players[protagonist_index]
        self.game = game
        nx = game.dynamics.dims[0]
        nu_protagonist = len(self.protagonist.input_indices)
        # Unbounded continuous boxes sized to the protagonist's inputs and
        # the game's state dimension.
        self.action_space = spaces.Box(
            low=-np.ones(nu_protagonist) * np.inf,
            high=np.ones(nu_protagonist) * np.inf,
            dtype=np.float32,
        )
        self.observation_space = spaces.Box(
            low=-np.ones(nx) * np.inf, high=np.ones(nx) * np.inf, dtype=np.float32
        )
        # The internal "episode memory" that also serves as state.
        self.seed()
        # Note: currently we don't do any seeding (because we always start from the same initial state)
    def seed(self, seed=None):
        """Seed the env RNG and both spaces; returns all seeds used."""
        # TODO: is this safe? Or will this correlate the rngs?
        self.np_random, main_seed = seeding.np_random(seed)
        [action_seed] = self.action_space.seed(main_seed + 1)
        [observation_seed] = self.observation_space.seed(main_seed + 2)
        return [main_seed, action_seed, observation_seed]
    def reset(self):
        """Start a new episode from a uniformly random initial state."""
        x0 = self.np_random.uniform(low=-1, high=1, size=self.game.dynamics.dims[0])
        self.trajectory = [x0]
        return x0
    def step(self, action: np.ndarray):
        """
        Apply the `action` from the current state (end of `self.trajectory`) by overwriting the
        protagonist action.
        """
        # NOTE(review): on the first step this is -1, which indexes the *last*
        # stage strategy — confirm the intended time indexing.
        t = len(self.trajectory) - 2
        x = self.trajectory[-1]
        u = self.stage_strategies[t].control_input(x).copy()
        # overwrite the protagonist actions
        u[self.protagonist.input_indices] = action
        next_x = self.game.dynamics.next_state(x, u)
        self.trajectory.append(next_x)
        ob = self.trajectory[-1]
        # Note: We reward the agent for the *next* state since the current state cannot be
        # influenced by the `action` and we terminate the game once we rolled out the full horizon.
        reward = -(self.protagonist.state_cost(next_x) + self.protagonist.input_cost(u))
        done = len(self.trajectory) >= self.game.horizon
        # (A dead `info = None` assignment was removed; an empty dict is returned.)
        return ob, reward, done, {}
    def render(self, mode="human"):
        """Delegate visualization of the rolled-out trajectory to the game."""
        return self.game.visualize(self.trajectory)
class GameSolverExpertPolicy(HardCodedPolicy):
    """Expert policy that replays the protagonist's slice of the
    game-theoretic solver's strategy, for use as an imitation-learning
    demonstrator against FeedbackGameEnv."""
    def __init__(self, game_env):
        super().__init__(game_env.observation_space, game_env.action_space)
        self.game_env = game_env
    def _choose_action(self, obs: np.ndarray):
        # TODO: This is not quite correct. Currently it just always takes the first action because
        # time is not part of the state yet.
        x = obs
        expert_strategy: AffineStrategy = self.game_env.stage_strategies[0]
        # Restrict the joint control input to the protagonist's components.
        u_expert = expert_strategy.control_input(x)[
            self.game_env.protagonist.input_indices
        ]
        return u_expert
|
import numpy as np
import pytest
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from scipy.sparse import csc_matrix
from sparse_ho.models import SparseLogreg
from sparse_ho.forward import get_beta_jac_iterdiff
from sparse_ho.implicit_forward import get_beta_jac_fast_iterdiff
from sparse_ho.forward import Forward
from sparse_ho.implicit_forward import ImplicitForward
from sparse_ho.implicit import Implicit
from sparse_ho.criterion import Logistic
from sparse_ho.utils import Monitor
from sparse_ho.ho import grad_search
# Synthetic binary classification problems: one for training, one for
# validation (different random_state), each with a sparse CSC copy.
n_samples = 100
n_features = 1000
X_train, y_train = datasets.make_classification(
    n_samples=n_samples,
    n_features=n_features, n_informative=50,
    random_state=110, flip_y=0.1, n_redundant=0)
X_train_s = csc_matrix(X_train)
X_val, y_val = datasets.make_classification(
    n_samples=n_samples,
    n_features=n_features, n_informative=50,
    random_state=122, flip_y=0.1, n_redundant=0)
X_val_s = csc_matrix(X_val)
# The logistic losses below expect labels in {-1, 1}.
y_train[y_train == 0.0] = -1.0
y_val[y_val == 0.0] = -1.0
# Regularization: 30% of the largest useful alpha, passed as log(alpha).
alpha_max = np.max(np.abs(X_train.T @ (- y_train)))
alpha_max /= (2 * n_samples)
alpha = 0.3 * alpha_max
log_alpha = np.log(alpha)
tol = 1e-16
# Dense and sparse models using sparse_ho's internal solver ...
models = [
    SparseLogreg(
        X_train, y_train, max_iter=10000, estimator=None),
    SparseLogreg(
        X_train_s, y_train, max_iter=10000, estimator=None)
]
# ... and the same pair backed by a scikit-learn saga solver.
estimator = LogisticRegression(
    penalty="l1", tol=1e-12, fit_intercept=False, max_iter=100000,
    solver="saga")
models_custom = [
    SparseLogreg(
        X_train, y_train, max_iter=10000, estimator=estimator),
    SparseLogreg(
        X_train_s, y_train, max_iter=10000, estimator=estimator)
]
def get_v(mask, dense):
    """Gradient of the validation loss restricted to the active set ``mask``."""
    X_active = X_val[:, mask]
    residual = X_active @ dense - y_val
    return 2 * (X_active.T @ residual) / X_val.shape[0]
@pytest.mark.parametrize('model', models)
def test_beta_jac(model):
    """Coefficients and Jacobians agree between iterdiff and fast solvers
    and match a scikit-learn L1 logistic regression fit.

    NOTE(review): supp3/dense3/jac3 repeat exactly the supp1 computation,
    and the last call passes sparse X_train_s with the same `model` —
    presumably deliberate consistency checks; confirm.
    """
    supp1, dense1, jac1 = get_beta_jac_iterdiff(
        X_train, y_train, log_alpha, tol=tol,
        model=model, compute_jac=True, max_iter=1000)
    # Reference fit: C is the sklearn parameterization of alpha.
    clf = LogisticRegression(penalty="l1", tol=1e-12, C=(
        1 / (alpha * n_samples)), fit_intercept=False, max_iter=100000,
        solver="saga")
    clf.fit(X_train, y_train)
    supp_sk = clf.coef_ != 0
    dense_sk = clf.coef_[supp_sk]
    supp2, dense2, jac2 = get_beta_jac_fast_iterdiff(
        X_train, y_train, log_alpha,
        get_v, tol=tol, model=model, tol_jac=1e-12)
    supp3, dense3, jac3 = get_beta_jac_iterdiff(
        X_train, y_train, log_alpha, tol=tol,
        model=model, compute_jac=True, max_iter=1000)
    supp4, dense4, jac4 = get_beta_jac_fast_iterdiff(
        X_train_s, y_train, log_alpha,
        get_v, tol=tol, model=model, tol_jac=1e-12)
    assert np.all(supp1 == supp_sk)
    assert np.allclose(dense1, dense_sk, atol=1e-4)
    assert np.all(supp1 == supp2)
    assert np.allclose(dense1, dense2)
    assert np.allclose(jac1, jac2, atol=1e-4)
    assert np.all(supp2 == supp3)
    assert np.allclose(dense2, dense3)
    assert np.allclose(jac2, jac3, atol=1e-4)
    assert np.all(supp3 == supp4)
    assert np.allclose(dense3, dense4)
    assert np.allclose(jac3, jac4, atol=1e-4)
@pytest.mark.parametrize(('model', 'model_custom'), list(zip(models, models_custom)))
def test_beta_jac_custom_solver(model, model_custom):
    """The default and custom-estimator solvers produce identical results.

    Bug fix: the parametrization previously passed (models, models_custom)
    directly, which unpacked models[0]/models[1] as the two arguments of one
    case (pairing dense-with-sparse) instead of pairing each model with its
    custom-estimator counterpart; zip builds the intended pairs.
    """
    supp, dense, jac = get_beta_jac_fast_iterdiff(
        X_train, y_train, log_alpha,
        get_v, tol=tol, model=model, tol_jac=1e-12)
    supp_custom, dense_custom, jac_custom = get_beta_jac_fast_iterdiff(
        X_train, y_train, log_alpha, get_v, tol=tol, model=model_custom,
        tol_jac=1e-12)
    assert np.all(supp == supp_custom)
    assert np.allclose(dense, dense_custom)
    assert np.allclose(jac, jac_custom)
@pytest.mark.parametrize('model', models)
def test_val_grad(model):
    """Validation value and hypergradient agree across the three
    differentiation algorithms (forward, implicit-forward, implicit)."""
    criterion = Logistic(X_val, y_val, model)
    algo = Forward(criterion)
    val_fwd, grad_fwd = algo.get_val_grad(log_alpha, tol=tol)
    criterion = Logistic(X_val, y_val, model)
    algo = ImplicitForward(criterion, tol_jac=1e-8, n_iter_jac=5000)
    val_imp_fwd, grad_imp_fwd = algo.get_val_grad(log_alpha, tol=tol)
    criterion = Logistic(X_val, y_val, model)
    algo = Implicit(criterion)
    val_imp, grad_imp = algo.get_val_grad(
        log_alpha, tol=tol)
    assert np.allclose(val_fwd, val_imp_fwd, atol=1e-4)
    assert np.allclose(grad_fwd, grad_imp_fwd, atol=1e-4)
    assert np.allclose(val_imp_fwd, val_imp, atol=1e-4)
    # for the implcit the conjugate grad does not converge
    # hence the rtol=1e-2
    assert np.allclose(grad_imp_fwd, grad_imp, rtol=1e-2)
@pytest.mark.parametrize(('model', 'model_custom'), list(zip(models, models_custom)))
def test_val_grad_custom(model, model_custom):
    """ImplicitForward gives the same value/gradient with the custom estimator.

    Bug fix: the parametrization previously paired models[0] with models[1]
    (dense vs sparse) instead of each model with its custom counterpart;
    zip builds the intended pairs.
    """
    criterion = Logistic(X_val, y_val, model)
    algo = ImplicitForward(criterion, tol_jac=1e-8, n_iter_jac=5000)
    val, grad = algo.get_val_grad(log_alpha, tol=tol)
    criterion = Logistic(X_val, y_val, model_custom)
    algo = ImplicitForward(criterion, tol_jac=1e-8, n_iter_jac=5000)
    val_custom, grad_custom = algo.get_val_grad(log_alpha, tol=tol)
    assert np.allclose(val, val_custom)
    assert np.allclose(grad, grad_custom)
@pytest.mark.parametrize('model', models)
@pytest.mark.parametrize('crit', ['cv'])
def test_grad_search(model, crit):
    """check that the paths are the same in the line search"""
    n_outer = 2
    criterion = Logistic(X_val, y_val, model)
    monitor1 = Monitor()
    algo = Forward(criterion)
    grad_search(algo, log_alpha, monitor1, n_outer=n_outer,
                tol=tol)
    criterion = Logistic(X_val, y_val, model)
    monitor2 = Monitor()
    algo = Implicit(criterion)
    grad_search(algo, log_alpha, monitor2, n_outer=n_outer,
                tol=tol)
    criterion = Logistic(X_val, y_val, model)
    monitor3 = Monitor()
    algo = ImplicitForward(criterion, tol_jac=tol, n_iter_jac=5000)
    grad_search(algo, log_alpha, monitor3, n_outer=n_outer,
                tol=tol)
    # Forward and implicit-forward must follow identical optimization paths;
    # wall-clock times, however, should differ (sanity check on the monitors).
    assert np.allclose(
        np.array(monitor1.log_alphas), np.array(monitor3.log_alphas))
    assert np.allclose(
        np.array(monitor1.grads), np.array(monitor3.grads), atol=1e-4)
    assert np.allclose(
        np.array(monitor1.objs), np.array(monitor3.objs))
    assert not np.allclose(
        np.array(monitor1.times), np.array(monitor3.times))
@pytest.mark.parametrize(('model', 'model_custom'), list(zip(models, models_custom)))
@pytest.mark.parametrize('crit', ['cv'])
def test_grad_search_custom(model, model_custom, crit):
    """check that the paths are the same in the line search

    Bug fix: the parametrization previously paired models[0] with models[1]
    (dense vs sparse) instead of each model with its custom counterpart;
    zip builds the intended pairs.
    """
    n_outer = 5
    criterion = Logistic(X_val, y_val, model)
    monitor = Monitor()
    algo = ImplicitForward(criterion, tol_jac=tol, n_iter_jac=5000)
    grad_search(algo, log_alpha, monitor, n_outer=n_outer, tol=tol)
    criterion = Logistic(X_val, y_val, model_custom)
    monitor_custom = Monitor()
    algo = ImplicitForward(criterion, tol_jac=tol, n_iter_jac=5000)
    grad_search(algo, log_alpha, monitor_custom, n_outer=n_outer, tol=tol)
    assert np.allclose(
        np.array(monitor.log_alphas), np.array(monitor_custom.log_alphas))
    assert np.allclose(
        np.array(monitor.grads), np.array(monitor_custom.grads), atol=1e-4)
    assert np.allclose(
        np.array(monitor.objs), np.array(monitor_custom.objs))
    assert not np.allclose(
        np.array(monitor.times), np.array(monitor_custom.times))
# Ad-hoc manual run of the non-custom checks without pytest.
if __name__ == '__main__':
    for model in models:
        test_beta_jac(model)
        test_grad_search(model, 'cv')
        test_val_grad(model)
|
#!coding:utf8
#author:yqq
#date:2019/3/4 0004 14:35
#description: 比特币地址生成算法
#author:yqq
#date:2020/07/9 11:22
#description: 迁移到 python3
import hashlib
import ecdsa
import os
from binascii import unhexlify, hexlify
# Base58 alphabet (Bitcoin flavour: no 0, O, I or l).
g_b58 = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
# Private keys are restricted to a sub-range of the secp256k1 group order;
# the offsets trimming both ends were chosen arbitrarily by the author.
g_nMaxPrivKey = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364140 - 0x423423843
g_nMinPrivKey = 0x0000000000000000000000000000000000000000000000000000000000000001 + 0x324389329
def Base58encode(n):
    '''
    Encode a non-negative integer in Base58.
    :param n: the integer to encode
    :return: Base58 string ('' when n == 0)
    '''
    digits = []
    while n > 0:
        n, rem = divmod(n, 58)
        digits.append(g_b58[rem])
    return ''.join(reversed(digits))
def Base256decode(s):
    '''
    Interpret a byte sequence as a big-endian unsigned integer.
    :param s: the bytes to decode
    :return: the integer value
    '''
    value = 0
    for byte in s:
        value = value * 256 + byte
    return value
def CountLeadingChars(s, ch):
    '''
    Count how many times ch repeats at the start of s,
    e.g. CountLeadingChars('000001234', '0') == 5.
    :param s: sequence to scan
    :param ch: element to count
    :return: length of the leading run
    '''
    count = 0
    for current in s:
        if current != ch:
            break
        count += 1
    return count
def Base58CheckEncode(version, payload):
    '''
    Base58Check: version byte + payload + 4-byte double-SHA256 checksum,
    with each leading zero byte rendered as the character '1'.
    :param version: version prefix (distinguishes mainnet from testnet)
    :param payload: payload bytes to encode
    :return: the Base58Check string
    '''
    body = version.to_bytes(1, 'little') + payload
    checksum = hashlib.sha256(hashlib.sha256(body).digest()).digest()[0:4]
    full = body + checksum
    pad = CountLeadingChars(full, 0)
    return '1' * pad + Base58encode(Base256decode(full))
def PrivKeyToPubKey(privKey):
    '''
    Private key -> uncompressed public key (65 bytes: 0x04 + x + y).
    :param privKey: hex-encoded private key
    :return: hex-encoded uncompressed public key
    '''
    # Bug fix: the old body used Python-2-only str.decode('hex') /
    # .encode('hex'), which raises on Python 3 (this file was migrated to
    # py3); binascii unhexlify/hexlify are the py3 equivalents, matching
    # PrivKeyToPubKeyCompress below.
    sk = ecdsa.SigningKey.from_string(unhexlify(privKey), curve=ecdsa.SECP256k1)
    # vk = sk.verifying_key
    return hexlify(b'\x04' + sk.verifying_key.to_string()).decode('latin')
#
# def PrivKeyToPubKeyCompress(privKey):
# '''
# 私钥-->公钥 压缩格式公钥
# :param privKey: ( 如果是奇数,前缀是 03; 如果是偶数, 前缀是 02) + x轴坐标
# :return:
# '''
# sk = ecdsa.SigningKey.from_string(privKey.decode('hex'), curve=ecdsa.SECP256k1)
# # vk = sk.verifying_key
# try:
# # print(sk.verifying_key.to_string().encode('hex'))
# point_x = sk.verifying_key.to_string().encode('hex')[ : 32*2] #获取点的 x 轴坐标
# point_y = sk.verifying_key.to_string().encode('hex')[32*2 : ] #获取点的 y 轴坐标
# # print("point_x:", point_x)
#
# if (long(point_y, 16) & 1) == 1: # 如果是奇数,前缀是 03; 如果是偶数, 前缀是 02
# prefix = '03'
# else:
# prefix = '02'
# return prefix + point_x
# except:
# raise("array overindex")
# pass
def PrivKeyToPubKeyCompress(privKey):
    '''
    Private key -> compressed public key.
    Prefix is 03 when the point's y coordinate is odd, 02 when even,
    followed by the x coordinate (the ecdsa library handles this).
    :param privKey: hex-encoded private key
    :return: hex-encoded compressed public key
    '''
    sk = ecdsa.SigningKey.from_string( unhexlify( privKey), curve=ecdsa.SECP256k1)
    s = sk.get_verifying_key().to_string(encoding='compressed')
    return hexlify(s).decode('latin')
#https://en.bitcoin.it/wiki/List_of_address_prefixes
def PubKeyToAddr(pubKey, isTestnet = False):
    '''
    Public key -> P2PKH address: Base58Check of RIPEMD160(SHA256(pubkey)).
    :param pubKey: hex-encoded public key
    :param isTestnet: whether to use the testnet version byte
    :return: the address string
    '''
    ripemd160 = hashlib.new('ripemd160')
    ripemd160.update(hashlib.sha256( unhexlify(pubKey)).digest())
    if isTestnet:
        return Base58CheckEncode(0x6F, ripemd160.digest()) #0x6F p2pkh testnet
    # return base58CheckEncode(0x05, ripemd160.digest()) #05 p2sh mainnet
    return Base58CheckEncode(0x00, ripemd160.digest()) #00 p2pkh mainnet
def PrivKeyToWIF(privKey, isTestnet = False):
    '''
    Convert a private key to WIF (wallet import format) for wallet import.
    :param privKey: hex-encoded private key
    :param isTestnet: whether to use the testnet version byte
    :return: the WIF string
    '''
    if isTestnet:
        return Base58CheckEncode(0xEF, unhexlify( privKey) ) #0xEF testnet
    return Base58CheckEncode(0x80, unhexlify( privKey) ) #0x80 mainnet
def PrivKeyToWIFCompress(privKey, isTestnet = False):
    '''
    Convert a private key to compressed-format WIF (trailing 0x01 marks
    the compressed-pubkey convention) for wallet import.
    :param privKey: hex-encoded private key
    :param isTestnet: whether to use the testnet version byte
    :return: the WIF string
    '''
    if isTestnet:
        return Base58CheckEncode(0xEF, unhexlify( privKey) + b'\01') #0xEF testnet
    return Base58CheckEncode(0x80, unhexlify( privKey) + b'\01') #0x80 mainnet
#
# def GenPrivKey():
# '''
# 生成私钥, 使用 os.urandom (底层使用了操作系统的随机函数接口, 取决于CPU的性能,各种的硬件的数据指标)
# :return:私钥(16进制编码)
# '''
# return os.urandom(32).encode('hex') #生成 256位 私钥
def GenPrivKey():
    '''
    Generate a private key using os.urandom (the OS entropy source).
    :return: 256-bit private key, hex-encoded (as bytes)
    '''
    # 2019-05-15: keys are rejected until they fall inside the allowed range.
    while True:
        privKey = hexlify(os.urandom(32)) # 256-bit key
        if g_nMinPrivKey < int(privKey, 16) < g_nMaxPrivKey:
            return privKey
def GenAddr(isTestnet=False):
    '''
    Generate a key pair and address (uncompressed pubkey).  Intended to be
    called from C++.
    :param isTestnet: whether to target the test network
    :return: (private key WIF, public key, address) as strings
    '''
    privKey = GenPrivKey()
    # print("privkey : " + privKey)
    privKeyWIF = PrivKeyToWIF(privKey, isTestnet)
    # print("privkey WIF:" + PrivKeyToWIF(privKey, isTestnet))
    pubKey = PrivKeyToPubKey(privKey)
    # print("pubkey : " + pubKey)
    addr = PubKeyToAddr(pubKey, isTestnet)
    # print("addr : " + addr)
    return str(privKeyWIF), str(pubKey), str(addr)
def GenAddrCompress(isTestnet=False):
    '''
    Generate a key pair and address (compressed pubkey).  Intended to be
    called from C++.
    :param isTestnet: whether to target the test network
    :return: (private key WIF, public key, address) as strings
    '''
    privKey = GenPrivKey()
    # print("privkey : " + privKey)
    privKeyWIF = PrivKeyToWIFCompress(privKey, isTestnet)
    # print("privkey WIF:" + PrivKeyToWIF(privKey, isTestnet))
    pubKey = PrivKeyToPubKeyCompress(privKey)
    # print("pubkey : " + pubKey)
    addr = PubKeyToAddr(pubKey, isTestnet)
    # print("addr : " + addr)
    return str(privKeyWIF), str(pubKey), str(addr)
def gen_addr_from_privkey(hex_priv_key : str, nettype: str) -> str:
    """
    Derive a (compressed-pubkey) address from an existing private key.
    :param hex_priv_key: hex-encoded private key
    :param nettype: 'mainnet' selects the mainnet version byte; any other
        value selects testnet
    :return: the address string
    """
    pubKey = PrivKeyToPubKeyCompress(hex_priv_key)
    if nettype == 'mainnet':
        address = PubKeyToAddr(pubKey, isTestnet=False)
    else:
        address = PubKeyToAddr(pubKey, isTestnet=True)
    return address
def GenMultiAddr(nAddrCount = 1, isTestnet=True):
    '''
    Generate several compressed-key addresses.
    :param nAddrCount: how many addresses to produce
    :param isTestnet: whether to target the test network
    :return: list of (privkey WIF, pubkey, address) tuples
    '''
    return [GenAddrCompress(isTestnet) for _ in range(nAddrCount)]
def good():
    """Smoke-test helper: generate one key and print its WIF / public-key /
    address forms, uncompressed first, then compressed."""
    # isTestnet = True
    isTestnet = False
    private_key = GenPrivKey()
    # BUG FIX: a leftover debug line (`private_key = ''`) reset the key here,
    # so every derivation below ran on an empty string. Removed.
    # NOTE(review): GenPrivKey returns hexlify() output, which is bytes on
    # Python 3 -- the `+` string concatenations below assume Python 2 str;
    # confirm the interpreter this script targets.
    print("privkey : " + private_key)
    print("privkey WIF:" + PrivKeyToWIF(private_key, isTestnet))
    # NOTE(review): this "uncompressed" section pairs the uncompressed WIF
    # with the *compressed* public key; it probably should call
    # PrivKeyToPubKey. Kept as-is pending confirmation of intent.
    pubKey = PrivKeyToPubKeyCompress(private_key)
    print("pubkey : " + pubKey)
    addr = PubKeyToAddr(pubKey, isTestnet)
    print("addr : " + addr)
    print("-----------------------------")
    print("privkey WIF compress:" + PrivKeyToWIFCompress(private_key, isTestnet))
    pubKey = PrivKeyToPubKeyCompress(private_key)
    print("pubkey compress : " + pubKey)
    addr = PubKeyToAddr(pubKey, isTestnet)
    print("addr compress: " + addr)
def foo():
    """Demo: prefix a payload with a one-byte little-endian length-style
    value and print the hex encoding of the result."""
    value = 100
    payload = b'\01\02\03'
    encoded = value.to_bytes(1, 'little') + payload
    print('c = {} '.format(hexlify(encoded)))
def main():
    """Entry point for the command-line demo: run the key-generation check."""
    good()
# Run the demo only when executed directly, not when imported as a module.
if __name__ == '__main__':
    main()
|
from multiprocessing import Process


def foo():
    """Target executed in the child process: print a greeting."""
    print('hello')


# BUG FIX: process creation must be guarded by __main__. On platforms that
# use the "spawn" start method (Windows; macOS on Python >= 3.8) the child
# re-imports this module, and an unguarded Process(...).start() would spawn
# processes recursively.
if __name__ == '__main__':
    p = Process(target=foo)
    p.start()
    p.join()  # reap the child so it does not linger
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gazoo Device Manager (gdm) is a single interface for device interactions.
It manages communication and setup of smart devices to simplify daily tasks
(software updating a unit, getting logs) as well as serving as a device driver
for automated (python) tests.
Areas of focus: ease of use (including ease of installation, setup, updating),
robustness, scalability.
Examples of functionality:
* Detecting and setting up devices when they are attached (create config
files automatically).
* Setting up shared permissions/file locations so all users can access test
hardware (if they can log onto the pc).
* Sharing a single device UART for logging and commands, to allow logs to be
streamed "live" during a test while still sending commands/responses during a
test.
Command Line Interface (CLI):
There are two groups of commands.
"Manager" commands operate on local config files on the pc.
The command may talk to devices (detect), but they don't change the
state of the device (i.e. read-only).
The command may change the config files stored on the pc, to detect
new devices, add an alias, etc.
For manager commands, the format is
"gdm <command> <device> [<additional arguments>]".
For example: "gdm log cambrionix-1234".
"Device" commands talk to or configure connected devices.
This includes upgrade, reboot, etc.
For device commands, the format is
"gdm issue <device> - <command> [<arguments>]".
For example: "gdm issue cambrionix-1234 - reboot".
You can get more details on a particular command at the command line with
"gdm man DEVICENAME COMMAND" (does not require a connected device) or
"gdm issue DEVICENAME - COMMAND -- --help" (requires a connected device).
For example: "gdm issue cambrionix-1234 - reboot -- --help"
"gdm man cambrionix reboot"
Refer to https://github.com/google/gazoo-device for full documentation.
"""
import gc
import logging
import multiprocessing
import os
import signal
import sys
from gazoo_device import _version
from gazoo_device import gazoo_device_controllers
from gazoo_device import gdm_logger
from gazoo_device import manager
from gazoo_device import mobly_controller
from gazoo_device import package_registrar
from gazoo_device.utility import common_utils
# Public API aliases re-exported at package level.
Manager = manager.Manager
register = package_registrar.register
version = _version.version
__version__ = _version.version
# For Mobly integration: names required by Mobly's controller-module
# protocol (the config-key constant plus create/destroy/get_info).
MOBLY_CONTROLLER_CONFIG_NAME = "GazooDevice"
create = mobly_controller.create
destroy = mobly_controller.destroy
get_info = mobly_controller.get_info
# Defend against inadvertent basicConfig, which adds log noise
logging.getLogger().addHandler(logging.NullHandler())
# Ensure that atexit handlers run when killed by SIGTERM
def graceful_exit(*args, **kwargs):  # pylint: disable=unused-argument
    """Signal handler: raise SystemExit(0) so atexit handlers still run."""
    raise SystemExit(0)
# Convert SIGTERM into a clean SystemExit (see graceful_exit above).
signal.signal(signal.SIGTERM, graceful_exit)
if sys.platform == "darwin" and sys.version_info >= (3, 8):
    # Workaround for b/160958582: Python >= 3.8 defaults to "spawn" start method
    # on Macs. GDM isn't compatible with "spawn" yet, so use the "fork" method
    # instead.
    multiprocessing.set_start_method("fork", force=True)
# Set up logger
gdm_logger.initialize_logger()
def _after_fork():
    """Re-enables garbage collection in both parent & child process."""
    # Counterpart to _before_fork, which disabled periodic GC around os.fork().
    gc.enable()
def _before_fork():
    """Collects garbage, disables periodic GC, and flushes logger messages.
    This ensures that the logging thread is not logging anything during os.fork()
    calls. If the logging thread is logging during an os.fork() call, child
    processes may be forked with an acquired stdout lock, which will cause a
    deadlock when the child process finishes. Child processes attempt to flush the
    stdout buffer before exiting and will hang indefinitely if forked with an
    acquired stdout buffer lock.
    """
    gc.disable()  # stop periodic GC from running while the fork is in flight
    gc.collect()  # collect now so children don't inherit the parent's garbage
    gdm_logger.flush_queue_messages()  # drain the log queue before forking
# Periodic GC is active in child processes created via os.fork.
# Trigger a collection in the parent process prior to forking to prevent
# parent process's garbage from being copied over to the child and later being
# collected in each process. See b/150018610.
common_utils.register_at_fork(before=_before_fork,
                              after_in_parent=_after_fork,
                              after_in_child=_after_fork)
# Register device classes and capabilities built into GDM
# (see package_registrar.register for what registration entails).
package_registrar.register(gazoo_device_controllers)
|
#!/usr/bin/env python
# we're using python 3.x style print but want it to work in python 2.x,
from __future__ import print_function
import os
import argparse
import sys
import math
import subprocess
import shutil
import tempfile
import threading
# CLI: evaluate the log-probability / perplexity of a text file under a
# pocolm language-model directory.
parser = argparse.ArgumentParser(description="This script evaluates the probability of some "
                                 "data (in text or gzipped-text format), given a language model "
                                 "in a pocolm 'lm-dir' (as validated by validate_lm_dir.py). "
                                 "The perplexity is printed to the standard output.",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--max-memory", type=str, default='',
                    help="Memory limitation for sort.")
parser.add_argument("text_in", type=str,
                    help="Filename of input data (one sentence per line, no BOS or "
                    "EOS symbols; text or gzipped text")
parser.add_argument("lm_dir_in",
                    help="Source directory, for the input language model.")
args = parser.parse_args()
# Add the script dir and the src dir to the path.
os.environ['PATH'] = (os.environ['PATH'] + os.pathsep +
                      os.path.abspath(os.path.dirname(sys.argv[0])) + os.pathsep +
                      os.path.abspath(os.path.dirname(sys.argv[0])) + "/../src")
# this will affect the program "sort" that we call.
os.environ['LC_ALL'] = 'C'
# os.system returns the shell exit status; non-zero means validation failed.
if os.system("validate_lm_dir.py " + args.lm_dir_in) != 0:
    sys.exit("get_data_prob.py: failed to validate input LM-dir")
# verify the input string max_memory
def ValidateMaxMemory(max_memory):
    """Validate and normalize the --max-memory option passed on to "sort".

    Accepts '' (no limit), "<positive integer>", or a positive integer
    followed by one of b/B/%/k/K/m/M/g/G.  Exits with an error message on
    bad input.  A trailing 'B' is rewritten to 'b' (the spelling "sort"
    recognizes).

    :param max_memory: raw option string
    :return: the validated (possibly normalized) option string
    """
    if max_memory == '':
        return max_memory
    # valid string max_memory must have at least two items
    if len(max_memory) < 2:
        sys.exit("get_data_prob.py: the length of string --max-memory must >= 2.")
    # valid string max_memory can be formatted as:
    # "a positive integer + a letter or a '%'" or "a positive integer"
    # the unit of memory size can also be 'T', 'P', 'E', 'Z', or 'Y'. They
    # are not included here considering their rare use in practice
    if max_memory[-1] not in ['b', 'B', '%', 'k', 'K', 'm', 'M', 'g', 'G'] \
            and not max_memory[-1].isdigit():
        sys.exit("get_data_prob.py: the format of string --max-memory is not correct.")
    if not max_memory[:-1].isdigit():
        sys.exit("get_data_prob.py: --max-memory should be formatted as "
                 "'a positive integer' or 'a positive integer appended "
                 "with 'b', 'K', 'M','G', or '%''.")
    # max memory size must be larger than zero
    if int(max_memory[:-1]) == 0:
        sys.exit("get_data_prob.py: --max-memory must be > 0 {unit}.".format(
            unit=max_memory[-1]))
    if max_memory[-1] == 'B':  # sort seems not recognize 'B'
        # BUG FIX: the original wrote `args.max_memory[-1] = 'b'`, which
        # raises TypeError (str is immutable), and it indexed [-1] at top
        # level even when the option was '' (IndexError on the default).
        # Build a new string instead.
        max_memory = max_memory[:-1] + 'b'
    return max_memory


args.max_memory = ValidateMaxMemory(args.max_memory)
# Number of data splits used when the LM was built (None -> unsplit LM dir).
num_splits = None
num_splits_file = args.lm_dir_in + "/num_splits"
if os.path.exists(num_splits_file):
    with open(num_splits_file) as f:
        num_splits = int(f.readline())
if not os.path.exists(args.text_in):
    sys.exit("get_data_prob.py: input text data {0} does not exist".format(args.text_in))
def GetNgramOrder(lm_dir):
    """Read the n-gram order stored in <lm_dir>/ngram_order.

    :param lm_dir: path to a pocolm LM directory
    :return: the order as an int
    """
    # Use a context manager so the handle is closed (the original leaked it).
    with open(lm_dir + "/ngram_order") as f:
        return int(f.readline())
def RunCommand(command):
    """Echo `command` to stderr, run it through the shell, and exit on failure."""
    print(command, file=sys.stderr)  # log the command before running it
    status = os.system(command)
    if status != 0:
        sys.exit("get_data_prob.py: error running command: " + command)
# Scratch directory, removed at the end of the script.
work_dir = tempfile.mkdtemp(dir=args.lm_dir_in)
# this temporary directory will be used by "sort".
os.environ['TMPDIR'] = work_dir
ngram_order = GetNgramOrder(args.lm_dir_in)
# set the memory restriction for "sort"
sort_mem_opt = ''
if args.max_memory != '':
    sort_mem_opt = ("--buffer-size={0} ".format(args.max_memory))
# Build the shell pipeline that turns the input text into int-counts,
# decompressing first when the input is gzipped.
if args.text_in[-3:] == '.gz':
    command = "gunzip -c {0} | text_to_int.py {1}/words.txt ".format(args.text_in,
                                                                     args.lm_dir_in)
else:
    command = "text_to_int.py {0}/words.txt <{1}".format(args.lm_dir_in,
                                                         args.text_in)
command += "| get-text-counts {0} | sort {1} | uniq -c | get-int-counts ".format(
    ngram_order, sort_mem_opt)
# Unsplit LM: one int-counts file; split LM: fan the counts out per split.
if num_splits is None:
    command += "{0}/int.dev".format(work_dir)
else:
    command += "/dev/stdout | split-int-counts " + ' '.join([work_dir + "/int.dev." + str(n)
                                                             for n in range(1, num_splits + 1)])
RunCommand(command)
# Totals accumulated by ComputeProbs (possibly from several threads).
tot_num_words = 0.0
tot_logprob = 0.0
def ComputeProbs(split_index):
    """Run compute-probs for one split and accumulate the module totals.

    :param split_index: 1-based split number, or None when the LM is unsplit
    Side effects: adds to the module-level tot_num_words / tot_logprob.
    """
    if split_index is None:
        command = "compute-probs {0}/float.all {1}/int.dev".format(
            args.lm_dir_in, work_dir)
    else:
        command = "compute-probs {0}/float.all.{2} {1}/int.dev.{2}".format(
            args.lm_dir_in, work_dir, split_index)
    print(command, file=sys.stderr)
    try:
        output = subprocess.check_output(command, shell=True, universal_newlines=True)
    except (subprocess.CalledProcessError, OSError):
        # BUG FIX: the original bare `except:` also swallowed SystemExit,
        # KeyboardInterrupt, etc.; only a failed command should land here.
        sys.exit("get_data_prob.py: error running command: " + command)
    # compute-probs prints "<num-words> <total-objf>" on one line.
    [num_words, tot_objf] = output.split()
    global tot_num_words, tot_logprob
    # NOTE(review): these read-modify-write updates are not atomic; when
    # called from several threads, simultaneous updates could in principle
    # race -- confirm whether a lock is warranted.
    tot_num_words += float(num_words)
    tot_logprob += float(tot_objf)
# Unsplit LM: a single compute-probs run; split LM: one thread per split,
# each accumulating into the shared totals.
if num_splits is None:
    ComputeProbs(None)
else:
    threads = []
    for split_index in range(1, num_splits + 1):
        threads.append(threading.Thread(target=ComputeProbs,
                                        args=[split_index]))
        threads[-1].start()
    for t in threads:
        t.join()
# Mean log-probability per word; perplexity is its exponentiated negation.
logprob = tot_logprob / tot_num_words
perplexity = math.exp(-logprob)
print("get_data_prob.py: log-prob of {0} given model {1} was "
      "{2} per word [perplexity = {3}] over {4} words.".format(
          args.text_in, args.lm_dir_in, logprob, perplexity,
          tot_num_words), file=sys.stderr)
# Only the bare log-prob goes to stdout so callers can capture it.
print(logprob, file=sys.stdout)
shutil.rmtree(work_dir)
|
import matplotlib as mpl

# Use a serif font at 16pt for every figure.
font = {'family': 'serif',
        'size': 16}
mpl.rc('font', **font)

import numpy as np
import matplotlib.pyplot as plt


def _scatter_pdf(txt_file, pdf_file, xlim, ylim, fixed_ticks=False):
    """Scatter-plot the two columns of `txt_file` and save to `pdf_file`.

    Replaces four copy-pasted plotting stanzas from the original script.

    :param txt_file: whitespace-separated file, x in column 0, y in column 1
    :param pdf_file: output PDF path
    :param xlim: x-axis limits as a (low, high) tuple
    :param ylim: y-axis limits as a (low, high) tuple
    :param fixed_ticks: disable tick-label offset notation (needed for the
        zoomed views, whose tick values share a long common prefix)
    """
    data = np.loadtxt(txt_file)
    plt.figure()
    plt.scatter(data[:, 0], data[:, 1])
    plt.ylabel("$y$", fontsize=20)
    plt.xlabel("$x$", fontsize=20)
    plt.xlim(*xlim)
    plt.ylim(*ylim)
    if fixed_ticks:
        plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)
        plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
    plt.savefig(pdf_file, bbox_inches='tight', transparent=True)


# Full unit-square views of the two generators' (x, y) pairs.
_scatter_pdf("randu.txt", 'randu.pdf', (0, 1), (0, 1))
_scatter_pdf("drand.txt", 'drand.pdf', (0, 1), (0, 1))
# Zoomed views of a small sub-square of the same data.
_scatter_pdf("randu_zoom.txt", 'randu-zoom.pdf', (0.2, 0.2005), (0.3, 0.3005),
             fixed_ticks=True)
_scatter_pdf("drand_zoom.txt", 'drand-zoom.pdf', (0.2, 0.2005), (0.3, 0.3005),
             fixed_ticks=True)
|
"""
面向对象技术简介
类(Class): 用来描述具有相同的属性和方法的对象的集合。它定义了该集合中每个对象所共有的属性和方法。对象是类的实例。
类变量:类变量在整个实例化的对象中是公用的。类变量定义在类中且在函数体之外。类变量通常不作为实例变量使用。
数据成员:类变量或者实例变量, 用于处理类及其实例对象的相关的数据。
方法重写:如果从父类继承的方法不能满足子类的需求,可以对其进行改写,这个过程叫方法的覆盖(override),也称为方法的重写。
局部变量:定义在方法中的变量,只作用于当前实例的类。
实例变量:在类的声明中,属性是用变量来表示的。这种变量就称为实例变量,是在类声明的内部但是在类的其他成员方法之外声明的。
继承:即一个派生类(derived class)继承基类(base class)的字段和方法。继承也允许把一个派生类的对象作为一个基类对象对待。
例如,有这样一个设计:一个Dog类型的对象派生自Animal类,这是模拟"是一个(is-a)"关系(例图,Dog是一个Animal)。
实例化:创建一个类的实例,类的具体对象。
方法:类中定义的函数。
对象:通过类定义的数据结构实例。对象包括两个数据成员(类变量和实例变量)和方法。
类的创建:
class ClassName:
'类的帮助信息' #类文档字符串
class_suite #类体
类的帮助信息可以通过ClassName.__doc__查看。
class_suite 由类成员,方法,数据属性组成。
"""
class MyClass:
    """Small demo class: class attributes, public/private state, and
    public/private methods."""

    # Class attributes, shared by every instance.
    userName = "root"
    password = "123456"

    def __init__(self, age):
        """Store `age` both publicly and as a name-mangled private copy."""
        self.age = age        # public instance attribute
        self.__age = age      # private (mangled to _MyClass__age)

    def findName(self, id):
        """Map an id to its English name: 1 -> "one", 2 -> "two", else "others"."""
        names = {1: "one", 2: "two"}
        return names.get(id, "others")

    def __findTmp(self, id):
        """Private helper: "999999999" for id 100, "000000000" otherwise."""
        return "999999999" if id == 100 else "000000000"
# Create an instance; constructor arguments are passed through to __init__.
myClass = MyClass(18)
# Read the class attributes and the instance attribute set by the constructor.
print("userName:{}, password:{}, age:{}".format(myClass.userName, myClass.password, myClass.age))
# Call a public method (private methods are not reachable from outside).
print("findName:", myClass.findName(1))
|
# coding=utf-8
from OTLMOW.OTLModel.Classes.DirectioneleRelatie import DirectioneleRelatie
# Generated with OTLClassCreator. To modify: extend, do not edit
class IsInspectieVan(DirectioneleRelatie):
    """The relation states that an inspection, test or measurement can be performed on a component or installation. The source of the relation is the inspection; the target is the component or installation the inspection belongs to."""
    typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#IsInspectieVan'
    """The URI of the object according to https://www.w3.org/2001/XMLSchema#anyURI."""
    def __init__(self):
        # Generated class (see header): no extra state beyond the base relation.
        super().__init__()
|
#pylint: disable=import-error,invalid-name,broad-except
from pyrevit import revit, DB
from pyrevit import script
output = script.get_output()
logger = script.get_logger()
# Collect the room tags visible in the active view.
filt = DB.ElementCategoryFilter(DB.BuiltInCategory.OST_RoomTags)
collect = DB.FilteredElementCollector(revit.doc, revit.doc.ActiveView.Id).WhereElementIsNotElementType()
collect = collect.WherePasses(filt).ToElements()
# Placed rooms only (Area > 0 filters out unplaced/redundant rooms).
categories = [DB.BuiltInCategory.OST_Rooms]
rooms = [x for x in revit.query.get_elements_by_categories(categories) if x.Area > 0]
if rooms:
    with revit.Transaction("Center Rooms"):
        for room in rooms:
            # Move each room's location point onto its bounding-box center.
            # CLEANUP: dropped an unused DB.UV(...) local and the
            # `current_room` temporary the original computed but never used.
            bbox = room.get_BoundingBox(revit.doc.ActiveView)
            center = (bbox.Max + bbox.Min) / 2.0
            room.Location.Move(center - room.Location.Point)
if collect:
    with revit.Transaction("Center Tags"):
        for room_tag in collect:
            # Center each tag on its room's bounding-box center.
            room = room_tag.Room
            bbox = room.get_BoundingBox(revit.doc.ActiveView)
            center = (bbox.Max + bbox.Min) / 2.0
            room_tag.Location.Move(center - room_tag.Location.Point)
|
#!/usr/bin/env python
# Copyright 2019 Jian Wu
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import random
import torch as th
import torch.nn as nn
import torch.nn.functional as tf
from typing import Optional, Tuple, Union
from aps.asr.base.component import OneHotEmbedding, PyTorchRNN
HiddenType = Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]
class PyTorchRNNDecoder(nn.Module):
    """
    PyTorch's RNN decoder

    Attention-based decoder for sequence-to-sequence ASR: embeds the
    previous token, feeds it (together with the attention context, or the
    previous projection when input feeding is enabled) through an RNN, and
    predicts the next-token distribution over the vocabulary.
    """
    def __init__(self,
                 enc_proj: int,
                 vocab_size: int,
                 rnn: str = "lstm",
                 num_layers: int = 3,
                 hidden: int = 512,
                 dropout: float = 0.0,
                 input_feeding: bool = False,
                 onehot_embed: bool = False) -> None:
        super(PyTorchRNNDecoder, self).__init__()
        # Previous-token embedding: learned lookup table by default,
        # fixed one-hot vectors when onehot_embed is set.
        if not onehot_embed:
            self.vocab_embed = nn.Embedding(vocab_size, hidden)
            input_size = enc_proj + hidden
        else:
            self.vocab_embed = OneHotEmbedding(vocab_size)
            input_size = enc_proj + vocab_size
        self.decoder = PyTorchRNN(rnn,
                                  input_size,
                                  hidden,
                                  num_layers,
                                  dropout=dropout,
                                  bidirectional=False)
        # proj maps [decoder state; attention context] back to enc_proj dims;
        # pred produces the vocabulary logits from that projection.
        self.proj = nn.Linear(hidden + enc_proj, enc_proj)
        self.drop = nn.Dropout(p=dropout)
        self.pred = nn.Linear(enc_proj, vocab_size)
        self.input_feeding = input_feeding
        self.vocab_size = vocab_size
    def step_decoder(
            self,
            emb_pre: th.Tensor,
            att_ctx: th.Tensor,
            dec_hid: Optional[HiddenType] = None
    ) -> Tuple[th.Tensor, HiddenType]:
        """
        Run one RNN step on the concatenated [embedding; context] input.
        Args
            emb_pre: N x D_emb
            att_ctx: N x D_enc
        Return
            (decoder output N x D_dec, new hidden state)
        """
        # N x 1 x (D_emb+D_enc)
        dec_in = th.cat([emb_pre, att_ctx], dim=-1).unsqueeze(1)
        # N x 1 x (D_emb+D_enc) => N x 1 x D_dec
        dec_out, hx = self.decoder(dec_in, hx=dec_hid)
        # N x 1 x D_dec => N x D_dec
        return dec_out.squeeze(1), hx
    def step(
        self,
        att_net: nn.Module,
        out_pre: th.Tensor,
        enc_out: th.Tensor,
        att_ctx: th.Tensor,
        dec_hid: Optional[HiddenType] = None,
        att_ali: Optional[th.Tensor] = None,
        proj: Optional[th.Tensor] = None,
        enc_len: Optional[th.Tensor] = None,
    ) -> Tuple[th.Tensor, th.Tensor, HiddenType, th.Tensor, th.Tensor]:
        """
        Make a prediction step

        Embeds the previous token, advances the RNN (using `proj` instead of
        the attention context when input feeding is enabled), re-attends over
        the encoder output, and returns the logits plus all recurrent state
        needed for the next step.
        """
        # N x D_emb or N x V
        emb_pre = self.vocab_embed(out_pre)
        # dec_out: N x D_dec
        if self.input_feeding:
            dec_out, dec_hid = self.step_decoder(emb_pre, proj, dec_hid=dec_hid)
        else:
            dec_out, dec_hid = self.step_decoder(emb_pre,
                                                 att_ctx,
                                                 dec_hid=dec_hid)
        # att_ali: N x Ti, att_ctx: N x D_enc
        att_ali, att_ctx = att_net(enc_out, enc_len, dec_out, att_ali)
        # proj: N x D_enc
        proj = self.proj(th.cat([dec_out, att_ctx], dim=-1))
        proj = self.drop(tf.relu(proj))
        # pred: N x V
        pred = self.pred(proj)
        return pred, att_ctx, dec_hid, att_ali, proj
    def forward(self,
                att_net: nn.Module,
                enc_pad: th.Tensor,
                enc_len: Optional[th.Tensor],
                tgt_pad: th.Tensor,
                schedule_sampling: float = 0) -> Tuple[th.Tensor, th.Tensor]:
        """
        Args
            enc_pad: N x Ti x D_enc
            enc_len: N or None
            tgt_pad: N x To
            schedule_sampling:
                1: using prediction
                0: using ground truth
        Return
            outs: N x To x V
            alis: N x To x T
        """
        N, _, D_enc = enc_pad.shape
        outs = []  # collect prediction
        att_ali = None  # attention alignments
        dec_hid = None
        device = enc_pad.device
        # zero init context
        att_ctx = th.zeros([N, D_enc], device=device)
        proj = th.zeros([N, D_enc], device=device)
        alis = []  # collect alignments
        # step by step
        #     0   1   2   3   ...   T
        #   SOS  t0  t1  t2  ... t{T-1}
        #    t0  t1  t2  t3  ...  EOS
        for t in range(tgt_pad.shape[-1]):
            # using output at previous time step
            # out: N
            # Scheduled sampling: with probability schedule_sampling, feed the
            # model's own previous prediction instead of the ground truth.
            if t and random.random() < schedule_sampling:
                tok_pre = th.argmax(outs[-1].detach(), dim=1)
            else:
                tok_pre = tgt_pad[:, t]
            # step forward
            pred, att_ctx, dec_hid, att_ali, proj = self.step(att_net,
                                                              tok_pre,
                                                              enc_pad,
                                                              att_ctx,
                                                              dec_hid=dec_hid,
                                                              att_ali=att_ali,
                                                              enc_len=enc_len,
                                                              proj=proj)
            outs.append(pred)
            alis.append(att_ali)
        # N x To x V
        outs = th.stack(outs, dim=1)
        # N x To x Ti
        alis = th.stack(alis, dim=1)
        return outs, alis
|
def maxProfit(prices):
    """Best Time to Buy and Sell Stock (one buy, one later sell).

    Single pass: track the cheapest price seen so far and the best gap
    between any later price and that minimum. Returns 0 when no profitable
    trade exists or the list is empty.
    """
    best = 0
    cheapest = None
    for price in prices:
        if cheapest is None or price < cheapest:
            cheapest = price
        elif price - cheapest > best:
            best = price - cheapest
    return best
if __name__ == "__main__":
prices = [7,1,5,3,6,4]
print(maxProfit(prices))
|
import pandas as pd
import matplotlib.pyplot as plt
# Load the trace-element sheet of the Smith glass dataset.
my_dataset = pd.read_excel('Smith_glass_post_NYT_data.xlsx', sheet_name='Supp_traces')
# Range of the Zr measurements (max - min), reported in ppm per the axis label.
my_range = my_dataset['Zr'].max()- my_dataset['Zr'].min()
print ('-------')
print ('Range')
print("{0:.0f}".format(my_range))
print ('-------')
# Density histogram of Zr with min/max marked and the full range shaded.
fig, ax = plt.subplots()
ax.hist(my_dataset.Zr, bins=20, density=True, edgecolor='k', label='Measurements Hist')
ax.axvline(my_dataset['Zr'].max(), color='purple', label='Max value', linewidth=2)
ax.axvline(my_dataset['Zr'].min(), color='green', label='Min value', linewidth=2)
ax.axvspan(my_dataset['Zr'].min(), my_dataset['Zr'].max(), alpha=0.1, color='orange', label='Range = ' + "{0:.0f}".format(my_range) + ' ppm')
ax.set_xlabel('Zr [ppm]')
ax.set_ylabel('Probability density')
ax.legend()
|
import torch
from model.san import san
from util.complexity import get_model_complexity_info
# Report parameter count and FLOPs for one SAN configuration at 3x224x224
# input; the commented lines below are the alternative published configs
# (sa_type and the layers/kernels lists select the variant).
with torch.cuda.device(0):
    model = san(sa_type=0, layers=[2, 1, 2, 4, 1], kernels=[3, 7, 7, 7, 7], num_classes=1000)
    # model = san(sa_type=0, layers=[3, 2, 3, 5, 2], kernels=[3, 7, 7, 7, 7], num_classes=1000)
    # model = san(sa_type=0, layers=[3, 3, 4, 6, 3], kernels=[3, 7, 7, 7, 7], num_classes=1000)
    # model = san(sa_type=1, layers=[2, 1, 2, 4, 1], kernels=[3, 7, 7, 7, 7], num_classes=1000)
    # model = san(sa_type=1, layers=[3, 2, 3, 5, 2], kernels=[3, 7, 7, 7, 7], num_classes=1000)
    # model = san(sa_type=1, layers=[3, 3, 4, 6, 3], kernels=[3, 7, 7, 7, 7], num_classes=1000)
    flops, params = get_model_complexity_info(model.cuda(), (3, 224, 224), as_strings=True, print_per_layer_stat=True)
    print('Params/Flops: {}/{}'.format(params, flops))
|
import json
from pathlib import Path
from typing import Dict, Any
from typing import List
from typing import NamedTuple
from typing import Union
from yukarin.param import AcousticParam
from yukarin.utility.json_utility import JSONEncoder
class DatasetConfig(NamedTuple):
    """Dataset settings: acoustic params, file globs, feature lists, noise
    levels, crop size, and train/test split controls."""
    acoustic_param: AcousticParam
    input_glob: Path
    target_glob: Path
    indexes_glob: Path
    in_features: List[str]
    out_features: List[str]
    train_crop_size: int
    input_global_noise: float
    input_local_noise: float
    target_global_noise: float
    target_local_noise: float
    seed: int
    num_test: int
class ModelConfig(NamedTuple):
    """Network architecture settings for the generator and discriminator."""
    in_channels: int
    out_channels: int
    generator_base_channels: int
    generator_extensive_layers: int
    discriminator_base_channels: int
    discriminator_extensive_layers: int
    weak_discriminator: bool
    glu_generator: bool
class LossConfig(NamedTuple):
    """Loss-term weights (MSE reconstruction and adversarial)."""
    mse: float
    adversarial: float
class TrainConfig(NamedTuple):
    """Training-loop settings: batching, device, iteration cadence,
    optimizer kwargs, and optional warm-start model path."""
    batchsize: int
    gpu: int
    log_iteration: int
    snapshot_iteration: int
    stop_iteration: int
    optimizer: Dict[str, Any]
    pretrained_model: Path
class ProjectConfig(NamedTuple):
    """Experiment bookkeeping: a name plus free-form tags."""
    name: str
    tags: List[str]
class Config(NamedTuple):
    """Top-level configuration grouping all sub-configs."""
    dataset: DatasetConfig
    model: ModelConfig
    loss: LossConfig
    train: TrainConfig
    project: ProjectConfig

    def save_as_json(self, path):
        """Serialize this config to `path` as sorted, pretty-printed JSON."""
        d = _namedtuple_to_dict(self)
        # BUG FIX: use a context manager; the original passed open(path, 'w')
        # straight to json.dump and never closed the handle.
        with open(path, 'w') as f:
            json.dump(d, f, indent=2, sort_keys=True, cls=JSONEncoder)
def _namedtuple_to_dict(o: NamedTuple):
    """Recursively convert a NamedTuple (including nested ones) to a dict."""
    result = {}
    for key, value in o._asdict().items():
        if hasattr(value, '_asdict'):
            value = _namedtuple_to_dict(value)
        result[key] = value
    return result
def create_from_json(s: Union[str, Path]):
    """Load a Config from a JSON file, upgrading older config layouts in place.

    :param s: path to the JSON config file
    :return: a fully-populated Config
    """
    # BUG FIX: close the file handle (the original passed open(s) straight
    # to json.load and leaked it).
    with open(s) as f:
        d = json.load(f)
    backward_compatible(d)
    return Config(
        dataset=DatasetConfig(
            acoustic_param=AcousticParam(**d['dataset']['acoustic_param']),
            input_glob=Path(d['dataset']['input_glob']),
            target_glob=Path(d['dataset']['target_glob']),
            indexes_glob=Path(d['dataset']['indexes_glob']),
            in_features=d['dataset']['in_features'],
            out_features=d['dataset']['out_features'],
            train_crop_size=d['dataset']['train_crop_size'],
            input_global_noise=d['dataset']['input_global_noise'],
            input_local_noise=d['dataset']['input_local_noise'],
            target_global_noise=d['dataset']['target_global_noise'],
            target_local_noise=d['dataset']['target_local_noise'],
            seed=d['dataset']['seed'],
            num_test=d['dataset']['num_test'],
        ),
        model=ModelConfig(
            in_channels=d['model']['in_channels'],
            out_channels=d['model']['out_channels'],
            generator_base_channels=d['model']['generator_base_channels'],
            generator_extensive_layers=d['model']['generator_extensive_layers'],
            discriminator_base_channels=d['model']['discriminator_base_channels'],
            discriminator_extensive_layers=d['model']['discriminator_extensive_layers'],
            weak_discriminator=d['model']['weak_discriminator'],
            glu_generator=d['model']['glu_generator'],
        ),
        loss=LossConfig(
            mse=d['loss']['mse'],
            adversarial=d['loss']['adversarial'],
        ),
        train=TrainConfig(
            batchsize=d['train']['batchsize'],
            gpu=d['train']['gpu'],
            log_iteration=d['train']['log_iteration'],
            snapshot_iteration=d['train']['snapshot_iteration'],
            stop_iteration=d['train']['stop_iteration'],
            optimizer=d['train']['optimizer'],
            pretrained_model=d['train']['pretrained_model'],
        ),
        project=ProjectConfig(
            name=d['project']['name'],
            tags=d['project']['tags'],
        )
    )
def backward_compatible(d: Dict):
    """Fill in, in place, config keys that older saved configs predate."""
    model = d['model']
    if 'glu_generator' not in model:
        model['glu_generator'] = False
    dataset = d['dataset']
    if 'features' in dataset:
        # Older configs used a single 'features' list for both directions.
        dataset['in_features'] = dataset['features']
        dataset['out_features'] = dataset['features']
    train = d['train']
    if 'optimizer' not in train:
        train['optimizer'] = dict(
            name='Adam',
            alpha=0.0002,
            beta1=0.5,
            beta2=0.999,
        )
    if 'pretrained_model' not in train:
        train['pretrained_model'] = None
|
#!/usr/bin/env python
import hashlib
import io
import json
import multiprocessing
import os
import sys
from collections import OrderedDict
from unittest.mock import call, mock_open, patch
import pytest
import requests
import wasapi_client as wc
WASAPI_URL = 'http://example.com/webdata'
WASAPI_TEXT = "".join("""{
"count": 2,
"files": [
{
"account": 1,
"checksums": {
"md5": "61f818912d1f39bc9dd15d4b87461110",
"sha1": "edef6bca652d75d0587ef411d5f028335341b074"
},
"collection": 7967,
"crawl": 256123,
"crawl-start": "2016-12-22T14:07:24Z",
"crawl-time": "2016-12-22T18:55:12Z",
"filename": "AIT-JOB256123-00000.warc.gz",
"filetype": "warc",
"locations": [
"https://warcs.example.com/webdatafile/AIT-JOB256123-00000.warc.gz",
"https://example.com/download/AIT-JOB256123-00000.warc.gz"
],
"size": 943100093
},
{
"account": 1,
"checksums": {
"md5": "748120fd9672b22df5942bb44e9cde81",
"sha1": "54a466421471ef7d8cb4d6bbfb85afd76022a378"
},
"collection": 7967,
"crawl": 256118,
"crawl-start": "2016-12-22T14:01:53Z",
"crawl-time": "2016-12-22T14:01:58Z",
"filename": "ARCHIVEIT-JOB256118-00000.warc.gz",
"filetype": "warc",
"locations": [
"https://warcs.example.com/webdatafile/AIT-JOB256118-00000.warc.gz",
"https://example.com/download/AIT-JOB256118-00000.warc.gz"
],
"size": 6265488
}
],
"includes-extra": false,
"next": null,
"previous": null,
"request-url": "https://example.com/wasapi/v1/webdata"
}""".split())
NO_FILES = """{
"count": 0,
"files": [],
"request-url": "https://example.com/wasapi/v1/webdata",
"includes-extra": false,
"next": null,
"previous": null
}"""
class MockResponse200:
    """Stand-in for a successful requests GET response carrying JSON text."""
    def __init__(self, text=WASAPI_TEXT):
        # Mirror the attributes the client reads on a successful response.
        self.status_code = 200
        self.reason = 'OK'
        self.text = text
    def json(self):
        """Deserialize the canned body, as requests.Response.json() would."""
        return json.loads(self.text)
class MockResponse403:
    """Stand-in for a requests GET response rejected with HTTP 403."""
    def __init__(self):
        # Mirror the two attributes the client reads on a failed response.
        self.reason = 'Forbidden'
        self.status_code = 403
class Test_make_session:
    """Tests for wc.make_session's auth handling."""
    def test_make_session_auth(self):
        # Credentials supplied -> stored on the session for later requests.
        auth = ('user', 'pass')
        session = wc.make_session(auth)
        assert session.auth == auth
    def test_make_session_no_auth(self):
        # No credentials -> session created with auth unset.
        session = wc.make_session(None)
        assert session.auth is None
class Test_get_webdata:
    """Tests for wc.get_webdata: success, auth failure, connection failure,
    and malformed-JSON handling (each error path exits via SystemExit)."""
    def test_get_webdata(self):
        """Test a successful response."""
        session = requests.Session()
        with patch.object(session, 'get', return_value=MockResponse200()):
            response = wc.get_webdata(WASAPI_URL, session)
        # Compare with whitespace stripped.
        response_text = "".join(json.dumps(response, sort_keys=True).split())
        assert response_text == WASAPI_TEXT
    def test_get_webdata_403_forbidden(self):
        """Test bad authentication handling."""
        session = requests.Session()
        with patch.object(session, 'get', return_value=MockResponse403()):
            with pytest.raises(SystemExit):
                wc.get_webdata(WASAPI_URL, session)
    def test_get_webdata_ConnectionError(self):
        """Test host connection isn't made."""
        session = requests.Session()
        error = requests.exceptions.ConnectionError
        with patch.object(session, 'get', side_effect=error):
            with pytest.raises(SystemExit):
                wc.get_webdata(WASAPI_URL, session)
    def test_get_webdata_json_error(self):
        """Test 200 non-JSON response exits."""
        session = requests.Session()
        text = 'response text is not json'
        with patch.object(session, 'get', return_value=MockResponse200(text)):
            with pytest.raises(SystemExit):
                wc.get_webdata(WASAPI_URL, session)
@patch('requests.Session')
class Test_Downloads:
    """Tests for wc.Downloads: download-queue population (single and
    multi-page), url/checksum bookkeeping, and manifest generation.
    requests.Session is patched class-wide, so each test injects canned
    responses via mock_session."""
    def test_populate_downloads(self, mock_session):
        """Test a queue is returned with expected data."""
        mock_session.return_value.get.return_value = MockResponse200()
        downloads = wc.Downloads(WASAPI_URL, download=True)
        j_queue = downloads.get_q
        assert j_queue.qsize() == 2
        # Drain the JoinableQueue to avoid BrokenPipeError.
        # There could be a better way to handle this...
        while j_queue.qsize():
            q_item = j_queue.get()
            assert isinstance(q_item, wc.DataFile)
            j_queue.task_done()
    def test_populate_downloads_multi_page(self, mock_session):
        """Test the queue returned for multiple results pages."""
        # Give the first of our two page responses a next page URL.
        p1 = WASAPI_TEXT.replace('"next":null', '"next":"http://test?page=2"')
        responses = [MockResponse200(p1), MockResponse200()]
        mock_session.return_value.get.side_effect = responses
        downloads = wc.Downloads(WASAPI_URL, download=True)
        j_queue = downloads.get_q
        assert j_queue.qsize() == 4
        # Drain the JoinableQueue to avoid BrokenPipeError.
        while j_queue.qsize():
            q_item = j_queue.get()
            assert isinstance(q_item, wc.DataFile)
            j_queue.task_done()
    def test_populate_downloads_no_get_q(self, mock_session):
        """Test download=False prevents get_q attribute existing."""
        mock_session.return_value.get.return_value = MockResponse200()
        downloads = wc.Downloads(WASAPI_URL, download=False)
        with pytest.raises(AttributeError):
            getattr(downloads, 'get_q')
    def test_populate_downloads_urls(self, mock_session):
        """Test urls is populated with first location per file."""
        mock_session.return_value.get.return_value = MockResponse200()
        downloads = wc.Downloads(WASAPI_URL, download=False)
        assert len(downloads.urls) == 2
        for url in ['https://warcs.example.com/webdatafile/AIT-JOB256123-00000.warc.gz',
                    'https://warcs.example.com/webdatafile/AIT-JOB256118-00000.warc.gz']:
            assert url in downloads.urls
    def test_populate_downloads_manifest(self, mock_session):
        """Test the checksums dict is populated."""
        mock_session.return_value.get.return_value = MockResponse200()
        downloads = wc.Downloads(WASAPI_URL, download=False)
        assert len(downloads.checksums)
        # One (checksum, filename) pair per file, keyed by algorithm.
        assert downloads.checksums['md5'] == [('61f818912d1f39bc9dd15d4b87461110',
                                               'AIT-JOB256123-00000.warc.gz'),
                                              ('748120fd9672b22df5942bb44e9cde81',
                                               'ARCHIVEIT-JOB256118-00000.warc.gz')]
        assert downloads.checksums['sha1'] == [('edef6bca652d75d0587ef411d5f028335341b074',
                                                'AIT-JOB256123-00000.warc.gz'),
                                               ('54a466421471ef7d8cb4d6bbfb85afd76022a378',
                                                'ARCHIVEIT-JOB256118-00000.warc.gz')]
    def test_populate_downloads_manifest_destination(self, mock_session):
        """Test the checksums dict is populated with destination included."""
        mock_session.return_value.get.return_value = MockResponse200()
        downloads = wc.Downloads(WASAPI_URL, download=False, destination='{}tmp'.format(os.sep))
        assert len(downloads.checksums)
        # Filenames are prefixed with the destination path when one is given.
        assert downloads.checksums['md5'] == [
            ('61f818912d1f39bc9dd15d4b87461110',
             os.path.normpath('/tmp/AIT-JOB256123-00000.warc.gz')),
            ('748120fd9672b22df5942bb44e9cde81',
             os.path.normpath('/tmp/ARCHIVEIT-JOB256118-00000.warc.gz'))
        ]
        assert downloads.checksums['sha1'] == [
            ('edef6bca652d75d0587ef411d5f028335341b074',
             os.path.normpath('/tmp/AIT-JOB256123-00000.warc.gz')),
            ('54a466421471ef7d8cb4d6bbfb85afd76022a378',
             os.path.normpath('/tmp/ARCHIVEIT-JOB256118-00000.warc.gz'))
        ]
    def test_populate_downloads_generate_manifest(self, mock_session, tmpdir):
        """Test checksum files are created for all algorithms."""
        mock_session.return_value.get.return_value = MockResponse200()
        sub_dir = 'downloads'
        dest = tmpdir.mkdir(sub_dir)
        downloads = wc.Downloads(WASAPI_URL, download=False, destination=str(dest))
        downloads.generate_manifests()
        sub_dir_contents = dest.listdir()
        assert len(sub_dir_contents) == 2
        for name in ['manifest-md5.txt', 'manifest-sha1.txt']:
            assert dest.join(name) in sub_dir_contents
    def test_write_manifest_file(self, mock_session, tmpdir):
        """Test a manifest file is written for the given algorithm."""
        mock_session.return_value.get.return_value = MockResponse200()
        sub_dir = 'downloads'
        dest = tmpdir.mkdir(sub_dir)
        downloads = wc.Downloads(WASAPI_URL, download=False, destination=str(dest))
        downloads.write_manifest_file('sha1')
        assert len(dest.listdir()) == 1
        txt = (
            'edef6bca652d75d0587ef411d5f028335341b074 {p}{s}AIT-JOB256123-00000.warc.gz\n'
            '54a466421471ef7d8cb4d6bbfb85afd76022a378 {p}{s}ARCHIVEIT-JOB256118-00000.warc.gz\n'
        )
        assert dest.join('manifest-sha1.txt').read() == txt.format(p=dest, s=os.sep)
    def test_write_manifest_file_wrong_algorithm(self, mock_session, tmpdir):
        """Test writing a manifest file for an algorithm we don't have."""
        mock_session.return_value.get.return_value = MockResponse200()
        sub_dir = 'downloads'
        dest = tmpdir.mkdir(sub_dir)
        downloads = wc.Downloads(WASAPI_URL, download=False, destination=str(dest))
        with pytest.raises(wc.WASAPIManifestError):
            downloads.write_manifest_file('sha2')
@patch('requests.Session')
class Test_get_files_count:
    """Tests for wc.get_files_count."""

    def test_get_files_count(self, mock_session):
        """The file count is taken from the response's `count` field."""
        mock_session.return_value.get.return_value = MockResponse200()
        assert wc.get_files_count(WASAPI_URL) == 2
@patch('requests.Session')
class Test_get_files_size:
    """Tests for wc.get_files_size."""

    def test_get_files_size(self, mock_session):
        """A single page yields its own count and total size."""
        mock_session.return_value.get.return_value = MockResponse200()
        count, total = wc.get_files_size(WASAPI_URL)
        assert count == 2
        assert total == 949365581

    def test_get_files_size_multi_page(self, mock_session):
        """Sizes are summed over pages; count comes from the last page."""
        # Give the first of our two page responses a next page URL.
        page_one = WASAPI_TEXT.replace('"next":null',
                                       '"next":"{}?page=2"'.format(WASAPI_URL))
        # In reality `count` is identical on every page; vary it here so the
        # test shows which page supplies the reported value.
        page_two = WASAPI_TEXT.replace('"count":2', '"count":4')
        mock_session.return_value.get.side_effect = [MockResponse200(page_one),
                                                     MockResponse200(page_two)]
        count, total = wc.get_files_size(WASAPI_URL)
        assert count == 4
        assert total == 949365581 * 2

    def test_get_files_size_no_files(self, mock_session):
        """An empty file listing yields zero count and zero size."""
        mock_session.return_value.get.return_value = MockResponse200(NO_FILES)
        assert wc.get_files_size(WASAPI_URL) == (0, 0)
class Test_convert_bytes:
    """Tests for wc.convert_bytes humanized-size formatting."""

    # (input byte count, expected human-readable string)
    CASES = [
        (0, '0.0B'),
        (1023, '1023.0B'),
        (1024, '1.0KB'),
        (1024000, '1000.0KB'),
        (1048576, '1.0MB'),
        (1073741824, '1.0GB'),
        (1099511628000, '1.0TB'),
    ]

    @pytest.mark.parametrize('size, expected', CASES)
    def test_convert_bytes(self, size, expected):
        """Each byte count maps to its expected formatted string."""
        assert wc.convert_bytes(size) == expected
class Test_download_file:
    """Tests for wc.download_file.

    Fixture data describes one file advertised at two mirror locations.
    """

    locations = ['http://loc1/blah.warc.gz', 'http://loc2/blah.warc.gz']
    filename = 'blah.warc.gz'
    checksums = {'sha1': '33304d104f95d826da40079bad2400dc4d005403',
                 'md5': '62f87a969af0dd857ecd6c3e7fde6aed'}
    size = 12345678
    data_file = wc.DataFile(locations, filename, checksums, size)

    def test_download_file_200(self):
        """A 200 response writes the file and stops trying locations."""
        session = requests.Session()
        mock_200 = MockResponse200('')
        with patch.object(session, 'get', return_value=mock_200) as mock_get, \
                patch('wasapi_client.write_file') as mock_write_file:
            file_data = wc.download_file(self.data_file, session, self.filename)
        # Check we only tried downloading files until successful download.
        mock_get.assert_called_once_with(self.locations[0], stream=True)
        mock_write_file.assert_called_once_with(mock_200, self.filename)
        assert not file_data.verified

    def test_download_file_not_200(self):
        """Non-200 responses raise after every location has been tried."""
        session = requests.Session()
        mock_403 = MockResponse403()
        with patch.object(session, 'get', return_value=mock_403) as mock_get, \
                pytest.raises(wc.WASAPIDownloadError) as err:
            wc.download_file(self.data_file, session, self.filename)
        # Use `err.value` (the raised exception): under pytest >= 5,
        # str(err) is the ExceptionInfo repr and no longer contains the
        # exception message, which silently breaks `in` checks.
        for item in (str(self.locations), self.filename):
            assert item in str(err.value)
        # Check all locations were tried.
        calls = [call(self.locations[0], stream=True),
                 call(self.locations[1], stream=True)]
        mock_get.assert_has_calls(calls)

    def test_download_file_OSError(self):
        """An OSError while writing is reported as a download failure."""
        session = requests.Session()
        mock_200 = MockResponse200('')
        with patch.object(session, 'get', return_value=mock_200) as mock_get, \
                patch('wasapi_client.write_file') as mock_write_file:
            mock_write_file.side_effect = OSError
            with pytest.raises(wc.WASAPIDownloadError) as err:
                wc.download_file(self.data_file, session, self.filename)
            # See note above: `err.value` holds the actual exception.
            for item in (str(self.locations), self.filename):
                assert item in str(err.value)
        # Check we only tried downloading files until successful download.
        mock_get.assert_called_once_with(self.locations[0], stream=True)
        mock_write_file.assert_called_once_with(mock_200, self.filename)

    def test_download_check_exists_true(self):
        """Test a file already existing on the filesystem is not downloaded."""
        with patch('wasapi_client.check_exists', return_value=True), \
                patch('requests.Session', autospec=True) as mock_session:
            file_data = wc.download_file(self.data_file, mock_session, self.filename)
        # Check `verified` has been set True on the FileData instance.
        assert file_data.verified
        # Check that no get request was made.
        assert not mock_session.get.called
class Test_check_exists:
    """Tests for wc.check_exists."""

    def test_check_exists_return_true(self):
        """A present file with matching size and checksum verifies."""
        checksums = {'sha1': '33304d104f95d826da40079bad2400dc4d005403'}
        with patch('os.path.isfile', return_value=True), \
                patch('os.path.getsize', return_value=123456), \
                patch('wasapi_client.verify_file', return_value=True) as mock_verify:
            assert wc.check_exists('path', 123456, checksums)
        mock_verify.assert_called_once_with(checksums, 'path')

    @patch('os.path.isfile', return_value=False)
    @patch('os.path.getsize')
    def test_check_exists_no_file(self, mock_getsize, mock_isfile):
        """A missing file short-circuits before the size is consulted."""
        assert not wc.check_exists('path', 123456, {})
        mock_isfile.assert_called_once_with('path')
        assert not mock_getsize.called

    @patch('os.path.isfile', return_value=True)
    @patch('os.path.getsize', return_value=123456)
    @patch('wasapi_client.verify_file')
    def test_check_exists_file_size_mismatch(self, mock_verify, mock_getsize, mock_isfile):
        """A size mismatch fails without ever computing checksums."""
        assert not wc.check_exists('path', 789, {})
        mock_isfile.assert_called_once_with('path')
        mock_getsize.assert_called_once_with('path')
        assert not mock_verify.called

    def test_check_exists_checksum_fail(self):
        """A failed checksum verification makes the check fail."""
        with patch('os.path.isfile', return_value=True), \
                patch('os.path.getsize', return_value=123456), \
                patch('wasapi_client.verify_file', return_value=False) as mock_verify:
            assert not wc.check_exists('path', 123456, {})
        mock_verify.assert_called_once_with({}, 'path')
class Test_verify_file:
    """Tests for wc.verify_file."""

    @patch('wasapi_client.calculate_sum')
    def test_verify_file(self, mock_calc_sum):
        """A matching checksum returns True."""
        digest = '33304d104f95d826da40079bad2400dc4d005403'
        mock_calc_sum.return_value = digest
        assert wc.verify_file({'sha1': digest}, 'dummy/path')

    def test_verify_file_unsupported_algorithm(self):
        """All algorithms being unsupported returns False."""
        assert not wc.verify_file({'shaq1': 'shaq1algorithmdoesnotexist'},
                                  'dummy/path')

    @patch('wasapi_client.calculate_sum')
    def test_verify_file_checksum_mismatch(self, mock_calc_sum):
        """A calculated checksum differing from the expected one fails."""
        digest = '33304d104f95d826da40079bad2400dc4d005403'
        path = 'dummy/path'
        # Make the calculated sum differ from the expected one.
        mock_calc_sum.return_value = digest + 'notmatching'
        with patch('wasapi_client.LOGGER', autospec=True) as mock_logger:
            assert not wc.verify_file({'sha1': digest}, path)
        msg = 'Checksum {} mismatch for {}: expected {}, got {}notmatching'.format('sha1',
                                                                                   path,
                                                                                   digest,
                                                                                   digest)
        mock_logger.error.assert_called_once_with(msg)

    @patch('wasapi_client.calculate_sum')
    def test_verify_file_one_supported_algorithm(self, mock_calc_sum):
        """One unsupported plus one supported algorithm returns True."""
        digest = '33304d104f95d826da40079bad2400dc4d005403'
        # Ordered so the unsupported algorithm is encountered first.
        checksums = OrderedDict([('abc', 'algorithm_unsupported'),
                                 ('sha1', digest)])
        mock_calc_sum.return_value = digest
        with patch('wasapi_client.LOGGER', autospec=True) as mock_logger:
            assert wc.verify_file(checksums, 'dummy/path')
        # The unsupported algorithm was tried (and logged) before success.
        mock_logger.debug.assert_called_once_with('abc is unsupported')
        mock_logger.info.assert_called_once_with('Checksum success at: dummy/path')
class Test_calculate_sum:
    """Tests for wc.calculate_sum."""

    @pytest.mark.skipif(sys.version_info < (3, 4, 4),
                        reason=('bug via mock_open '
                                'https://github.com/python/cpython/commit/86b34d'))
    def test_calculate_sum(self):
        """The computed digest matches hashlib's sha1 of the file data."""
        data = b'data from file'
        with patch('builtins.open', mock_open(read_data=data)):
            digest = wc.calculate_sum(hashlib.sha1, 'dummy/path')
        assert digest == hashlib.sha1(data).hexdigest()
class Test_convert_queue:
    """Tests for wc.convert_queue."""

    def test_convert_queue(self):
        """Queue items are grouped into lists keyed by their status."""
        manager = multiprocessing.Manager()
        queue = manager.Queue()
        queue.put(('success', 'name1'))
        queue.put(('failure', 'name2'))
        converted = wc.convert_queue(queue)
        assert converted['success'] == ['name1']
        assert converted['failure'] == ['name2']
        manager.shutdown()
class Test_generate_report:
    """Tests for wc.generate_report."""

    def test_generate_report_all_success(self):
        """Only the totals are reported when every download succeeded."""
        manager = multiprocessing.Manager()
        queue = manager.Queue()
        queue.put(('success', 'name1'))
        queue.put(('success', 'name2'))
        report = wc.generate_report(queue)
        manager.shutdown()
        assert report == ('Total downloads attempted: 2\n'
                          'Successful downloads: 2\n'
                          'Failed downloads: 0\n')

    def test_generate_report_one_failure(self):
        """Failed filenames are listed when some downloads failed."""
        manager = multiprocessing.Manager()
        queue = manager.Queue()
        queue.put(('success', 'name1'))
        queue.put(('failure', 'name2'))
        report = wc.generate_report(queue)
        manager.shutdown()
        assert report == ('Total downloads attempted: 2\n'
                          'Successful downloads: 1\n'
                          'Failed downloads: 1\n'
                          'Failed files (see log for details):\n'
                          ' name2\n')

    def test_generate_report_all_failure(self):
        """No failed-file list is produced when everything failed."""
        manager = multiprocessing.Manager()
        queue = manager.Queue()
        queue.put(('failure', 'name1'))
        queue.put(('failure', 'name2'))
        report = wc.generate_report(queue)
        manager.shutdown()
        assert report == ('Total downloads attempted: 2\n'
                          'Successful downloads: 0\n'
                          'Failed downloads: 2\n')
class TestDownloader:
    """Tests for the wc.Downloader worker process.

    NOTE(review): each test calls both ``p.start()`` (spawn the worker in a
    child process) and ``p.run()`` (execute the same loop synchronously in
    this process). Presumably the synchronous ``run()`` is what guarantees
    the queue is drained before the assertions -- confirm this double
    execution is intentional.
    """

    # Shared fixture data: one file advertised at two mirror locations.
    locations = ['http://loc1/blah.warc.gz', 'http://loc2/blah.warc.gz']
    filename = 'blah.warc.gz'
    checksums = {'sha1': '33304d104f95d826da40079bad2400dc4d005403',
                 'md5': '62f87a969af0dd857ecd6c3e7fde6aed'}
    size = 12345678
    data_file = wc.DataFile(locations, filename, checksums, size)

    def test_run(self):
        """Test downloader when downloads are successful."""
        # Create a queue holding two sets of file data.
        get_q = multiprocessing.JoinableQueue()
        for _ in (1, 2):
            get_q.put(self.data_file)
        result_q = multiprocessing.Queue()
        log_q = multiprocessing.Queue()
        # Patch out real verification and network download.
        with patch('wasapi_client.verify_file', return_value=True), \
                patch('wasapi_client.download_file', return_value=self.data_file):
            p = wc.Downloader(get_q, result_q, log_q)
            p.start()
            p.run()
            # If the join doesn't block, the queue is fully processed.
            get_q.join()
            assert result_q.qsize() == 2
            assert log_q.qsize() == 0
            for _ in (1, 2):
                assert result_q.get() == ('success', self.filename)

    @patch('wasapi_client.download_file')
    def test_run_WASAPIDownloadError(self, mock_download):
        """Test downloader when downloads fail."""
        mock_download.side_effect = wc.WASAPIDownloadError()
        # Create a queue holding two sets of file data.
        get_q = multiprocessing.JoinableQueue()
        for _ in (1, 2):
            get_q.put(self.data_file)
        result_q = multiprocessing.Queue()
        log_q = multiprocessing.Queue()
        p = wc.Downloader(get_q, result_q, log_q)
        p.start()
        p.run()
        # If the join doesn't block, the queue is fully processed.
        get_q.join()
        # Each failed download reports a failure and leaves a log record.
        assert result_q.qsize() == 2
        assert log_q.qsize() == 2
        for _ in (1, 2):
            assert result_q.get() == ('failure', self.filename)

    def test_run_file_already_verified(self):
        """Test a downloaded file is not verified twice."""
        return_data_file = wc.DataFile(self.locations, self.filename, self.checksums, self.size)
        # download_file hands back a DataFile already marked verified.
        return_data_file.verified = True
        # Create a queue holding two sets of file data.
        get_q = multiprocessing.JoinableQueue()
        for _ in (1, 2):
            get_q.put(self.data_file)
        result_q = multiprocessing.Queue()
        log_q = multiprocessing.Queue()
        with patch('wasapi_client.verify_file', return_value=True) as mock_verify, \
                patch('wasapi_client.download_file', return_value=return_data_file):
            p = wc.Downloader(get_q, result_q, log_q)
            p.start()
            p.run()
            # If the join doesn't block, the queue is fully processed.
            get_q.join()
            assert result_q.qsize() == 2
            assert log_q.qsize() == 0
            for _ in (1, 2):
                assert result_q.get() == ('success', self.filename)
            # Check verify_exists was not called, since it was called in `download_file`.
            assert not mock_verify.called
class Test_parse_args:
    """Tests for wc._parse_args."""

    @patch('wasapi_client.multiprocessing.cpu_count')
    def test_default_processes(self, mock_cpu_count):
        """Fall back to one process when cpu_count() is unavailable.

        cpu_count() may raise NotImplementedError on some platforms;
        the default should then be 1 when --processes is not given.
        """
        mock_cpu_count.side_effect = NotImplementedError
        args = wc._parse_args(['--crawl', '12'])
        assert args.processes == 1

    def test_SetQueryParametersAction(self):
        """Arguments handled by this action end up in query_params."""
        args = wc._parse_args(['--crawl-start-after', '2016-12-22T13:01:00',
                               '--crawl-start-before', '2016-12-22T15:11:00',
                               '-c'])
        assert len(args.query_params) == 2
        assert args.query_params['crawl-start-after'] == '2016-12-22T13:01:00'
        assert args.query_params['crawl-start-before'] == '2016-12-22T15:11:00'

    def test_SetQueryParametersAction_multiple_collections(self):
        """All values of a repeated --collection land in query_params.

        A query can have multiple collections, so the user must be able
        to supply multiple values.
        """
        args = wc._parse_args(['--collection', '12345', '98', '--crawl', '12'])
        assert len(args.query_params) == 2
        assert args.query_params['collection'] == ['12345', '98']
class Test_get_credentials_env:
    """Tests for wc.get_credentials_env."""

    def test_get_credentials_env(self):
        """Auth credentials are read from the environment variables."""
        env = {'WASAPI_USER': 'me', 'WASAPI_PASS': 'p@ss123'}
        with patch.dict('os.environ', env):
            assert wc.get_credentials_env() == ('me', 'p@ss123')

    def test_get_credentials_env_missing_one_env_var(self):
        """No auth is produced when either variable is unset."""
        with patch('os.environ.get') as mock_get:
            # First lookup (user) succeeds, second (password) is missing.
            mock_get.side_effect = ['me', None]
            assert wc.get_credentials_env() is None
class Test_get_credentials_config:
    """Tests for wc.get_credentials_config."""

    # A minimal config file with one complete profile.
    FULL_CONFIG = '[unt]\nusername = me\npassword = p@ss123'

    def test_get_credentials_config(self):
        """Auth can be populated from a config file."""
        stream = io.StringIO(self.FULL_CONFIG)
        with patch('builtins.open', return_value=stream):
            assert wc.get_credentials_config('unt') == ('me', 'p@ss123')

    def test_get_credentials_config_missing_profile(self):
        """The program exits when the requested profile doesn't exist."""
        stream = io.StringIO(self.FULL_CONFIG)
        with patch('builtins.open', return_value=stream), \
                pytest.raises(SystemExit):
            wc.get_credentials_config('home')

    def test_get_credentials_config_missing_password(self):
        """The program exits when an expected option is missing."""
        stream = io.StringIO('[unt]\nusername = me')
        with patch('builtins.open', return_value=stream), \
                pytest.raises(SystemExit):
            wc.get_credentials_config('unt')
class Test_get_credentials:
    """Tests for wc.get_credentials credential resolution order."""

    @patch('getpass.getpass', return_value='p@ss123')
    def test_get_credentials_from_getpass(self, mock_getpass):
        """A password is prompted for when only a user is supplied."""
        assert wc.get_credentials(user='me') == ('me', 'p@ss123')
        mock_getpass.assert_called_once_with()

    @patch('wasapi_client.get_credentials_env', return_value=('me', 'p@ss123'))
    def test_get_credentials_from_env(self, mock_gce):
        """Environment credentials are used when nothing else is given."""
        assert wc.get_credentials() == ('me', 'p@ss123')
        mock_gce.assert_called_once_with()

    @patch('wasapi_client.get_credentials_env', return_value=None)
    @patch('wasapi_client.get_credentials_config', return_value=('me', 'p@ss123'))
    def test_get_credentials_from_config(self, mock_gcc, mock_gce):
        """Config-file credentials are used when a profile is supplied."""
        assert wc.get_credentials(profile='unt') == ('me', 'p@ss123')
        mock_gcc.assert_called_once_with('unt')
        mock_gce.assert_called_once_with()

    @patch('wasapi_client.get_credentials_env', return_value=None)
    @patch('wasapi_client.get_credentials_config')
    def test_get_credentials_no_credentials_provided(self, mock_gcc, mock_gce):
        """No user/profile and no environment credentials yields no auth."""
        assert wc.get_credentials() is None
        # The config reader is never consulted without a profile.
        assert not mock_gcc.called
        mock_gce.assert_called_once_with()
|
from ditto.store import Store
from ditto.writers.json.write import Writer
from ditto.readers.cyme.read import Reader as Reader_cyme
from ditto.readers.opendss.read import Reader as Reader_opendss
import os
def update_persistence_jsons():
    """Regenerate the persistence-test JSON files under ``data``.

    Walks the ``data`` directory, re-reads every leaf test case with the
    reader matching its folder name (``opendss`` or ``cyme``), and rewrites
    the JSON output in place with the current Writer.
    """
    for dirpath, dirnames, files in os.walk('data'):
        # Only leaf directories that actually contain input files are tests.
        if not files:
            continue
        # Layout is data/<suite>/<reader_type>/...; split on os.sep so this
        # works on POSIX too (the old split on '\\' was Windows-only).
        parts = os.path.normpath(dirpath).split(os.sep)
        if len(parts) < 3:
            # A file directly under data/<suite> has no reader type; skip it
            # instead of raising IndexError.
            continue
        reader_type = parts[2]
        m = Store()
        if reader_type == 'opendss':
            reader = Reader_opendss(
                master_file=os.path.join('..', dirpath, 'master.dss'),
                buscoordinates_file=os.path.join('..', dirpath, 'buscoord.dss'))
        elif reader_type == 'cyme':
            reader = Reader_cyme(
                data_folder_path=os.path.join('..', dirpath),
                load_filename="load.txt",
                network_filename="network.txt",
                equipment_filename="equipment.txt")
        else:
            # Update with other tests if they get added to the persistence tests
            continue
        reader.parse(m)
        m.set_names()
        print("Writing " + dirpath)
        w = Writer(output_path=dirpath, log_path=dirpath)
        w.write(m)
if __name__ == '__main__':
    # Rebuild all persistence JSONs when run as a script.
    update_persistence_jsons()
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 23 22:48:24 2018
@author: ning
"""
import os
working_dir = ''
import pandas as pd
pd.options.mode.chained_assignment = None
import seaborn as sns
import numpy as np
from sklearn.model_selection import StratifiedKFold,permutation_test_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.multiclass import OneVsOneClassifier,OneVsRestClassifier
sns.set_style('whitegrid')
sns.set_context('poster')
from utils import post_processing
saving_dir = '../figures/'
##############################################################################
############################## plotting part #################################
##############################################################################
############################## using all 6 features ##########################
if __name__ == '__main__':
    # Load the decoding results: probability of success (Pos) and
    # attention (ATT), produced by the modelling scripts.
    pos = pd.read_csv('../results/Pos.csv')
    att = pd.read_csv('../results/ATT.csv')
    # don't work on the loaded data frame, make a copy of it
    df = pos.copy()
    # NOTE(review): sns.factorplot was renamed catplot in seaborn 0.9; the
    # commented-out blocks below are the 0.9 equivalents kept by the author.
    g = sns.factorplot(x = 'window',
                       y = 'score',
                       hue = 'model',
                       data = df,
                       hue_order = ['DecisionTreeClassifier','LogisticRegression'],
                       aspect = 3,
                       dodge = 0.1)
    # for seaborn 0.9.0
    #g = sns.catplot( x = 'window',
    #                 y = 'score',
    #                 hue = 'model',
    #                 data = df,
    #                 aspect = 3,
    #                 kind = 'point',
    #                 hue_order = ['DecisionTreeClassifier','LogisticRegression'],
    #                 ci = 95)
    # NOTE(review): 'Clasifi.' looks like a typo for 'Classif.' in the axis
    # label -- left unchanged because it is user-visible output.
    (g.set_axis_labels('Trials look back',
                       'Clasifi.Score (AUC ROC)')
      .fig.suptitle('Model Comparison of Decoding Probability of Success'))
    g.fig.savefig(os.path.join(saving_dir,
                               'Model Comparison of Decoding Probability of Success.png'),
                  dpi = 500,
                  bbox_inches = 'tight')
    # Reshape the results into long format (Window/Values/Attributes/Models)
    # for the per-feature weight plots.
    df_post = post_processing(df)
    g = sns.factorplot( x = 'Window',
                        y = 'Values',
                        hue = 'Attributes',
                        row = 'Models',
                        row_order=['DecisionTreeClassifier','LogisticRegression'],
                        data = df_post,
                        aspect = 3,
                        sharey = False,
                        dodge = 0.1)
    # for seaborn 0.9.0
    #g = sns.catplot( x = 'window',
    #                 y = 'value',
    #                 hue = 'Attributions',
    #                 row = 'model',
    #                 data = df,
    #                 aspect = 3,
    #                 sharey = False,
    #                 kind = 'point',
    #                 ci = 95)
    (g.set_axis_labels('Trials look back',
                       '')
      .set_titles('{row_name}')
      .fig.suptitle('Probability of Success',
                    y = 1.0))
    # Row 0 is the tree model (feature importances); row 1 the logistic
    # regression (coefficients).
    g.fig.axes[0].set(ylabel='Feature Importance')
    g.fig.axes[1].set(ylabel='Coefficients')
    g.savefig(os.path.join(saving_dir,'Weights plot of Probability of Success.png'),
              dpi = 500,
              bbox_inches = 'tight',)
    # Repeat the same two figures for the attention results.
    df = att.copy()
    g = sns.factorplot( x = 'window',
                        y = 'score',
                        hue = 'model',
                        data = df,
                        hue_order = ['DecisionTreeClassifier','LogisticRegression'],
                        aspect = 3,
                        dodge = 0.1)
    # for seaborn 0.9.0
    #g = sns.catplot( x = 'window',
    #                 y = 'score',
    #                 hue = 'model',
    #                 data = df,
    #                 aspect = 3,
    #                 kind = 'point',
    #                 hue_order = ['DecisionTreeClassifier','LogisticRegression'],
    #                 ci = 95)
    (g.set_axis_labels('Trials look back',
                       'Clasifi.Score (AUC ROC)')
      .fig.suptitle('Model Comparison of Decoding Attention'))
    g.savefig(os.path.join(saving_dir,'Model Comparison of Decoding Attention.png'),
              dpi = 500,
              bbox_inches = 'tight')
    df_post = post_processing(df)
    g = sns.factorplot( x = 'Window',
                        y = 'Values',
                        hue = 'Attributes',
                        row = 'Models',
                        row_order=['DecisionTreeClassifier','LogisticRegression'],
                        data = df_post,
                        aspect = 3,
                        sharey = False,
                        dodge = 0.1)
    # for seaborn 0.9.0
    #g = sns.catplot( x = 'window',
    #                 y = 'value',
    #                 hue = 'Attributions',
    #                 row = 'model',
    #                 data = df,
    #                 aspect = 3,
    #                 sharey = False,
    #                 kind = 'point',
    #                 ci = 95)
    (g.set_axis_labels('Trials look back',
                       '')
      .set_titles('{row_name}')
      .fig.suptitle('Attention',
                    y = 1.0))
    g.fig.axes[0].set(ylabel='Feature Importance')
    g.fig.axes[1].set(ylabel='Coefficients')
    g.savefig(os.path.join(saving_dir,'Weights plot of Attention.png'),
              dpi = 500,
              bbox_inches = 'tight',)
    ###############################################################################
    ###################### plot the normalized weights ############################
    ###############################################################################
    # Bar plots of the (Bonferroni corrected) permutation-test p values.
    pos_ttest = pd.read_csv('../results/Pos_ttest.csv')
    att_ttest = pd.read_csv('../results/ATT_ttest.csv')
    df = pos_ttest.copy()
    g = sns.factorplot(x = 'window',
                       y = 'ps_mean',
                       hue = 'model',
                       ci = None,
                       kind = 'bar',
                       data = df,
                       hue_order = ['DecisionTreeClassifier','LogisticRegression'],
                       aspect = 2.5)
    # for seaborn 0.9.0
    #g = sns.catplot( x = 'window',
    #                 y = 'ps_mean',
    #                 hue = 'model',
    #                 ci = None,
    #                 kind = 'bar',
    #                 data = df,
    #                 aspect = 2.5,
    #                 )
    g.set_axis_labels('Trials look back',
                      'Mean of P values (corrected)')
    # Reference line at the conventional alpha = .05 threshold.
    g.fig.axes[0].axhline(0.05,
                          color = 'red',
                          linestyle = '--',
                          alpha = 0.6)
    g.fig.suptitle('Probability of Success\nBonferroni corrected P values',y=1.07)
    # NOTE(review): 'Proabability' is a typo in the output file name --
    # left unchanged so existing downstream references keep working.
    g.savefig(os.path.join(saving_dir,'Significance test of Proabability of Success.png'),
              dpi = 500,
              bbox_inches = 'tight')
    df = att_ttest.copy()
    g = sns.factorplot(x = 'window',
                       y = 'ps_mean',
                       hue ='model',
                       ci = None,
                       kind = 'bar',
                       data = df,
                       hue_order = ['DecisionTreeClassifier','LogisticRegression'],
                       aspect = 2.5)
    # for seaborn 0.9.0
    #g = sns.catplot( x = 'window',
    #                 y = 'ps_mean',
    #                 hue = 'model',
    #                 ci = None,
    #                 kind = 'bar',
    #                 data =df,
    #                 aspect = 2.5,
    #                 )
    g.set_axis_labels('Trials look back',
                      'Mean of P values (corrected)')
    g.fig.axes[0].axhline(0.05,
                          color = 'red',
                          linestyle = '--',
                          alpha = 0.6)
    g.fig.suptitle('Attention\nBonferroni corrected P values',
                   y = 1.05)
    g.savefig(os.path.join(saving_dir,'Significance test of Attention.png'),
              dpi = 500,
              bbox_inches = 'tight')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.