content stringlengths 5 1.05M |
|---|
# Helper module
try:
from .helper import H
except:
from helper import H
# Settings variables
try:
from . import settings as S
except:
import settings as S
# View module
try:
from . import view as V
except:
import view as V
# Modules to be imported from package when using *
__all__ = ['config','dbgp','H','load','log','protocol','S','session','util','V'] |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: modules/planning/proto/pad_msg.proto
import sys
# On Python 2 the serialized descriptor below is already a byte string; on
# Python 3 it must be encoded to latin-1 to recover the raw descriptor bytes.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()

from modules.common.proto import header_pb2 as modules_dot_common_dot_proto_dot_header__pb2

# File-level descriptor built from the serialized pad_msg.proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='modules/planning/proto/pad_msg.proto',
  package='apollo.planning',
  syntax='proto2',
  serialized_pb=_b('\n$modules/planning/proto/pad_msg.proto\x12\x0f\x61pollo.planning\x1a!modules/common/proto/header.proto\"c\n\nPadMessage\x12%\n\x06header\x18\x01 \x01(\x0b\x32\x15.apollo.common.Header\x12.\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32\x1e.apollo.planning.DrivingAction*W\n\rDrivingAction\x12\n\n\x06\x46OLLOW\x10\x00\x12\x0f\n\x0b\x43HANGE_LEFT\x10\x01\x12\x10\n\x0c\x43HANGE_RIGHT\x10\x02\x12\r\n\tPULL_OVER\x10\x03\x12\x08\n\x04STOP\x10\x04')
  ,
  dependencies=[modules_dot_common_dot_proto_dot_header__pb2.DESCRIPTOR,])

# Descriptor for the apollo.planning.DrivingAction enum.
_DRIVINGACTION = _descriptor.EnumDescriptor(
  name='DrivingAction',
  full_name='apollo.planning.DrivingAction',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='FOLLOW', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='CHANGE_LEFT', index=1, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='CHANGE_RIGHT', index=2, number=2,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='PULL_OVER', index=3, number=3,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='STOP', index=4, number=4,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=193,
  serialized_end=280,
)
_sym_db.RegisterEnumDescriptor(_DRIVINGACTION)

DrivingAction = enum_type_wrapper.EnumTypeWrapper(_DRIVINGACTION)
# Module-level aliases for the DrivingAction enum values.
FOLLOW = 0
CHANGE_LEFT = 1
CHANGE_RIGHT = 2
PULL_OVER = 3
STOP = 4

# Descriptor for the apollo.planning.PadMessage message (header + action fields).
_PADMESSAGE = _descriptor.Descriptor(
  name='PadMessage',
  full_name='apollo.planning.PadMessage',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='apollo.planning.PadMessage.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='action', full_name='apollo.planning.PadMessage.action', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=92,
  serialized_end=191,
)

# Wire up cross-file/message references that could not be resolved inline.
_PADMESSAGE.fields_by_name['header'].message_type = modules_dot_common_dot_proto_dot_header__pb2._HEADER
_PADMESSAGE.fields_by_name['action'].enum_type = _DRIVINGACTION
DESCRIPTOR.message_types_by_name['PadMessage'] = _PADMESSAGE
DESCRIPTOR.enum_types_by_name['DrivingAction'] = _DRIVINGACTION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message class created through the protobuf reflection machinery.
PadMessage = _reflection.GeneratedProtocolMessageType('PadMessage', (_message.Message,), dict(
  DESCRIPTOR = _PADMESSAGE,
  __module__ = 'modules.planning.proto.pad_msg_pb2'
  # @@protoc_insertion_point(class_scope:apollo.planning.PadMessage)
  ))
_sym_db.RegisterMessage(PadMessage)

# @@protoc_insertion_point(module_scope)
|
from django.utils.translation import ugettext as _
from misago.core.mail import build_mail, send_messages
from misago.threads.permissions import can_see_post, can_see_thread
from . import PostingEndpoint, PostingMiddleware
class EmailNotificationMiddleware(PostingMiddleware):
    """Emails subscribed watchers of a thread when a new reply is posted."""

    def __init__(self, **kwargs):
        super(EmailNotificationMiddleware, self).__init__(**kwargs)
        # Snapshot the thread's last-post timestamp before this reply lands,
        # so we only notify users who had read everything up to that point.
        self.previous_last_post_on = self.thread.last_post_on

    def use_this_middleware(self):
        # Notifications only make sense for replies.
        return self.mode == PostingEndpoint.REPLY

    def post_save(self, serializer):
        subscriptions = self.thread.subscription_set.filter(
            send_email=True,
            last_read_on__gte=self.previous_last_post_on,
        ).exclude(user=self.user).select_related('user')

        emails = [
            self.build_mail(subscription.user)
            for subscription in subscriptions.iterator()
            if self.notify_user_of_post(subscription.user)
        ]

        if emails:
            send_messages(emails)

    def notify_user_of_post(self, subscriber):
        # The subscriber must be able to see both the thread and the new post.
        return can_see_thread(subscriber, self.thread) and can_see_post(subscriber, self.post)

    def build_mail(self, subscriber):
        """Build the notification email for one subscriber."""
        if subscriber.id == self.thread.starter_id:
            subject = _('%(user)s has replied to your thread "%(thread)s"')
        else:
            subject = _('%(user)s has replied to thread "%(thread)s" that you are watching')

        return build_mail(
            self.request,
            subscriber,
            subject % {'user': self.user.username, 'thread': self.thread.title},
            'misago/emails/thread/reply',
            {
                'thread': self.thread,
                'post': self.post,
            },
        )
|
from .types import (
read_functype,
read_globaltype,
read_memtype,
read_tabletype,
read_valtype,
)
from .values import get_vec_len, read_name, read_uint
from .instructions import read_expr
def read_customsec(buffer: object, length: int) -> tuple:
    """Read a custom section from buffer.

    :param buffer: binary stream positioned at the start of the section body.
    :param length: declared byte length of the whole section body.
    :return: ``(name, data)`` where ``data`` is the raw payload remaining
        after the section name.
    """
    # https://www.w3.org/TR/wasm-core-1/#custom-section%E2%91%A0
    start = buffer.tell()
    name = read_name(buffer)
    # Everything after the name, up to the declared length, is uninterpreted
    # payload.  (Renamed from ``bytes`` to stop shadowing the builtin.)
    data = buffer.read(length - (buffer.tell() - start))
    return name, data
def read_typesec(buffer: object) -> tuple:
    """Read a type section from buffer: a vector of function types."""
    # https://www.w3.org/TR/wasm-core-1/#type-section%E2%91%A0
    functypes = []
    for _ in range(get_vec_len(buffer)):
        functypes.append(read_functype(buffer))
    return tuple(functypes)
def read_importsec(buffer: object) -> tuple:
    """Read an import section from buffer.

    Each import is a dict with ``module``, ``name`` and a ``desc`` pair
    tagging the imported entity (func/table/mem/global).

    :raises TypeError: if the section is truncated or the descriptor flag
        is out of range.
    """
    # https://www.w3.org/TR/wasm-core-1/#import-section%E2%91%A0
    im = ()
    try:
        for _ in range(get_vec_len(buffer)):
            import_ = {"module": read_name(buffer), "name": read_name(buffer)}
            flag = buffer.read(1)[0]
            assert flag in range(4)
            if not flag:
                import_["desc"] = ("func", read_uint(buffer, 32))
            elif flag == 1:
                import_["desc"] = ("table", read_tabletype(buffer))
            elif flag == 2:
                import_["desc"] = ("mem", read_memtype(buffer))
            elif flag == 3:
                import_["desc"] = ("global", read_globaltype(buffer))
            im += (import_,)
    except (IndexError, AssertionError) as exc:
        # Chain the original failure so truncation vs. bad flag stays debuggable.
        raise TypeError("Invalid import section.") from exc
    return im
def read_funcsec(buffer: object) -> tuple:
    """Read a function section from buffer: a vector of type indices."""
    # https://www.w3.org/TR/wasm-core-1/#function-section%E2%91%A0
    indices = []
    for _ in range(get_vec_len(buffer)):
        indices.append(read_uint(buffer, 32))
    return tuple(indices)
def read_tablesec(buffer: object) -> tuple:
    """Read a table section from buffer."""
    # https://www.w3.org/TR/wasm-core-1/#table-section%E2%91%A0
    tables = []
    for _ in range(get_vec_len(buffer)):
        tables.append({"type": read_tabletype(buffer)})
    return tuple(tables)
def read_memsec(buffer: object) -> tuple:
    """Read a memory section from buffer."""
    # https://www.w3.org/TR/wasm-core-1/#memory-section%E2%91%A0
    memories = []
    for _ in range(get_vec_len(buffer)):
        memories.append({"type": read_memtype(buffer)})
    return tuple(memories)
def read_globalsec(buffer: object) -> tuple:
    """Read a global section from buffer."""
    # https://www.w3.org/TR/wasm-core-1/#global-section%E2%91%A0
    entries = []
    for _ in range(get_vec_len(buffer)):
        # Per entry: global type first, then the constant init expression.
        gt = read_globaltype(buffer)
        init = read_expr(buffer)
        entries.append({"gt": gt, "e": init})
    return tuple(entries)
def read_exportsec(buffer: object) -> tuple:
    """Read an export section from buffer.

    Each export is a dict with a ``name`` and a ``desc`` pair tagging the
    exported entity (func/table/mem/global) with its index.

    :raises TypeError: if the section is truncated or the descriptor flag
        is out of range (mirrors read_importsec's error handling; previously
        a raw AssertionError/IndexError escaped).
    """
    # https://www.w3.org/TR/wasm-core-1/#export-section%E2%91%A0
    ex = ()
    try:
        for _ in range(get_vec_len(buffer)):
            export = {"name": read_name(buffer)}
            desc = buffer.read(1)[0]
            assert desc in range(4)
            # All four descriptor kinds carry a single u32 index.
            kind = ("func", "table", "mem", "global")[desc]
            export["desc"] = kind, read_uint(buffer, 32)
            ex += (export,)
    except (IndexError, AssertionError) as exc:
        raise TypeError("Invalid export section.") from exc
    return ex
def read_startsec(buffer: object) -> dict:
    """Read a start section from buffer."""
    # https://www.w3.org/TR/wasm-core-1/#start-section%E2%91%A0
    func_idx = read_uint(buffer, 32)
    return {"func": func_idx}
def read_elemsec(buffer: object) -> tuple:
    """Read an element section from buffer."""
    # https://www.w3.org/TR/wasm-core-1/#element-section%E2%91%A0
    segments = []
    for _ in range(get_vec_len(buffer)):
        # Per segment: table index, offset expression, then the init vector.
        table_idx = read_uint(buffer, 32)
        offset = read_expr(buffer)
        init = tuple(read_uint(buffer, 32) for _ in range(get_vec_len(buffer)))
        segments.append({"table": table_idx, "offset": offset, "init": init})
    return tuple(segments)
def read_codesec(buffer: object) -> tuple:
    """Read a code section from buffer.

    :return: tuple of ``(local_types, expr)`` pairs, one per function body,
        where ``local_types`` is the flattened tuple of declared locals.
    :raises TypeError: if a body's declared size does not match its encoding.
    """
    # https://www.w3.org/TR/wasm-core-1/#code-section%E2%91%A0
    code = ()
    try:
        for _ in range(get_vec_len(buffer)):
            size = read_uint(buffer, 32)
            start = buffer.tell()
            t = ()
            for _ in range(get_vec_len(buffer)):
                n = read_uint(buffer, 32)
                # Each locals entry declares *n* locals of a single valtype,
                # so the type must be repeated n times when decompressing.
                # (Previously the count was read but ignored, yielding one
                # local per entry regardless of n.)
                t += ((read_valtype(buffer),) * n,)
            concat_t = ()
            for local_types in t:
                concat_t += local_types
            code += ((concat_t, read_expr(buffer)),)
            end = buffer.tell()
            assert size == end - start
    except AssertionError as exc:
        raise TypeError("Invalid code section.") from exc
    return code
def read_datasec(buffer: object) -> tuple:
    """Read a data section from buffer."""
    # https://www.w3.org/TR/wasm-core-1/#data-section%E2%91%A0
    segments = []
    for _ in range(get_vec_len(buffer)):
        # Per segment: memory index, offset expression, then raw init bytes.
        mem_idx = read_uint(buffer, 32)
        offset = read_expr(buffer)
        init = tuple(buffer.read(1)[0] for _ in range(get_vec_len(buffer)))
        segments.append({"data": mem_idx, "offset": offset, "init": init})
    return tuple(segments)
|
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2020 FABRIC Testbed
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Author: Ilya Baldin (ibaldin@renci.org)
from typing import List
import networkx_query as nxq
from .abc_asm import ABCASMPropertyGraph
from ..abc_property_graph import ABCPropertyGraph, PropertyGraphQueryException
from ..networkx_property_graph import NetworkXPropertyGraph, NetworkXGraphImporter
class NetworkxASM(ABCASMPropertyGraph, NetworkXPropertyGraph):
    """
    Class implementing Abstract Slice Model on top of NetworkX
    """

    def __init__(self, *, graph_id: str, importer: NetworkXGraphImporter, logger=None):
        super().__init__(graph_id=graph_id, importer=importer, logger=logger)

    def find_node_by_name(self, node_name: str, label: str) -> str:
        """
        Return a node id of a node with this name
        :param node_name:
        :param label: label/class of the node
        :return:
        """
        assert node_name is not None
        my_graph = self.storage.get_graph(self.graph_id)
        criteria = {'and': [
            {'eq': [ABCPropertyGraph.GRAPH_ID, self.graph_id]},
            {'eq': [ABCPropertyGraph.PROP_NAME, node_name]},
            {'eq': [ABCPropertyGraph.PROP_CLASS, label]}
        ]}
        matches = list(nxq.search_nodes(my_graph, criteria))
        # Exactly one match is required: zero or multiple is a query error.
        if not matches:
            raise PropertyGraphQueryException(graph_id=self.graph_id, node_id=None,
                                              msg=f"Unable to find node with name {node_name} class {label}")
        if len(matches) > 1:
            raise PropertyGraphQueryException(graph_id=self.graph_id, node_id=None,
                                              msg=f"Graph contains multiple nodes with name {node_name} class {label}")
        return my_graph.nodes[matches[0]][ABCPropertyGraph.NODE_ID]

    def check_node_name(self, *, node_id: str, label: str, name: str) -> bool:
        """Return True iff the node with this id also matches the given name and class."""
        assert node_id is not None
        assert name is not None
        assert label is not None
        criteria = {'and': [
            {'eq': [ABCPropertyGraph.GRAPH_ID, self.graph_id]},
            {'eq': [ABCPropertyGraph.PROP_NAME, name]},
            {'eq': [ABCPropertyGraph.PROP_CLASS, label]},
            {'eq': [ABCPropertyGraph.NODE_ID, node_id]}
        ]}
        matches = list(nxq.search_nodes(self.storage.get_graph(self.graph_id), criteria))
        return len(matches) > 0
class NetworkXASMFactory:
    """
    Help convert graphs between formats so long as they are rooted in NetworkXPropertyGraph
    """

    @staticmethod
    def create(graph: NetworkXPropertyGraph) -> NetworkxASM:
        """Build a NetworkxASM sharing the given graph's id, importer and logger."""
        assert graph is not None
        assert isinstance(graph.importer, NetworkXGraphImporter)
        return NetworkxASM(
            graph_id=graph.graph_id,
            importer=graph.importer,
            logger=graph.log,
        )
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
from xkbgroup.version import VERSION
def read_readme():
    """Return the contents of README.rst for use as the long description."""
    # Be explicit about the encoding so installation does not depend on the
    # user's locale (README may contain non-ASCII characters).
    with open("README.rst", encoding="utf-8") as f:
        return f.read()
# Distribution metadata; the long description is sourced from README.rst.
setup(
    name="xkbgroup",
    version=VERSION,
    description="Query and change XKB layout state",
    long_description=read_readme(),
    author="Nguyen Duc My",
    author_email="hcpl.prog@gmail.com",
    url="https://github.com/hcpl/xkbgroup",
    packages=["xkbgroup"],
    package_data={"": ["LICENSE", "README.rst", "generate_bindings.sh"]},
    license="MIT",
    zip_safe=True,
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Environment :: X11 Applications",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: POSIX",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Topic :: Desktop Environment :: Window Managers",
        "Topic :: Software Development :: Libraries"
    ],
    # Installs an `xkbgroup` console command pointing at the package main.
    entry_points={
        "console_scripts": [
            "xkbgroup = xkbgroup.__main__:main"
        ]
    },
)
|
#!/usr/bin/env python3
# -*- CoDing: utf-8 -*-
"""
Created on May 22 2019
Last Update May 22 2019
@author: simonvanvliet
Department of Zoology
University of Britisch Columbia
vanvliet@zoology.ubc.ca
This recreates the data and figure for figure 4
By default data is loaded unless parameters have changes, to rerun model set override_data to True
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import mls_general_code as mlsg
from pathlib import Path
import matplotlib.colors as colr
import seaborn as sns
"""
# SET model settings
"""
# set to True to force recalculation of data
override_data = False
# set folder
data_folder = Path("Data_Paper/")
fig_Folder = Path("Figures_Paper/")
figureName = 'figure4.pdf'
"""
# SET figure settings
"""
# set figure settings
wFig = 8.7
hFig = 3
font = {'family': 'Helvetica',
'weight': 'light',
'size': 6}
axes = {'linewidth': 0.5,
'titlesize': 7,
'labelsize': 6,
'labelpad': 2,
'spines.top': False,
'spines.right': False,
}
ticks = {'major.width': 0.5,
'minor.width': 0.3,
'direction': 'in',
'major.size': 2,
'minor.size': 1.5,
'labelsize': 6,
'major.pad': 2}
legend = {'fontsize': 6,
'handlelength': 1.5,
'handletextpad': 0.5,
'labelspacing': 0.2}
figure = {'dpi': 300}
savefigure = {'dpi': 300,
'transparent': True}
mpl.style.use('seaborn-ticks')
mpl.rc('font', **font)
mpl.rc('axes', **axes)
mpl.rc('xtick', **ticks)
mpl.rc('ytick', **ticks)
mpl.rc('legend', **legend)
mpl.rc('figure', **figure)
mpl.rc('savefig', **savefigure)
colors = ['777777', 'E24A33', '348ABD', '988ED5',
'FBC15E', '8EBA42', 'FFB5B8']
mpl.rcParams['axes.prop_cycle'] = mpl.cycler(color=colors)
"""
Main code
"""
# plot cell densities
def plot_ver_hor_cell(axs, n0, mig):
    """Plot total, vertically- and horizontally-transmitted cell densities
    against log10 time on axis *axs*.

    n0 and mig are forwarded to mlsg.calc_tauHer_nt/ft (model parameters).
    """
    # setup time vector
    maxT = 6
    minT = -3
    t = np.logspace(minT, maxT, int(1E5))
    # calculate cell density and fraction vertical transmitted
    n_t = mlsg.calc_tauHer_nt(t, n0, mig, 1, 1)
    f_t = mlsg.calc_tauHer_ft(t, n0, mig, 1, 1)
    v_t = f_t * n_t
    h_t = (1 - f_t) * n_t
    logt = np.log10(t)
    # plot
    axs.plot(logt, n_t, linewidth=1, label='tot')
    axs.plot(logt, v_t, '--', linewidth=1, label='vert')
    axs.plot(logt, h_t, ':', linewidth=1, label='hor')
    #axs.set_xlabel('$\log_{10}$ time [a.u.]')
    axs.set_ylabel("density")
    # axis limits and tick positions
    maxY = 1
    xStep = 4
    yStep = 3
    axs.set_ylim((0, maxY+0.05))
    axs.set_xlim((minT, maxT))
    axs.set_xticks(np.linspace(minT, maxT, xStep))
    axs.set_yticks(np.linspace(0, maxY, yStep))
    # x tick labels hidden: this panel sits above one sharing the same x range
    axs.tick_params(labelbottom=False)
    axs.legend(loc='upper left')
    # axs.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    return None
# plot fraction vertical transmitted
def plot_verFracl(axs, n0, mig_rel_vec):
    """Plot the fraction of vertically transmitted cells against log10 time.

    One curve is drawn per relative migration rate in *mig_rel_vec*; the
    absolute rate passed to the model is ``mig * n0``.
    """
    # create time vector
    maxT = 6
    minT = -3
    t = np.logspace(minT, maxT, int(1E5))
    colors = sns.color_palette("Blues_d", n_colors=len(mig_rel_vec))
    # calculate fraction vertically transmitted for different migration rates
    for i, mig in enumerate(mig_rel_vec):
        f_t = mlsg.calc_tauHer_ft(t, n0, mig*n0, 1, 1)
        axs.plot(np.log10(t), f_t, linewidth=1, c=colors[i],
                 label='$%2.1f$' % mig)
    # dotted reference line at 50% vertical transmission
    axs.plot([minT, maxT], [0.5, 0.5], 'k:', linewidth=0.5)
    # raw string: avoids the invalid "\l" escape-sequence warning
    axs.set_xlabel(r'$\log_{10}$ time [a.u.]')
    axs.set_ylabel("fraction vert")
    maxY = 1
    xStep = 4
    yStep = 3
    axs.set_xlim((minT, maxT))
    axs.set_xticks(np.linspace(minT, maxT, xStep))
    axs.set_ylim((0, maxY+0.05))
    #axs.set_xlim((0, maxT))
    #axs.set_xticks(np.linspace(0, maxT, xStep))
    axs.set_yticks(np.linspace(0, maxY, yStep))
    # axs.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left',
    #            ncol=4, mode="expand", borderaxespad=0.)
    axs.legend(bbox_to_anchor=(1.05, 0), loc='lower left',
               borderaxespad=0., borderpad=0.)
    return None
# plot heritability time
def plot_tauH_heatmap(fig, axs, cbaxes):
    """Draw a log10 heritability-time heatmap over the (n0/k, theta/beta)
    grid on *axs* and attach a horizontal colorbar in *cbaxes*."""
    # setup grid (tuples are log10 ranges for both axes)
    n0 = (-9, -1)
    mig = (-9, -1)
    n0Fine = np.logspace(*n0, 1000)
    migFine = np.logspace(*mig, 1000)
    n0Gr, migGr = np.meshgrid(n0Fine, migFine)
    # calc tau_Her
    tauHMat = mlsg.calc_tauHer_numeric(n0Gr, migGr)
    # Build the colormap from the seaborn RdBu_r palette, sampled
    # non-uniformly (denser above the 0.5 midpoint).  The previous
    # coolwarm-based ListedColormap was dead code: it was constructed and
    # then immediately overwritten, so it has been removed.
    indexVec = np.hstack((np.linspace(0, 0.5, 100),
                          np.linspace(0.5, 1-1E-12, 150)))
    colors = sns.color_palette("RdBu_r", 1024)
    idx = np.floor(indexVec*1024).astype(int)
    cmap = colr.ListedColormap([colors[i] for i in idx])
    currData = np.log10(tauHMat)
    axl = axs.imshow(currData, cmap=cmap,
                     interpolation='nearest',
                     extent=[*n0, *mig],
                     origin='lower',
                     vmin=-6, vmax=9)
    axs.set_xticks([-9, -6, -3, -1])
    axs.set_yticks([-9, -6, -3, -1])
    axs.set_xlabel('$\\log_{10} \\frac{n_0}{k}$')
    axs.set_ylabel('$\\log_{10} \\frac{\\theta}{\\beta}$')
    axs.set_aspect('equal')
    # The colorbar gets its own (otherwise invisible) axes above the map.
    cbaxes.set_axis_off()
    cb = fig.colorbar(axl, ax=cbaxes, orientation='horizontal',
                      label="$\\log_{10}\\tau_{her}$",
                      ticks=[-6, -3, 0, 3, 6, 9], anchor=(0.5, 0), aspect=30, shrink=0.5)
    cb.ax.xaxis.set_ticks_position('top')
    cb.ax.xaxis.set_label_position('top')
# create figure
def create_fig():
    """Assemble the full figure (two stacked left panels + right heatmap)
    and save it as a PDF into fig_Folder."""
    # set settings
    n0 = 1E-4
    mig1 = 0.5 * n0
    mig_vec_rel = [0.1, 0.5, 2, 10]
    # setup manual axis for subplots
    # (all values below are fractions of the figure width/height)
    bm = 0.22
    tm = 0.06
    cm = 0.05
    h = (1 - bm - cm - tm)/2
    lm = 0.08
    rm = 0
    cmh = 0.18
    w1 = 0.4
    w2 = 1 - w1 - cmh - rm - lm
    h3 = 0.1
    tm3 = 0.05
    h2 = 1 - h3 - cm - bm - tm3
    fig = plt.figure()
    mlsg.set_fig_size_cm(fig, wFig, hFig)
    # top-left panel: cell densities
    ax = fig.add_axes([lm, bm+cm+h, w1, h])
    plot_ver_hor_cell(ax, n0, mig1)
    # bottom-left panel: fraction vertically transmitted
    ax = fig.add_axes([lm, bm, w1, h])
    plot_verFracl(ax, n0, mig_vec_rel)
    ax.annotate('$\\frac{\\theta/\\beta}{n_0/k}=$',
                xy=(w1+lm+0.02, bm+h*1.2), xycoords='figure fraction',
                horizontalalignment='left',
                verticalalignment='top')
    # right panel: heatmap plus a dedicated colorbar axes above it
    ax = fig.add_axes([lm + cmh + w1, bm, w2, h2])
    cbaxes = fig.add_axes([lm + cmh + w1, h2 + bm + cm, w2, h3])
    plot_tauH_heatmap(fig, ax, cbaxes)
    #plt.tight_layout(pad=0.2, h_pad=0.5, w_pad=0.5)
    fig.savefig(fig_Folder / figureName,
                format="pdf", transparent=True)
    return None
if __name__ == "__main__":
create_fig()
|
"""Internal API endpoint constant library.
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
# Each endpoint entry: [operation_id, HTTP method, route, description,
# service collection name, [parameter definitions]].
_spotlight_vulnerabilities_endpoints = [
    [
        "combinedQueryVulnerabilities",
        "GET",
        "/spotlight/combined/vulnerabilities/v1",
        "Search for Vulnerabilities in your environment by providing an FQL filter and paging details. "
        "Returns a set of Vulnerability entities which match the filter criteria",
        "spotlight_vulnerabilities",
        [
            {
                "type": "string",
                "description": "A pagination token used with the `limit` parameter to manage pagination of results. "
                "On your first request, don't provide an `after` token. On subsequent requests, provide the `after` "
                "token from the previous response to continue from that place in the results.",
                "name": "after",
                "in": "query"
            },
            {
                "maximum": 5000,
                "minimum": 1,
                "type": "integer",
                "description": "The number of items to return in this response (default: 100, max: 5000). "
                "Use with the after parameter to manage pagination of results.",
                "name": "limit",
                "in": "query"
            },
            {
                "type": "string",
                "description": "Sort vulnerabilities by their properties. Common sort options "
                "include:\n\n<ul><li>created_timestamp|desc</li><li>closed_timestamp|asc</li></ul>",
                "name": "sort",
                "in": "query"
            },
            {
                "type": "string",
                "description": "Filter items using a query in Falcon Query Language (FQL). "
                "Wildcards * are unsupported. \n\nCommon filter options include:\n\n<ul>"
                "<li>created_timestamp:>'2019-11-25T22:36:12Z'</li><li>closed_timestamp:>'2019-11-25T22:36:12Z'</li>"
                "<li>aid:'8e7656b27d8c49a34a1af416424d6231'</li></ul>",
                "name": "filter",
                "in": "query",
                "required": True
            },
            {
                "type": "array",
                "items": {
                    "type": "string"
                },
                "collectionFormat": "csv",
                "description": "Select various details blocks to be returned for each vulnerability entity. "
                "Supported values:\n\n<ul><li>host_info</li><li>remediation_details</li><li>cve_details</li></ul>",
                "name": "facet",
                "in": "query"
            }
        ]
    ],
    [
        "getRemediationsV2",
        "GET",
        "/spotlight/entities/remediations/v2",
        "Get details on remediation by providing one or more IDs",
        "spotlight_vulnerabilities",
        [
            {
                "type": "array",
                "items": {
                    "type": "string"
                },
                "collectionFormat": "multi",
                "description": "One or more remediation IDs",
                "name": "ids",
                "in": "query",
                "required": True
            }
        ]
    ],
    [
        "getVulnerabilities",
        "GET",
        "/spotlight/entities/vulnerabilities/v2",
        "Get details on vulnerabilities by providing one or more IDs",
        "spotlight_vulnerabilities",
        [
            {
                "type": "array",
                "items": {
                    "type": "string"
                },
                "collectionFormat": "multi",
                "description": "One or more vulnerability IDs (max: 400). "
                "Find vulnerability IDs with GET /spotlight/queries/vulnerabilities/v1",
                "name": "ids",
                "in": "query",
                "required": True
            }
        ]
    ],
    [
        "queryVulnerabilities",
        "GET",
        "/spotlight/queries/vulnerabilities/v1",
        "Search for Vulnerabilities in your environment by providing an FQL filter and paging details. "
        "Returns a set of Vulnerability IDs which match the filter criteria",
        "spotlight_vulnerabilities",
        [
            {
                "type": "string",
                "description": "A pagination token used with the `limit` parameter to manage pagination of results. "
                "On your first request, don't provide an `after` token. On subsequent requests, provide the `after` "
                "token from the previous response to continue from that place in the results.",
                "name": "after",
                "in": "query"
            },
            {
                "maximum": 400,
                "minimum": 1,
                "type": "integer",
                "description": "The number of items to return in this response (default: 100, max: 400). "
                "Use with the after parameter to manage pagination of results.",
                "name": "limit",
                "in": "query"
            },
            {
                "type": "string",
                "description": "Sort vulnerabilities by their properties. Common sort options include:\n\n"
                "<ul><li>created_timestamp|desc</li><li>closed_timestamp|asc</li></ul>",
                "name": "sort",
                "in": "query"
            },
            {
                "type": "string",
                "description": "Filter items using a query in Falcon Query Language (FQL). Wildcards * are unsupported. "
                "\n\nCommon filter options include:\n\n<ul><li>created_timestamp:>'2019-11-25T22:36:12Z'</li>"
                "<li>closed_timestamp:>'2019-11-25T22:36:12Z'</li><li>aid:'8e7656b27d8c49a34a1af416424d6231'</li></ul>",
                "name": "filter",
                "in": "query",
                "required": True
            }
        ]
    ],
    [
        "getRemediations",
        "GET",
        "/spotlight/entities/remediations/v2",
        "Get details on remediations by providing one or more IDs",
        "spotlight_vulnerabilities",
        [
            {
                "type": "array",
                "items": {
                    "type": "string"
                },
                "collectionFormat": "multi",
                "description": "One or more remediation IDs (max: 400).",
                "name": "ids",
                "in": "query",
                "required": True
            }
        ]
    ]
]
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Optional
import pygame
from snecs.typedefs import EntityID
from scripts.engine.core.constants import GameState, GameStateType
from scripts.engine.world_objects.gamemap import Gamemap
if TYPE_CHECKING:
from typing import TYPE_CHECKING, Dict
class _Store:
    """
    Hold the current state info required by the engine. Must be serialised.
    Should only be accessed via getters and setters, not directly.
    """

    def __init__(self):
        self.internal_clock = pygame.time.Clock()

        # used in state
        self.current_game_state: GameStateType = GameState.LOADING
        self.previous_game_state: GameStateType = GameState.LOADING
        self.active_skill = None

        # used in world
        self.current_gamemap: Optional[Gamemap] = None

        # used in chronicle
        self.turn_queue: Dict[EntityID, int] = {}  # (entity, time)
        self.round: int = 1  # count of the round
        self.time: int = 1  # total time of actions taken
        self.time_of_last_turn: int = 1
        self.round_time: int = 0  # tracker of time progressed in current round
        self.turn_holder: EntityID = -1  # current acting entity

    def serialise(self) -> Dict[str, Any]:
        """
        Serialise all data held in the store.
        """
        gamemap_data = self.current_gamemap.serialise() if self.current_gamemap else {}
        return {
            "current_game_state": self.current_game_state,
            "previous_game_state": self.previous_game_state,
            "current_gamemap": gamemap_data,
            "turn_queue": self.turn_queue,
            "round": self.round,
            "time": self.time,
            "time_of_last_turn": self.time_of_last_turn,
            "round_time": self.round_time,
            "turn_holder": self.turn_holder,
        }

    def deserialise(self, serialised: Dict[str, Any]):
        """
        Loads the details from the serialised data back into the store.
        """
        try:
            self.current_game_state = serialised["current_game_state"]
            self.previous_game_state = serialised["previous_game_state"]
            # An empty/falsy gamemap payload means no map was loaded.
            self.current_gamemap = (
                Gamemap.deserialise(serialised["current_gamemap"])
                if serialised["current_gamemap"]
                else None
            )
            self.turn_queue = serialised["turn_queue"]
            self.round = serialised["round"]
            self.time = serialised["time"]
            self.time_of_last_turn = serialised["time_of_last_turn"]
            self.round_time = serialised["round_time"]
            self.turn_holder = serialised["turn_holder"]
        except KeyError as e:
            logging.warning(f"Store.Deserialise: Incorrect key ({e.args[0]}) given. Data not loaded correctly.")
# Module-level singleton store shared by the engine.
store = _Store()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
################################################################################
#
# Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved.
#
# File: tools/timer.py
# Date: 2019/10/12 15:12:02
# Author: hehuang@baidu.com
#
################################################################################
from collections import defaultdict
import time
class Timer(object):
    """Accumulating stopwatch.

    Repeated start()/end() pairs — or uses as a context manager — add their
    elapsed wall-clock time to ``pass_time``.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear the accumulated time."""
        self.pass_time = 0.0

    def start(self):
        """Mark the beginning of a timed interval."""
        self.time = time.time()

    def end(self):
        """Add the time elapsed since the matching start() to pass_time."""
        self.pass_time += time.time() - self.time

    def __enter__(self):
        self.start()
        # Return self so ``with Timer() as t:`` binds the timer
        # (previously __enter__ implicitly returned None).
        return self

    def __exit__(self, *args, **kwargs):
        self.end()
# Registry of named timers: timers["phase"] lazily creates a fresh Timer.
timers = defaultdict(Timer)
|
class Scene:
    """
    Scene has all the information needed for the ray tracing engine
    """

    def __init__(self, camera, objects, width, height):
        # Camera from which rays are cast.
        self.camera = camera
        # Geometry that rays can intersect.
        self.objects = objects
        # Output image dimensions in pixels.
        self.width = width
        self.height = height
|
"""
This script converts NeSys .xyz files with coordinates specified
in the local coordinate system for the pontine nuclei defined by
Leergaard et al. 2000 to Waxholm Space coordinates for the rat brain
(in voxels).
"""
# pylint: disable=C0103
import os
import random
random.seed()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Function definitions
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def listXYZfiles(folder):
    r"""Return the names of .xyz files in *folder* that have not already been
    converted (i.e. that do not end in "-WHSvox.xyz")."""
    filelist = []
    for file in os.listdir(folder):
        # Skip output files this script produced on a previous run.
        if file.endswith(".xyz") and not file.endswith("-WHSvox.xyz"):
            filelist.append(file)
    print("Found " + str(len(filelist)) + " non-WHS .xyz files to convert.")
    return filelist
def convert(ponscoords):
    r"""Convert a point from the pontine-nuclei local coordinate system to
    rat brain Waxholm Space coordinates (in voxels).

    :param ponscoords: sequence of three numeric strings/values (x, y, z)
        in the local coordinate system.
    :return: list ``[x, y, z]`` of WHS voxel coordinates.
    """
    x = float(ponscoords[0])
    y = float(ponscoords[1])
    z = float(ponscoords[2])
    # Fixed affine transform between the two coordinate systems; offsets and
    # scale factors are the calibration constants from the original script.
    return [
        244 + x / 2000 * 59.5,
        429.9371725 + z / 2000 * -47.2076 + y / 1200 * 10.99089,
        188.2860875 + z / 2000 * -36.3546 + y / 1200 * -16.2843,
    ]
def rndRGB():
    """Return a MeshView "RGB r g b" line with a random darker color."""
    red = green = blue = 1
    # Re-draw until the color is dark enough (component sum <= 2.5).
    while (red + green + blue) > 2.5:
        red = random.randint(0, 255) / 255
        green = random.randint(0, 255) / 255
        blue = random.randint(0, 255) / 255
    return "RGB " + str(red) + " " + str(green) + " " + str(blue) + "\n"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Program core
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Define max. nr of points in one rendering group for MeshView
GROUP_SIZE = 8000
# Initialize point counter (max. size: GROUP_SIZE)
pointcounter = 0
# Remember color settings for point groups larger than GROUP_SIZE
groupcolor = "RGB 1 0 0\n" # Default color: Red
# Get list of .xyz files in the current folder
# (work relative to the directory this script lives in)
os.chdir(os.path.dirname(os.path.realpath(__file__)))
xyzfiles = listXYZfiles(os.getcwd())
# Take each file on the list:
for file in xyzfiles:
    # Open pons file for reading
    ponsfile = open(file, "r")
    # Create WHS file for writing
    newfilename = file[:-4] + "-WHSvox.xyz"
    WHSfile = open(newfilename, "w")
    WHSfile.write("SCALE 5\n")
    # Parse file
    for line in ponsfile:
        if line.startswith("#"): # New point group starts
            pointcounter = 0 # Reset point counter
            WHSfile.write(line) # Comments: copy as they are
            groupcolor = rndRGB() # Reset group color
            WHSfile.write(groupcolor) # Add RGB color for MeshView
        elif not line.strip(): # Empty line (only spaces)
            WHSfile.write("\n")
        else:
            pointcounter += 1
            # Split oversized groups so MeshView can render them
            if pointcounter==GROUP_SIZE+1:
                WHSfile.write(groupcolor) # Start new rendering group for MeshView, but keep old colors
                pointcounter=1
            # Convert coordinates
            newcoords = convert(line.split())
            # Write new coordinates to file
            WHSfile.write(str(newcoords[0]) + " " + str(newcoords[1]) + " " + str(newcoords[2]) + "\n")
    # Close both files
    ponsfile.close()
    WHSfile.close()
print("Conversion complete.")
|
import asyncio
import aiohttp
import time
URL = 'https://mofanpy.com/'
async def job(session):
    """Fetch URL once with the shared aiohttp session.

    Returns the final response URL as a string (so redirects, if any,
    are reflected in the result).
    """
    response = await session.get(URL)
    return str(response.url)
async def main(loop):
    """Run two concurrent fetches of URL on *loop* and print their results."""
    # One shared ClientSession for all requests (reuses the connection pool).
    async with aiohttp.ClientSession() as session:
        tasks = [loop.create_task(job(session)) for _ in range(2)]
        # Default return_when=ALL_COMPLETED, so `unfinished` is empty here.
        finished, unfinished = await asyncio.wait(tasks)
        all_results = [r.result() for r in finished]  # get return from job
        print(all_results)
# Time the two concurrent requests end-to-end.
t1 = time.time()
# NOTE(review): get_event_loop() outside a running loop is deprecated since
# Python 3.10; asyncio.run() is the modern entry point — kept for notebook use.
loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop))
# loop.close() # Ipython notebook gives error if close loop
print("Async total time:", time.time() - t1)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'telaAdd.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """UI for the user-registration (cadastro) window.

    Originally generated by pyuic5 from 'telaAdd.ui'; the identical
    4-line bold-font setup, repeated for every label and button, is
    factored into the `_bold_font` helper.
    """

    @staticmethod
    def _bold_font():
        """Return the bold KacstOne font used by every label and button."""
        font = QtGui.QFont()
        font.setFamily("KacstOne")
        font.setBold(True)
        font.setWeight(75)
        return font

    def setupUi(self, Form):
        """Build all widgets; geometry values mirror the original .ui layout."""
        Form.setObjectName("Form")
        Form.resize(577, 502)
        Form.setFixedSize(577, 502)
        # Header image label (pixmap assigned in retranslateUi).
        self.label = QtWidgets.QLabel(Form)
        self.label.setGeometry(QtCore.QRect(80, 10, 401, 61))
        self.label.setObjectName("label")
        # Container + vertical layout holding the form fields.
        self.layoutWidget = QtWidgets.QWidget(Form)
        self.layoutWidget.setGeometry(QtCore.QRect(170, 108, 231, 321))
        self.layoutWidget.setObjectName("layoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        # Email field.
        self.labelEmail = QtWidgets.QLabel(self.layoutWidget)
        self.labelEmail.setFont(self._bold_font())
        self.labelEmail.setObjectName("labelEmail")
        self.verticalLayout.addWidget(self.labelEmail)
        self.lineEditEmail = QtWidgets.QLineEdit(self.layoutWidget)
        self.lineEditEmail.setObjectName("lineEditEmail")
        self.lineEditEmail.setPlaceholderText('Informe o seu email')
        self.verticalLayout.addWidget(self.lineEditEmail)
        # Username field.
        self.labelUserName = QtWidgets.QLabel(self.layoutWidget)
        self.labelUserName.setFont(self._bold_font())
        self.labelUserName.setObjectName("labelUserName")
        self.verticalLayout.addWidget(self.labelUserName)
        self.lineEditUserName = QtWidgets.QLineEdit(self.layoutWidget)
        self.lineEditUserName.setObjectName("lineEditUserName")
        self.lineEditUserName.setPlaceholderText('Informe o seu nome de usuário')
        self.verticalLayout.addWidget(self.lineEditUserName)
        # Password field (masked input).
        self.labelPassword = QtWidgets.QLabel(self.layoutWidget)
        self.labelPassword.setFont(self._bold_font())
        self.labelPassword.setObjectName("labelPassword")
        self.verticalLayout.addWidget(self.labelPassword)
        self.lineEditPassword = QtWidgets.QLineEdit(self.layoutWidget)
        self.lineEditPassword.setEchoMode(QtWidgets.QLineEdit.Password)
        self.lineEditPassword.setObjectName("lineEditPassword")
        self.lineEditPassword.setPlaceholderText('Informe a sua senha')
        self.verticalLayout.addWidget(self.lineEditPassword)
        # Password confirmation field (masked input).
        self.labelPassword2 = QtWidgets.QLabel(self.layoutWidget)
        self.labelPassword2.setFont(self._bold_font())
        self.labelPassword2.setObjectName("labelPassword2")
        self.verticalLayout.addWidget(self.labelPassword2)
        self.lineEditPassword_2 = QtWidgets.QLineEdit(self.layoutWidget)
        self.lineEditPassword_2.setEchoMode(QtWidgets.QLineEdit.Password)
        self.lineEditPassword_2.setObjectName("lineEditPassword_2")
        self.lineEditPassword_2.setPlaceholderText('Confirme a sua senha')
        self.verticalLayout.addWidget(self.lineEditPassword_2)
        # Date of birth field.
        self.labelDateBirth = QtWidgets.QLabel(self.layoutWidget)
        self.labelDateBirth.setFont(self._bold_font())
        self.labelDateBirth.setObjectName("labelDateBirth")
        self.verticalLayout.addWidget(self.labelDateBirth)
        self.dateBirth = QtWidgets.QDateEdit(self.layoutWidget)
        self.dateBirth.setObjectName("dateBirth")
        self.verticalLayout.addWidget(self.dateBirth)
        # Gender selection.
        self.labelGender = QtWidgets.QLabel(self.layoutWidget)
        self.labelGender.setFont(self._bold_font())
        self.labelGender.setObjectName("labelGender")
        self.verticalLayout.addWidget(self.labelGender)
        self.selectGender = QtWidgets.QComboBox(self.layoutWidget)
        self.selectGender.setObjectName("selectGender")
        self.selectGender.addItem('Feminino')
        self.selectGender.addItem('Masculino')
        self.verticalLayout.addWidget(self.selectGender)
        # Submit and back buttons (placed directly on the form, not the layout).
        self.buttonSubmit = QtWidgets.QPushButton(Form)
        self.buttonSubmit.setGeometry(QtCore.QRect(260, 450, 141, 29))
        self.buttonSubmit.setStyleSheet('background-color:#1f4c73')
        self.buttonSubmit.setFont(self._bold_font())
        self.buttonSubmit.setObjectName("buttonSubmit")
        self.buttonBack = QtWidgets.QPushButton(Form)
        self.buttonBack.setGeometry(QtCore.QRect(170, 450, 71, 29))
        self.buttonBack.setFont(self._bold_font())
        self.buttonBack.setObjectName("buttonBack")
        self.buttonBack.setStyleSheet('background-color:#1f4c73')
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Set all user-visible texts and the header pixmap."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle("Cadastro de Usuário")
        self.label.setText(_translate("Form", "TextLabel"))
        # Header image, scaled to fit the label while keeping aspect ratio.
        pixmap = QtGui.QPixmap("icons/iconCadUser.png")
        pixmap3 = pixmap.scaled(400, 80, QtCore.Qt.KeepAspectRatio)
        self.label.setPixmap(pixmap3)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.labelEmail.setText(_translate("Form", "Email:"))
        self.labelUserName.setText(_translate("Form", "Nome de usuário:"))
        self.labelPassword.setText(_translate("Form", "Senha:"))
        self.labelPassword2.setText(_translate("Form", "Confirme a senha:"))
        self.labelDateBirth.setText(_translate("Form", "Data de nascimento:"))
        self.labelGender.setText(_translate("Form", "Sexo:"))
        self.buttonSubmit.setText(_translate("Form", "Concluir cadastro"))
        self.buttonBack.setText(_translate("Form", "Voltar"))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from django import forms
import six
class AjaxForm(forms.Form):
    """Base form that serialises its validation state to a JSON-friendly dict."""

    def as_json(self, general_errors=None):
        """Return a dict describing the form's current validation state.

        Keys (each present only when non-empty):
          - 'fieldErrors': {field name: [error strings]}
          - 'generalErrors': *general_errors* plus the form's non-field errors
          - 'values': cleaned_data, when the form has been validated

        Args:
            general_errors: optional list of extra form-wide error strings.
                (Was a mutable default ``[]`` — replaced with the ``None``
                sentinel so no list instance is shared across calls.)
        """
        if general_errors is None:
            general_errors = []
        field_errors = dict((key, [six.text_type(error) for error in errors])
                            for key, errors in self.errors.items())
        gen_errors = general_errors + self.non_field_errors()
        result = {}
        if field_errors:
            result['fieldErrors'] = field_errors
        if gen_errors:
            result['generalErrors'] = gen_errors
        # cleaned_data only exists after is_valid()/full_clean() has run.
        if hasattr(self, 'cleaned_data'):
            result['values'] = self.cleaned_data
        return result
class LoginForm(AjaxForm):
    """Login form: email plus an optional password.

    An empty password signals account creation / password reset — see the
    field's help text shown to the user.
    """
    # Required; upper length bound matches the account email storage.
    email = forms.EmailField(max_length=80)
    # Optional on purpose: empty means "create account / send new password".
    password = forms.CharField(
        required=False, max_length=40, widget=forms.PasswordInput,
        help_text=(
            '<p class="helptext">Leave this field empty '
            "if you don't have an account yet,\n"
            "or if you have forgotten your pass­word.\n"
            "A new password will be sent to your e-mail address.</p>")
    )
class SaveForm(AjaxForm):
    """Form used when saving a worksheet: just its (required) name."""
    name = forms.CharField(
        required=True, max_length=30,
        help_text=('<p class="helptext">Worksheet names '
                   'are not case-sensitive.</p>')
    )
|
import math
import torch.nn as nn
from basicsr.utils.registry import ARCH_REGISTRY
cfg = {'A': [64, 128, 128, 128, 128, 128]}
def make_layers(cfg, scale, batch_norm=False):
    """Build a VGG-style convolutional body followed by a pixel-shuffle upsampler.

    Args:
        cfg (list): per-layer channel counts; the string 'M' inserts a 2x2
            max-pool instead of a convolution.
        scale (int): upsampling factor. Powers of two are handled with
            log2(scale) PixelShuffle(2) stages, 3 with one PixelShuffle(3);
            other values get no upsampling stage.
        batch_norm (bool): insert BatchNorm2d after each convolution.

    Returns:
        nn.Sequential mapping a 3-channel image to a 3-channel image of
        `scale`-times the spatial size.
    """
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.LeakyReLU(negative_slope=0.1, inplace=True)]
            else:
                layers += [conv2d, nn.LeakyReLU(negative_slope=0.1, inplace=True)]
            in_channels = v
    # upsample
    if (scale & (scale - 1)) == 0:  # scale = 2^n
        # bit_length() gives the exact log2 for powers of two, avoiding the
        # float rounding risk of int(math.log(scale, 2)).
        for _ in range(scale.bit_length() - 1):
            layers += [nn.Conv2d(in_channels, 4 * in_channels, 3, 1, 1)]
            layers += [nn.PixelShuffle(2), nn.LeakyReLU(negative_slope=0.1, inplace=True)]
    elif scale == 3:
        layers += [nn.Conv2d(in_channels, 9 * in_channels, 3, 1, 1)]
        layers += [nn.PixelShuffle(3), nn.LeakyReLU(negative_slope=0.1, inplace=True)]
    # Final projection back to a 3-channel image.
    out_channels = 3
    layers += [nn.Conv2d(in_channels, in_channels, 3, 1, 1), nn.LeakyReLU(negative_slope=0.1, inplace=True)]
    layers += [nn.Conv2d(in_channels, out_channels, 3, 1, 1)]
    return nn.Sequential(*layers)
class SRCNNStyle(nn.Module):
    """Simple feed-forward super-resolution net wrapping a feature Sequential.

    Args:
        features (nn.Module): the layer stack produced by ``make_layers``.
        init_weights (bool): apply the standard initialisation scheme on
            construction (Kaiming for convs, constants for BN, normal for
            linear layers).
    """

    def __init__(self, features, init_weights=True):
        super(SRCNNStyle, self).__init__()
        self.features = features
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        """Run the input straight through the feature stack."""
        return self.features(x)

    def _initialize_weights(self):
        """Initialise every submodule according to its type."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                # BN starts as the identity transform: weight 1, bias 0.
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)
@ARCH_REGISTRY.register()
def srcnn_style_net(scale, **kwargs):
    """srcnn_style 9-layer model (configuration "A").

    Args:
        scale (int): Upsampling factor. Support x2, x3 and x4.
            Default: 4.
        **kwargs: forwarded to ``make_layers`` (e.g. ``batch_norm``).
    """
    return SRCNNStyle(make_layers(cfg['A'], scale, **kwargs))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import time
from ranking.management.modules.common import REQ, BaseModule
class Statistic(BaseModule):
    """Standings fetcher for DMOJ-style contest sites (dmoj.ca API)."""

    API_RANKING_URL_FORMAT_ = 'https://dmoj.ca/api/contest/info/{key}'

    def __init__(self, **kwargs):
        super(Statistic, self).__init__(**kwargs)

    def get_standings(self, users=None):
        """Fetch the contest info API and build a standings dict.

        Returns a dict with 'result' (per-handle rows), 'url' and 'problems'.
        On a 404 from the API returns {'action': 'delete'}; on other fetch
        errors returns {}.
        """
        # `key` (and anything else format() needs) comes from kwargs stored
        # on the instance by BaseModule.__init__.
        url = self.API_RANKING_URL_FORMAT_.format(**self.__dict__)
        try:
            time.sleep(1)  # presumably crude rate limiting — TODO confirm
            page = REQ.get(url)
        except Exception as e:
            # NOTE(review): assumes e.args[0] is HTTPError-like with a .code
            # attribute — verify against the REQ implementation.
            return {'action': 'delete'} if e.args[0].code == 404 else {}
        data = json.loads(page)
        problems_info = [{'short': p['code'], 'name': p['name']} for p in data['problems']]
        result = {}
        prev = None
        # Order by points descending, then cumulative time ascending.
        rankings = sorted(data['rankings'], key=lambda x: (-x['points'], x['cumtime']))
        for index, r in enumerate(rankings, start=1):
            solutions = r.pop('solutions')
            # Skip participants with no submissions at all.
            if not any(solutions):
                continue
            handle = r.pop('user')
            row = result.setdefault(handle, {})
            row['member'] = handle
            row['solving'] = r.pop('points')
            cumtime = r.pop('cumtime')
            if cumtime:
                row['penalty'] = self.to_time(cumtime)
            # Participants tied on (points, cumtime) share the same place.
            curr = (row['solving'], cumtime)
            if curr != prev:
                prev = curr
                rank = index
            row['place'] = rank
            solved = 0
            problems = row.setdefault('problems', {})
            # data['problems'] and `solutions` are aligned by position.
            for prob, sol in zip(data['problems'], solutions):
                if not sol:
                    continue
                p = problems.setdefault(prob['code'], {})
                if sol['points'] > 0 and prob.get('partial'):
                    # Partial unless within 1e-7 of the full problem score.
                    p['partial'] = prob['points'] - sol['points'] > 1e-7
                    if not p['partial']:
                        solved += 1
                p['result'] = sol.pop('points')
                t = sol.pop('time')
                if t:
                    p['time'] = self.to_time(t)
            # Copy any remaining API fields without clobbering computed ones.
            row.update({k: v for k, v in r.items() if k not in row})
            row['solved'] = {'solving': solved}
        standings_url = hasattr(self, 'standings_url') and self.standings_url or self.url.rstrip('/') + '/ranking/'
        standings = {
            'result': result,
            'url': standings_url,
            'problems': problems_info,
        }
        return standings
if __name__ == "__main__":
    # Ad-hoc smoke tests against a few live DMOJ contests.
    from pprint import pprint

    statistic = Statistic(
        name='42',
        url='https://dmoj.ca/contest/dmopc18c2/',
        key='dmopc18c2',
    )
    pprint(statistic.get_result('ayyyyyyyyyyyyyLMAO'))

    statistic = Statistic(
        name="Mock CCO '19 Contest 2, Day 1",
        url='http://www.dmoj.ca/contest/mcco19c2d1',
        key='mcco19c2d1',
    )
    pprint(statistic.get_result('GSmerch', 'georgehtliu'))

    statistic = Statistic(
        name='Deadly Serious Contest Day 1',
        url='http://www.dmoj.ca/contest/dsc19d1',
        key='dsc19d1',
    )
    pprint(statistic.get_result('scanhex', 'wleung_bvg'))

    statistic = Statistic(
        name="Mock CCO '19 Contest 2, Day 1",
        url='https://dmoj.ca/contest/tle16',
        key='tle16',
    )
    pprint(statistic.get_standings())
|
"""Functional test of 'dtool tag' CLI command."""
from click.testing import CliRunner
from . import tmp_dataset_fixture # NOQA
def test_tag_basic(tmp_dataset_fixture):  # NOQA
    """Setting a tag makes it the sole entry reported by 'tag ls'."""
    from dtool_tag.cli import tag
    runner = CliRunner()
    uri = tmp_dataset_fixture.uri

    set_result = runner.invoke(tag, ["set", uri, "e.coli"])
    assert set_result.exit_code == 0

    ls_result = runner.invoke(tag, ["ls", uri])
    assert ls_result.exit_code == 0
    assert ls_result.output.strip() == "e.coli"
def test_tag_invalid_name(tmp_dataset_fixture):  # NOQA
    """Tags containing spaces, slashes, etc. are rejected with guidance."""
    from dtool_tag.cli import tag
    runner = CliRunner()

    result = runner.invoke(tag, ["set", tmp_dataset_fixture.uri, "project name"])
    # The CLI signals an invalid tag with exit code 400.
    assert result.exit_code == 400

    # Each line of the explanatory error message must appear in the output.
    for message_line in (
        "Invalid tag 'project name'",
        "Tag must be 80 characters or less",
        "Tags may only contain the characters: 0-9 a-z A-Z - _ .",
        "Example: first-class",
    ):
        assert result.output.find(message_line) != -1
def test_delete_command(tmp_dataset_fixture):  # NOQA
    """Deleting tags one by one shrinks 'tag ls' output down to nothing."""
    from dtool_tag.cli import tag
    runner = CliRunner()
    uri = tmp_dataset_fixture.uri

    def run_tag(*cli_args):
        """Invoke the tag CLI, assert success, and return the result."""
        res = runner.invoke(tag, list(cli_args))
        assert res.exit_code == 0
        return res

    # Add two tags.
    run_tag("set", uri, "e.coli")
    run_tag("set", uri, "genome")

    # Make sure that both tags are present.
    listing = run_tag("ls", uri)
    for expected_tag in ("e.coli", "genome"):
        assert listing.output.find(expected_tag) != -1

    # Delete one tag; only the other remains.
    run_tag("delete", uri, "e.coli")
    assert run_tag("ls", uri).output.strip() == "genome"

    # Delete the remaining tag; listing is now empty.
    run_tag("delete", uri, "genome")
    assert run_tag("ls", uri).output.strip() == ""
|
import os
import sys
import unittest
from mock import Mock
sys.path.append('../../')
import blng.Voodoo
class TestVoodoo(unittest.TestCase):
    """End-to-end tests for blng.Voodoo.DataAccess: leaf access, containers,
    single- and multi-key lists, deletion, (de)serialisation and parent
    navigation, all verified against exact serialised XML dumps."""

    def setUp(self):
        # Serialised-XML comparisons produce very large diffs.
        self.maxDiff = 400000

    def _get_session(self):
        """Create a fresh DataAccess session on the example schema; return its root."""
        self.subject = blng.Voodoo.DataAccess('crux-example.xml')
        self.root = self.subject.get_root()
        return self.root

    def test_list_delete_element(self):
        """Deleting a list element removes it from membership, keys() and the dump."""
        # Build
        root = self._get_session()
        one = root.twokeylist.create('a1', 'b1')
        two = root.twokeylist.create('a2', 'b2')
        three = root.twokeylist.create('a3', 'b3')
        self.assertTrue(('a1', 'b1') in root.twokeylist)
        self.assertTrue(('x1', 'b1') not in root.twokeylist)
        ELEPHANT = root.simplelist.create('elephant')
        CAMEL = root.simplelist.create('camel')
        ZOMBIE = root.simplelist.create('zombie')
        GHOUL = root.simplelist.create('ghoul')
        self.assertEqual(len(root.simplelist), 4)
        self.assertTrue('zombie' in root.simplelist)
        self.assertFalse('zombie' not in root.simplelist)
        # keys() preserves creation order.
        self.assertEqual(['elephant', 'camel', 'zombie', 'ghoul'], root.simplelist.keys())
        self.assertEqual([['a1', 'b1'], ['a2', 'b2'], ['a3', 'b3']], root.twokeylist.keys())
        for listelement in root.twokeylist:
            listelement.tertiary = listelement.primary + listelement.secondary
        self.assertEqual(root.simplelist['zombie']._path, "/simplelist[simplekey='zombie']")
        self.assertEqual(root.simplelist['ghoul'].simplekey, "ghoul")
        # Action
        del root.simplelist['zombie']
        self.assertTrue('zombie' not in root.simplelist)
        self.assertFalse('elephant' not in root.simplelist)
        self.assertFalse('camel' not in root.simplelist)
        self.assertFalse('ghoul' not in root.simplelist)
        self.assertFalse('zombie' in root.simplelist)
        self.assertTrue('elephant' in root.simplelist)
        self.assertTrue('camel' in root.simplelist)
        self.assertTrue('ghoul' in root.simplelist)
        # Test that this does not actually remove the item from the list
        # it should delete the reference to the list element only
        listelement = root.twokeylist['a2', 'b2']
        del listelement
        self.assertEqual(root.twokeylist['a2', 'b2'].tertiary, 'a2b2')
        # Deleting via the list itself DOES remove the element.
        del root.twokeylist['a2', 'b2']
        with self.assertRaises(blng.Voodoo.BadVoodoo) as context:
            x = root.twokeylist['a2', 'b2']
        self.assertEqual(str(context.exception), "ListElement does not exist: /twokeylist[primary='a2'][secondary='b2']")
        # Assert
        self.assertEqual(len(root.simplelist), 3)
        self.assertEqual(['elephant', 'camel', 'ghoul'], root.simplelist.keys())
        self.assertEqual([['a1', 'b1'], ['a3', 'b3']], root.twokeylist.keys())
        expected_xml = """<voodoo>
<twokeylist>
<primary listkey="yes">a1</primary>
<secondary listkey="yes">b1</secondary>
<tertiary>a1b1</tertiary>
</twokeylist>
<twokeylist>
<primary listkey="yes">a3</primary>
<secondary listkey="yes">b3</secondary>
<tertiary>a3b3</tertiary>
</twokeylist>
<simplelist>
<simplekey listkey="yes">elephant</simplekey>
</simplelist>
<simplelist>
<simplekey listkey="yes">camel</simplekey>
</simplelist>
<simplelist>
<simplekey listkey="yes">ghoul</simplekey>
</simplelist>
</voodoo>
"""
        self.assertEqual(self.subject.dumps(), expected_xml)

    def test_list_iteration(self):
        """Iteration visits every element; empty lists iterate zero times."""
        root = self._get_session()
        one = root.twokeylist.create('a1', 'b1')
        two = root.twokeylist.create('a2', 'b2')
        for listelement in root.twokeylist:
            listelement.tertiary = listelement.primary + listelement.secondary
        for listelement in root.simplelist:
            self.fail('This list was empty so we should not have iterated around it')
        # This has two list elements
        i = 0
        for listelement in root.twokeylist:
            i = i + 1
        self.assertEqual(i, 2)
        one = root.simplelist.create('1111')
        for listelement in root.simplelist:
            # Overwriting a value records the previous one as old_value.
            listelement.nonleafkey = 'first-set'
            listelement.nonleafkey = listelement.simplekey
        expected_xml = """<voodoo>
<twokeylist>
<primary listkey="yes">a1</primary>
<secondary listkey="yes">b1</secondary>
<tertiary>a1b1</tertiary>
</twokeylist>
<twokeylist>
<primary listkey="yes">a2</primary>
<secondary listkey="yes">b2</secondary>
<tertiary>a2b2</tertiary>
</twokeylist>
<simplelist>
<simplekey listkey="yes">1111</simplekey>
<nonleafkey old_value="first-set">1111</nonleafkey>
</simplelist>
</voodoo>
"""
        self.assertEqual(self.subject.dumps(), expected_xml)

    def test_accessing_list_elements(self):
        """create() is idempotent per key; lookups with unknown keys raise BadVoodoo."""
        root = self._get_session()
        x = root.twokeylist.create('a', 'b')
        y = root.twokeylist['a', 'b']
        y.tertiary = '3'
        # Re-creating an existing element returns it rather than duplicating.
        x = root.twokeylist.create('a', 'b')
        x = root.twokeylist.create('A', 'B')
        root.twokeylist.create('A', 'B').tertiary = 'sdf'
        self.assertEqual(y.tertiary, '3')
        expected_xml = """<voodoo>
<twokeylist>
<primary listkey="yes">a</primary>
<secondary listkey="yes">b</secondary>
<tertiary>3</tertiary>
</twokeylist>
<twokeylist>
<primary listkey="yes">A</primary>
<secondary listkey="yes">B</secondary>
<tertiary>sdf</tertiary>
</twokeylist>
</voodoo>
"""
        self.assertEqual(self.subject.dumps(), expected_xml)
        self.assertEqual(repr(y), "VoodooListElement: /twokeylist[primary='a'][secondary='b']")
        with self.assertRaises(blng.Voodoo.BadVoodoo) as context:
            a = root.twokeylist['not-existing-key', 'b']
        self.assertEqual(str(context.exception), "ListElement does not exist: /twokeylist[primary='not-existing-key'][secondary='b']")
        with self.assertRaises(blng.Voodoo.BadVoodoo) as context:
            a = root.twokeylist['a', 'non-existing-second-key']
        self.assertEqual(str(context.exception), "ListElement does not exist: /twokeylist[primary='a'][secondary='non-existing-second-key']")

    def test_deserialise_and_serilaise_example_with_cache_checks(self):
        """loads() flushes the keystore cache; subsequent reads repopulate it."""
        serilaised_xml = """<voodoo>
<simpleleaf old_value="9998">9999</simpleleaf>
<morecomplex>
<leaf2>a</leaf2>
</morecomplex>
<simplelist>
<simplekey listkey="yes">firstkey</simplekey>
</simplelist>
<hyphen-leaf>abc123</hyphen-leaf>
<outsidelist>
<leafo listkey="yes">a</leafo>
<insidelist>
<leafi listkey="yes">A</leafi>
</insidelist>
</outsidelist>
<outsidelist>
<leafo listkey="yes">b</leafo>
</outsidelist>
</voodoo>"""
        root = self._get_session()
        (keystore_cache, schema_cache) = self.subject._cache
        root.simpleleaf = 'value_before_loading_serialised_data'
        self.assertEqual(root.simpleleaf, 'value_before_loading_serialised_data')
        self.assertEqual(list(keystore_cache.items.keys()), ['/voodoo/simpleleaf'])
        # Loading serialised data empties the cache entirely.
        self.subject.loads(serilaised_xml)
        self.assertEqual(list(keystore_cache.items.keys()), [])
        self.assertEqual(root.morecomplex.leaf2, 'a')
        self.assertEqual(root.simpleleaf, '9999')
        self.assertEqual(root.hyphen_leaf, 'abc123')
        # Each read re-populates the cache along the accessed paths.
        self.assertEqual(list(keystore_cache.items.keys()), ['/voodoo/morecomplex',
                                                            '/voodoo/morecomplex/leaf2', '/voodoo/simpleleaf', '/voodoo/hyphen_leaf'])
        root.simpleleaf = "value_after_deserialised_and_modified"
        re_serilaised_xml = """<voodoo><simpleleaf old_value="9999">value_after_deserialised_and_modified</simpleleaf>
<morecomplex>
<leaf2>a</leaf2>
</morecomplex>
<simplelist>
<simplekey listkey="yes">firstkey</simplekey>
</simplelist>
<hyphen-leaf>abc123</hyphen-leaf>
<outsidelist>
<leafo listkey="yes">a</leafo>
<insidelist>
<leafi listkey="yes">A</leafi>
</insidelist>
</outsidelist>
<outsidelist>
<leafo listkey="yes">b</leafo>
</outsidelist>
</voodoo>
"""
        self.assertEqual(self.subject.dumps(), re_serilaised_xml)

    def test_deserialise_and_serilaise(self):
        """A loads()/modify/dumps() round trip preserves untouched content."""
        serilaised_xml = """<voodoo>
<simpleleaf old_value="9998">9999</simpleleaf>
<morecomplex>
<leaf2>a</leaf2>
</morecomplex>
<simplelist>
<simplekey listkey="yes">firstkey</simplekey>
</simplelist>
<hyphen-leaf>abc123</hyphen-leaf>
<outsidelist>
<leafo listkey="yes">a</leafo>
<insidelist>
<leafi listkey="yes">A</leafi>
</insidelist>
</outsidelist>
<outsidelist>
<leafo listkey="yes">b</leafo>
</outsidelist>
</voodoo>"""
        root = self._get_session()
        self.subject.loads(serilaised_xml)
        root.simpleleaf = "value_after_deserialised_and_modified"
        # + is what we have extra in the test
        # - is what was recevied extra in the running out
        re_serilaised_xml = """<voodoo><simpleleaf old_value="9999">value_after_deserialised_and_modified</simpleleaf>
<morecomplex>
<leaf2>a</leaf2>
</morecomplex>
<simplelist>
<simplekey listkey="yes">firstkey</simplekey>
</simplelist>
<hyphen-leaf>abc123</hyphen-leaf>
<outsidelist>
<leafo listkey="yes">a</leafo>
<insidelist>
<leafi listkey="yes">A</leafi>
</insidelist>
</outsidelist>
<outsidelist>
<leafo listkey="yes">b</leafo>
</outsidelist>
</voodoo>
"""
        # raise ValueError(self.subject.dumps())
        self.assertEqual(self.subject.dumps(), re_serilaised_xml)

    def test_parents(self):
        """_parent navigates up the tree; chained navigation lands on siblings."""
        root = self._get_session()
        root.psychedelia.psychedelic_rock.noise_pop.shoe_gaze.bands._parent._parent.bands.create('Jesus and the Mary Chain')
        root.psychedelia.psychedelic_rock.noise_pop.dream_pop.bands.create('Night Flowers')
        root.psychedelia.psychedelic_rock.noise_pop.dream_pop.bands.create('Mazzy Star')
        root.psychedelia.psychedelic_rock.noise_pop.dream_pop.bands['Mazzy Star']._parent['Night Flowers'].favourite = 'True'
        expected_xml = """<voodoo>
<psychedelia>
<psychedelic_rock>
<noise_pop>
<bands>
<band listkey="yes">Jesus and the Mary Chain</band>
</bands>
<dream_pop>
<bands>
<band listkey="yes">Night Flowers</band>
<favourite>True</favourite>
</bands>
<bands>
<band listkey="yes">Mazzy Star</band>
</bands>
</dream_pop>
</noise_pop>
</psychedelic_rock>
</psychedelia>
</voodoo>
"""
        self.assertEqual(self.subject.dumps(), expected_xml)
        self.assertEqual(root.psychedelia.psychedelic_rock.noise_pop.shoe_gaze._path, '/psychedelia/psychedelic_rock/noise_pop/shoe_gaze')
        self.assertEqual(root.psychedelia.psychedelic_rock.noise_pop.shoe_gaze._parent._path, '/psychedelia/psychedelic_rock/noise_pop')

    def test_list_within_list(self):
        """Nested lists with 1- and 3-part keys; wrong key counts raise BadVoodoo."""
        root = self._get_session()
        a = root.simplelist.create('a')
        # Recreate the session twice to confirm fresh sessions start clean.
        for c in range(2):
            root = self._get_session()
            a = root.simplelist.create('a')
            a.nonleafkey = 'b'
            b = root.simplelist.create('b')
            b.nonleafkey = 'bb'
        A = root.outsidelist.create('AA')
        B = root.outsidelist.create('BB')
        B = root.outsidelist.create('BB')
        B = root.outsidelist.create('BB')
        B.insidelist.create('bbbbbb')
        A = root.outsidelist.create('AA')
        a = A.insidelist.create('aaaaa')
        english = A.otherinsidelist.create('one', 'two', 'three')
        english.otherlist4 = 'four'
        french = A.otherinsidelist.create('un', 'deux', 'trois')
        french.otherlist4 = 'quatre'
        french.language = 'french'
        italian = B.otherinsidelist.create('uno', 'due', 'tres')
        italian.otherlist4 = 'quattro'
        italian.language = 'italian'
        spanish = B.otherinsidelist.create('uno', 'dos', 'tres')
        spanish.otherlist4 = 'cuatro'
        spanish.language = 'spanish'
        spanish = A.otherinsidelist.create('uno', 'dos', 'tres')
        spanish.otherlist4 = 'cuatro'
        spanish.language = 'spanish'
        german = B.otherinsidelist.create('eins', 'zwei', 'drei')
        # Too many keys:
        with self.assertRaises(blng.Voodoo.BadVoodoo) as context:
            swedish = B.otherinsidelist.create('et', 'två', 'tre', 'fyra')
        self.assertEqual(str(context.exception), "Wrong Number of keys require 3 got 4. keys defined: ['otherlist1', 'otherlist2', 'otherlist3']")
        # Too few keys:
        with self.assertRaises(blng.Voodoo.BadVoodoo) as context:
            danish = A.otherinsidelist.create('et', 'to')
            danish.language = 'danish'
        self.assertEqual(str(context.exception), "Wrong Number of keys require 3 got 2. keys defined: ['otherlist1', 'otherlist2', 'otherlist3']")
        dutch_part1 = A.otherinsidelist.create('een', 'twee', 'drie')
        dutch_part1.otherlist4 = 'vier'
        dutch_part1.language = 'dutch'
        dutch_part2 = B.otherinsidelist.create('een', 'twee', 'drie')
        dutch_part2.otherlist5 = 'vijf'
        dutch_part2.language = 'dutch'
        expected_xml = """<voodoo>
<simplelist>
<simplekey listkey="yes">a</simplekey>
<nonleafkey>b</nonleafkey>
</simplelist>
<simplelist>
<simplekey listkey="yes">b</simplekey>
<nonleafkey>bb</nonleafkey>
</simplelist>
<outsidelist>
<leafo listkey="yes">AA</leafo>
<insidelist>
<leafi listkey="yes">aaaaa</leafi>
</insidelist>
<otherinsidelist>
<otherlist1 listkey="yes">one</otherlist1>
<otherlist2 listkey="yes">two</otherlist2>
<otherlist3 listkey="yes">three</otherlist3>
<otherlist4>four</otherlist4>
</otherinsidelist>
<otherinsidelist>
<otherlist1 listkey="yes">un</otherlist1>
<otherlist2 listkey="yes">deux</otherlist2>
<otherlist3 listkey="yes">trois</otherlist3>
<otherlist4>quatre</otherlist4>
<language>french</language>
</otherinsidelist>
<otherinsidelist>
<otherlist1 listkey="yes">uno</otherlist1>
<otherlist2 listkey="yes">dos</otherlist2>
<otherlist3 listkey="yes">tres</otherlist3>
<otherlist4>cuatro</otherlist4>
<language>spanish</language>
</otherinsidelist>
<otherinsidelist>
<otherlist1 listkey="yes">een</otherlist1>
<otherlist2 listkey="yes">twee</otherlist2>
<otherlist3 listkey="yes">drie</otherlist3>
<otherlist4>vier</otherlist4>
<language>dutch</language>
</otherinsidelist>
</outsidelist>
<outsidelist>
<leafo listkey="yes">BB</leafo>
<insidelist>
<leafi listkey="yes">bbbbbb</leafi>
</insidelist>
<otherinsidelist>
<otherlist1 listkey="yes">uno</otherlist1>
<otherlist2 listkey="yes">due</otherlist2>
<otherlist3 listkey="yes">tres</otherlist3>
<otherlist4>quattro</otherlist4>
<language>italian</language>
</otherinsidelist>
<otherinsidelist>
<otherlist1 listkey="yes">uno</otherlist1>
<otherlist2 listkey="yes">dos</otherlist2>
<otherlist3 listkey="yes">tres</otherlist3>
<otherlist4>cuatro</otherlist4>
<language>spanish</language>
</otherinsidelist>
<otherinsidelist>
<otherlist1 listkey="yes">eins</otherlist1>
<otherlist2 listkey="yes">zwei</otherlist2>
<otherlist3 listkey="yes">drei</otherlist3>
</otherinsidelist>
<otherinsidelist>
<otherlist1 listkey="yes">een</otherlist1>
<otherlist2 listkey="yes">twee</otherlist2>
<otherlist3 listkey="yes">drie</otherlist3>
<otherlist5>vijf</otherlist5>
<language>dutch</language>
</otherinsidelist>
</outsidelist>
</voodoo>
"""
        self.assertEqual(self.subject.dumps(), expected_xml)

    def test_list_with_dump(self):
        """List keys are immutable after create(); non-key leaves can change."""
        # note quite test driven but want to go to bed!
        # list create()
        # list create() without enough keys
        # list create() with too many keys
        # list create() then trying to change the key (not allowed)
        # list Create() and then modifying non keys (allows)
        # creating multiple list entries (different keys) shoudl be allowed
        #
        # Act
        root = self._get_session()
        listelement = root.simplelist.create('Shamanaid')
        listelement.nonleafkey = 'sdf'
        # Check the same list element can have the create method called a second name
        listelement = root.simplelist.create('Shamanaid')
        with self.assertRaises(blng.Voodoo.BadVoodoo) as context:
            listelement.simplekey = 'change the value'
        self.assertEqual(str(context.exception), "Changing a list key is not supported. /simplelist[simplekey='Shamanaid']/simplekey")
        received_xml = self.subject.dumps()
        # Assert
        expected_xml = """<voodoo>
<simplelist>
<simplekey listkey="yes">Shamanaid</simplekey>
<nonleafkey>sdf</nonleafkey>
</simplelist>
</voodoo>
"""
        self.assertEqual(expected_xml, received_xml)
        listelement2 = root.simplelist.create('Prophet')
        listelement2.nonleafkey = 'master'
        received_xml = self.subject.dumps()
        # Assert
        expected_xml = """<voodoo>
<simplelist>
<simplekey listkey="yes">Shamanaid</simplekey>
<nonleafkey>sdf</nonleafkey>
</simplelist>
<simplelist>
<simplekey listkey="yes">Prophet</simplekey>
<nonleafkey>master</nonleafkey>
</simplelist>
</voodoo>
"""
        self.assertEqual(expected_xml, received_xml)

    def test_basic_xmldumps(self):
        """Underscores in voodoo attribute names map to hyphens in the XML."""
        root = self._get_session()
        # Act
        root.morecomplex.leaf2 = "sing-and-dance-or-youll"
        leaf2_value = root.morecomplex.leaf2
        root.hyphen_leaf = 'underscore_in_voodoo-should-be-hyphens-in-xmldoc'
        hyphen_leaf_value = root.hyphen_leaf
        received_xml = self.subject.dumps()
        # Assert
        self.assertEqual("sing-and-dance-or-youll", leaf2_value)
        self.assertEqual("underscore_in_voodoo-should-be-hyphens-in-xmldoc", hyphen_leaf_value)
        expected_xml = """<voodoo>
<morecomplex>
<leaf2>sing-and-dance-or-youll</leaf2>
</morecomplex>
<hyphen-leaf>underscore_in_voodoo-should-be-hyphens-in-xmldoc</hyphen-leaf>
</voodoo>
"""
        self.assertEqual(expected_xml, received_xml)

    def test_basic_list(self):
        """List element creation, repr, dir() and lookup of missing keys."""
        root = self._get_session()
        listelement = root.simplelist.create('Shamanaid')
        self.assertEqual(repr(listelement), "VoodooListElement: /simplelist[simplekey='Shamanaid']")
        with self.assertRaises(blng.Voodoo.BadVoodoo) as context:
            a = root.simplelist['not-existing-key']
        self.assertEqual(str(context.exception), "ListElement does not exist: /simplelist[simplekey='not-existing-key']")
        expected_hits = ['nonleafkey', 'simplekey']
        self.assertEqual(dir(listelement), expected_hits)
        self.assertEqual(dir(root.simplelist), [])
        self.assertEqual(root.simplelist['Shamanaid'].simplekey, 'Shamanaid')
        self.assertEqual(repr(root.simplelist['Shamanaid']), "VoodooListElement: /simplelist[simplekey='Shamanaid']")

    def test_basic_dir(self):
        """dir() on a container lists its schema children."""
        root = self._get_session()
        expected_hits = ['inner', 'leaf2', 'leaf3', 'leaf4', 'nonconfig']
        self.assertEqual(dir(root.morecomplex), expected_hits)

    def test_basic_repr(self):
        """repr() distinguishes containers from presence containers."""
        root = self._get_session()
        node = root.morecomplex
        self.assertEqual(repr(node), "VoodooContainer: /morecomplex")
        self.assertEqual(repr(node.inner), "VoodooPresenceContainer: /morecomplex/inner")
        node = root.morecomplex.leaf2
        node = "x123"
        self.assertEqual(repr(node), "'x123'")

    def test_basic_session_leaf(self):
        """An unset leaf reads as None; a set leaf reads back its value."""
        root = self._get_session()
        value = root.simpleleaf
        self.assertEqual(value, None)
        root.simpleleaf = 'ABC123'
        value = root.simpleleaf
        self.assertEqual(value, 'ABC123')

    def test_basic_session_setup(self):
        """The session root reprs as VoodooRoot."""
        self._get_session()
        self.assertEqual(repr(self.root), "VoodooRoot")

    def test_root_only_returns_root(self):
        """Accessing an attribute missing from the schema raises BadVoodoo."""
        root = self._get_session()
        with self.assertRaises(blng.Voodoo.BadVoodoo) as context:
            x = root.platinum
        self.assertEqual(str(context.exception), "Unable to find '/platinum' in the schema")
|
import unittest
import zserio
from testutils import getZserioApi
class ChoiceBit4RangeCheckTest(unittest.TestCase):
    """Range-check tests for a choice compound containing a bit:4 field."""

    @classmethod
    def setUpClass(cls):
        # Compile the schema with range-check code generation enabled and
        # keep the generated package for all tests in this class.
        cls.api = getZserioApi(__file__, "with_range_check_code.zs",
                               extraArgs=["-withRangeCheckCode"]).choice_bit4_range_check

    def testChoiceBit4LowerBound(self):
        """A value at the lower bound (0) serialises and round-trips."""
        self._checkChoiceBit4Value(BIT4_LOWER_BOUND)

    def testChoiceBit4UpperBound(self):
        """A value at the upper bound (15) serialises and round-trips."""
        self._checkChoiceBit4Value(BIT4_UPPER_BOUND)

    def testChoiceBit4BelowLowerBound(self):
        """A value below the range triggers the generated range check."""
        with self.assertRaises(zserio.PythonRuntimeException):
            self._checkChoiceBit4Value(BIT4_LOWER_BOUND - 1)

    def testChoiceBit4AboveUpperBound(self):
        """A value above the range triggers the generated range check."""
        with self.assertRaises(zserio.PythonRuntimeException):
            self._checkChoiceBit4Value(BIT4_UPPER_BOUND + 1)

    def _checkChoiceBit4Value(self, value):
        """Serialise *value* and assert the deserialised compound is equal."""
        selector = True
        choiceBit4RangeCheckCompound = self.api.ChoiceBit4RangeCheckCompound(selector)
        choiceBit4RangeCheckCompound.value = value
        bitBuffer = zserio.serialize(choiceBit4RangeCheckCompound)
        readChoiceBit4RangeCheckCompound = zserio.deserialize(self.api.ChoiceBit4RangeCheckCompound, bitBuffer,
                                                              selector)
        self.assertEqual(choiceBit4RangeCheckCompound, readChoiceBit4RangeCheckCompound)
BIT4_LOWER_BOUND = 0
BIT4_UPPER_BOUND = 15
|
#! /usr/bin/python
import sys
import re
from os import environ, linesep
import csv
KUBEVIRT_CLIENT_GO_SCHEME_REGISTRATION_VERSION = 'v1'
def get_env(line):
    """Map a 'NAME=...' line to 'NAME=<current process environment value>'."""
    name = line[:line.find('=')]
    return f'{name}={environ.get(name)}'
def get_env_file(outdir, file_format='txt'):
    """Collect NAME=value pairs and write them to ``<outdir>/envs.<ext>``.

    Sources, in order:
      * lines matching ``NAME=...`` in hack/config, re-resolved against the
        current process environment,
      * container image coordinates from deploy/images.csv,
      * a fixed set of local-development overrides.

    file_format: 'env' writes one variable per line to envs.env; anything
    else writes a ';'-separated list to envs.txt.
    """
    rgx = re.compile('^[^ #]+=.*$')
    if file_format == 'env':
        sep = linesep
        ext = '.env'
    else:
        sep = ';'
        ext = '.txt'
    # (fixed: dropped a dead ``vars_list = ""`` string initializer that was
    # immediately shadowed by the list below)
    with open('hack/config') as infile:
        vars_list = [line.strip() for line in infile if rgx.match(line)]
    vars_list.append("KUBECONFIG=None")
    # Re-resolve every variable against the live environment.
    vars_list = [get_env(s) for s in vars_list]
    with open('deploy/images.csv') as image_file:
        reader = csv.DictReader(image_file, delimiter=',')
        for row in reader:
            if row['image_var'] in ['VMWARE_IMAGE', 'CONVERSION_IMAGE', 'KUBEVIRT_VIRTIO_IMAGE']:
                image = f"{row['name']}@sha256:{row['digest']}"
                # The virtio image historically uses a differently named env var.
                env = 'VIRTIOWIN_CONTAINER' if row['image_var'] == 'KUBEVIRT_VIRTIO_IMAGE' else row['image_var'].replace("_IMAGE", "_CONTAINER")
                vars_list.append(f"{env}={image}")
    vars_list.extend([
        f"HCO_KV_IO_VERSION={environ.get('CSV_VERSION')}",
        "WEBHOOK_MODE=false",
        "WEBHOOK_CERT_DIR=./_local/certs",
        f"KUBEVIRT_CLIENT_GO_SCHEME_REGISTRATION_VERSION={KUBEVIRT_CLIENT_GO_SCHEME_REGISTRATION_VERSION}",
        "WATCH_NAMESPACE=kubevirt-hyperconverged",
        "OSDK_FORCE_RUN_MODE=local",
        "OPERATOR_NAMESPACE=kubevirt-hyperconverged",
    ])
    with open(f'{outdir}/envs{ext}', 'w') as out:
        out.write(sep.join(vars_list))
if __name__ == "__main__":
    # Usage: <script> <outdir> [env|txt]
    frmt = 'txt'
    if len(sys.argv) == 3:
        frmt = sys.argv[2]
    elif len(sys.argv) != 2:
        print("one argument of output dir is required. The second argument is optional: 'env' for .env file format")
        # sys.exit instead of the site-injected exit() builtin, which is not
        # guaranteed to exist (e.g. under ``python -S``); status preserved.
        sys.exit(-1)
    get_env_file(sys.argv[1], frmt)
|
# -*- coding: utf-8 -*-
"""
utils.py
~~~~~~~~~~~~~
Author: Pankaj Suthar
"""
class Logger:
    """Configuration record describing one entry/exit logger."""

    def __init__(self, log_handler, name, entry_identifier=">>>>", exit_identifier="<<<<", log_time=False):
        # Handler object that receives the formatted log output.
        self.log_handler = log_handler
        self.name = name
        # Markers emitted when a wrapped call is entered / exited.
        self.entry_identifier = entry_identifier
        self.exit_identifier = exit_identifier
        # When True, timing information is recorded as well.
        self.log_time = log_time

    def __str__(self):
        return "Logger Name : {}".format(self.name)
class InOutLogger:
    """Process-wide singleton holding the configured ``Logger`` objects."""

    # The single shared instance, set on first successful construction.
    __instance = None

    @staticmethod
    def getResources():
        """Return the singleton instance; raise if it was never initialized."""
        if InOutLogger.__instance is None:
            raise Exception("InOutLogger configuration is not defined. Initialize InOutLogger first.")
        return InOutLogger.__instance

    def __init__(self, LOGGERS, supress_warings=False):
        """
        LOGGERS: a ``Logger`` or a list of ``Logger`` objects.
        supress_warings: when True, warnings are suppressed.
        (NOTE: the misspelled parameter name is kept for backward
        compatibility with existing keyword callers.)
        """
        if InOutLogger.__instance is not None:
            # Fixed typo in the error message ("initailized").
            raise Exception("Already initialized the InOutLogger")
        if isinstance(LOGGERS, list):
            self.LOGGERS = LOGGERS
        elif isinstance(LOGGERS, Logger):
            # Normalize a single logger to a one-element list.
            self.LOGGERS = [LOGGERS]
        else:
            raise Exception("LOGGERS must be list of [ InOutLogger.Logger ]")
        self.supress_warings = supress_warings
        # Register the singleton exactly once, after validation succeeded
        # (the original assigned it redundantly in each branch and again here).
        InOutLogger.__instance = self
|
# -*- coding: utf-8 -*-
from hist import axis
def test_axis_names():
    """
    Test axis names -- whether axis names work.
    """
    # Named axes of every kind, followed by unnamed / empty-named variants;
    # each construction must succeed and yield a truthy axis object.
    constructed = [
        axis.Regular(50, -3, 3, name="x0"),
        axis.Boolean(name="x_"),
        axis.Variable(range(-3, 3), name="xx"),
        axis.Integer(-3, 3, name="x_x"),
        axis.IntCategory(range(-3, 3), name="X__X"),
        axis.StrCategory("FT", name="X00"),
        axis.Regular(50, -3, 3, name=""),
        axis.Boolean(name=""),
        axis.Variable(range(-3, 3)),
        axis.Integer(-3, 3, name=""),
        axis.IntCategory(range(-3, 3), name=""),
        axis.StrCategory("FT"),
    ]
    for ax in constructed:
        assert ax
def test_axis_flow():
    """flow=False equals disabling both underflow and overflow, and an
    explicit underflow/overflow=True overrides flow=False."""
    factories = [
        lambda **kw: axis.Regular(9, 0, 8, **kw),
        lambda **kw: axis.Variable([1, 2, 3], **kw),
        lambda **kw: axis.Integer(0, 8, **kw),
    ]
    for make in factories:
        assert make(flow=False) == make(underflow=False, overflow=False)
        assert make(flow=False, underflow=True) == make(overflow=False)
        assert make(flow=False, overflow=True) == make(underflow=False)
|
import json
from flask import Flask, request, current_app, render_template, redirect, url_for
from signinghub_api import SigningHubAPI
# Create a web application with Flask
app = Flask(__name__)
# Copy local_settings.py from local_settings_example.py
# Edit local_settings.py to reflect your CLIENT_ID and CLIENT_SECRET
app.config.from_pyfile('local_settings.py') # Read example_app.local_settings.py
# Initialize the SigningHub API wrapper
# (credentials/scope come straight from the Flask config loaded above)
signinghub_api = SigningHubAPI(
    app.config.get('SIGNINGHUB_CLIENT_ID'),
    app.config.get('SIGNINGHUB_CLIENT_SECRET'),
    app.config.get('SIGNINGHUB_USERNAME'),
    app.config.get('SIGNINGHUB_PASSWORD'),
    app.config.get('SIGNINGHUB_SCOPE')
)
# Retrieve config settings from local_settings.py
# These module-level values are shared by all request handlers below.
signinghub_library_document_id = app.config.get('SIGNINGHUB_LIBRARY_DOCUMENT_ID')
signinghub_template_name = app.config.get('SIGNINGHUB_TEMPLATE_NAME')
recipient_user_name = app.config.get('RECIPIENT_USER_NAME')
recipient_user_email = app.config.get('RECIPIENT_USER_EMAIL')
recipient_field_name = app.config.get('RECIPIENT_FIELD_NAME')
recipient_field_value = app.config.get('RECIPIENT_FIELD_VALUE')
# Display the home page
@app.route('/')
def home_page():
    """Render the home page, forwarding any ``token`` query parameter."""
    return render_template('home.html',
                           access_token=request.args.get('token'))
@app.route('/new_token')
def new_token():
    """Obtain a fresh access token and bounce back to the home page."""
    access_token = signinghub_api.get_access_token()
    if signinghub_api.last_error_message:
        # Token retrieval failed -- surface the API error to the user.
        return render_template('show_error_message.html',
                               access_token=access_token,
                               last_function_name=signinghub_api.last_function_name,
                               last_error_message=signinghub_api.last_error_message)
    return redirect(url_for('home_page') + '?token=' + access_token)
# Retrieve and render a list of Adobe Sign Library Documents
@app.route('/show_packages')
def show_packages():
    """Fetch the caller's packages and render them as a list."""
    # The access token travels in the URL query string.
    access_token = request.args.get('token')
    # Without a token there is nothing to query.
    packages = signinghub_api.get_packages(access_token) if access_token else []
    for pkg in packages:
        print(json.dumps(pkg, indent=4))
    if signinghub_api.last_error_message:
        return render_template('show_error_message.html',
                               access_token=access_token,
                               last_function_name=signinghub_api.last_function_name,
                               last_error_message=signinghub_api.last_error_message)
    return render_template('show_packages.html',
                           access_token=access_token,
                           packages=packages)
# Create and render an Adobe Sign Widget
@app.route('/show_iframe')
def show_iframe():
    """Build a signing package end-to-end and render it in an IFrame.

    Steps: create package -> add library document -> rename -> apply
    workflow template -> pre-fill text field -> set signer -> share.
    Each step only runs if the previous one reported success; any API
    error is rendered via show_error_message.html instead.
    """
    # Get access token from the URL query string
    access_token = request.args.get('token')
    if not access_token: return redirect('/')
    # Create a package
    package_name = '2017 Contract - '+recipient_user_name+' - '+recipient_user_email
    package_id = signinghub_api.add_package(access_token, package_name)
    # Add a document from the document library
    if package_id:
        document_id = signinghub_api.upload_document_from_library(access_token, package_id, signinghub_library_document_id)
        # Rename document
        if document_id:
            document_name = package_name
            success = signinghub_api.rename_document(access_token, package_id, document_id, document_name)
            # Add a template
            if success:
                template_name = signinghub_template_name
                success = signinghub_api.apply_workflow_template(access_token, package_id, document_id, template_name)
            # print fields, so that we can determine the name of the text field
            if success:
                fields = signinghub_api.get_document_fields(access_token, package_id, document_id)
                print('Fields:', json.dumps(fields, indent=4))
                # Pre-fill the text field
                success = signinghub_api.update_textbox_field(access_token, package_id, document_id,
                                                              fields, recipient_field_name, recipient_field_value)
            # Add signer
            if success:
                success = signinghub_api.update_workflow_user(access_token, package_id, recipient_user_email, recipient_user_name)
            # Share Package
            if success:
                success = signinghub_api.share_document(access_token, package_id)
    # Show error message if needed
    if signinghub_api.last_error_message:
        return render_template('show_error_message.html',
                               access_token=access_token,
                               last_function_name=signinghub_api.last_function_name,
                               last_error_message=signinghub_api.last_error_message)
    # Render the IFrame with the document for signing
    # NOTE(review): if add_package failed without setting last_error_message,
    # this renders with a falsy package_id -- confirm the API always sets one.
    return render_template('show_iframe.html',
                           access_token=access_token,
                           package_id=package_id,
                           user_email=recipient_user_email)
# SigningHub Callback, called after a user finishes the IFrame
@app.route('/signinghub/callback') # Must match SigningHub's Application call-back URL setting
def signinghub_callback():
    """Handle SigningHub's post-signing callback and render a summary page."""
    args = request.args
    return render_template('finished.html',
                           access_token=args.get('token'),
                           # Legacy name: 'document_id' actually carries the package id.
                           package_id=args.get('document_id'),
                           language_code=args.get('language'),
                           user_email=args.get('user_email'))
|
# Objectives and constraints functions (Aug. 04, 2021)
import math
import numpy as np
def objective(var_o, gamma_=None, theta=None, mttf=None):  # objective function, Weibull
    """Weibull-based maintenance-cost objective.

    The original template left ``gamma_``, ``theta`` and ``mttf`` as blank
    assignments (a SyntaxError); they are now keyword parameters that must
    be supplied by the caller.

    var_o: candidate maintenance interval in hours (scalar or 1-element
           array); floored to an integer number of hours.
    gamma_/theta: Weibull shape / scale parameters.
    mttf: mean time to failure in hours.
    Returns the expected total cost c(t) over the service life.
    Raises ValueError when the distribution parameters are missing.

    NOTE(review): a second ``objective`` defined later in this module
    shadows this one at import time -- confirm which is intended.
    """
    if gamma_ is None or theta is None or mttf is None:
        raise ValueError("gamma_, theta and mttf must be provided")
    # Reliability (Weibull survival function) at the floored interval t.
    t = int(math.floor(np.asarray(var_o).item()))
    r_t = math.exp(-((t / theta) ** gamma_))
    # Cost model constants.
    c_m = 1000      # preventive maintenance cost
    c_r = 2500      # corrective replacement cost
    c_inc = 10000   # incident (failure consequence) cost
    t_ser = 87600   # service life in hours
    # Preventive part weighted by survival, corrective part by failure.
    return (t_ser / t) * c_m * r_t + (t_ser / mttf) * (c_r + c_inc) * (1 - r_t)
def constraints(var_c, gamma_=None, theta=None, lim=None, t_m=None, t_r=None):  # constraint function, Weibull
    """Weibull reliability constraint.

    The original template left ``gamma_``, ``theta``, ``lim``, ``t_m`` and
    ``t_r`` as blank assignments (a SyntaxError); they are now required
    keyword parameters.

    var_c: candidate maintenance interval (scalar or 1-element array).
    gamma_/theta: Weibull shape / scale parameters.
    lim: minimum acceptable reliability.
    t_m/t_r: repair / maintenance times (hours), used by the availability
             expression below.
    Returns True when r(t) >= lim, else False.
    """
    if gamma_ is None or theta is None or lim is None or t_m is None or t_r is None:
        raise ValueError("gamma_, theta, lim, t_m and t_r must be provided")
    # Reliability at the floored interval t.
    t = int(math.floor(np.asarray(var_c).item()))
    r_t = math.exp(-((t / theta) ** gamma_))
    # Availability; swap r_t for a_t in the return below to constrain on
    # availability instead of reliability (kept from the original comment).
    a_t = t / (t + r_t * t_m + (1 - r_t) * t_r)
    return r_t >= lim
def objective(var_o):  # objective function, Lognormal
    """Lognormal-based maintenance-cost objective.

    NOTE(review): this redefines ``objective`` from earlier in the module,
    so only this Lognormal version survives at import time -- confirm intent.

    var_o: candidate maintenance interval in hours (scalar or 1-element
    array -- the original required an object with ``.copy()``; this accepts
    plain numbers too).
    Returns the expected total cost c(t) over the service life.
    """
    mu = 5.9093828021596
    sigma = 0.486238331177103
    v = np.asarray(var_o).item()
    t = int(math.floor(v))
    # Standardized variable; r(t) uses a closed-form approximation of the
    # standard normal tail built from the three terms below.
    z = (mu - math.log(v)) / sigma
    termo_1 = (4 - math.pi) * abs(z) + math.sqrt(2 * math.pi) * (math.pi - 2)
    termo_2 = ((4 - math.pi) * math.sqrt(2 * math.pi) * abs(z) ** 2
               + 2 * math.pi * abs(z)
               + 2 * math.sqrt(2 * math.pi) * (math.pi - 2))
    termo_3 = math.exp(-(abs(z) ** 2) / 2)
    approx = (termo_1 / termo_2) * termo_3
    # Mirror the one-sided approximation for negative z
    # (the original's ``1 - (1 - x)`` branch simplified to ``x``).
    r_t = 1 - approx if z < 0 else approx
    # Cost model constants.
    c_m = 1000      # preventive maintenance cost
    c_r = 2500      # corrective replacement cost
    c_inc = 10000   # incident (failure consequence) cost
    t_ser = 87600   # service life in hours
    mttf = 413      # mean time to failure in hours
    return (t_ser / t) * c_m * r_t + (t_ser / mttf) * (c_r + c_inc) * (1 - r_t)
def constraints(var_c):  # constraint function, Lognormal
    """Lognormal availability constraint: True when A(t) >= 0.99.

    NOTE(review): this redefines ``constraints`` from earlier in the module,
    so only this Lognormal version survives at import time -- confirm intent.

    var_c: candidate maintenance interval in hours (scalar or 1-element
    array -- the original required an object with ``.copy()``; this accepts
    plain numbers too).
    """
    mu = 5.9093828021596
    sigma = 0.486238331177103
    v = np.asarray(var_c).item()
    t = int(math.floor(v))
    # Reliability via the same closed-form normal-tail approximation as the
    # Lognormal objective above.
    z = (mu - math.log(v)) / sigma
    termo_1 = (4 - math.pi) * abs(z) + math.sqrt(2 * math.pi) * (math.pi - 2)
    termo_2 = ((4 - math.pi) * math.sqrt(2 * math.pi) * abs(z) ** 2
               + 2 * math.pi * abs(z)
               + 2 * math.sqrt(2 * math.pi) * (math.pi - 2))
    termo_3 = math.exp(-(abs(z) ** 2) / 2)
    approx = (termo_1 / termo_2) * termo_3
    r_t = 1 - approx if z < 0 else approx
    # Availability; swap a_t for r_t to constrain on reliability instead
    # (kept from the original comment).
    t_m = 3  # repair time
    t_r = 5  # maintenance time
    a_t = t / (t + r_t * t_m + (1 - r_t) * t_r)
    return a_t >= 0.99
import tqdm
import os
import json
import re
import itertools
import math
import matplotlib.pyplot as plt
# NOTE(review): Python 2 code (print statements, dict.has_key, list-returning
# zip with in-place .sort()) -- it will not run under Python 3 as-is.
# Compares per-model detection counts against the known number of pigs per
# validation set and plots the average count error per training iteration.
datasetFolder = "../dataset/PigData/"
experiment = "pig5_v4"
# Validation sets, named by the number of pigs ("stk") present in each.
valNbEntities = [ 3, 6, 10 ]
########################################################################################
for nbEntities in valNbEntities:
    valFolder = datasetFolder + "validation_{}stk/".format(nbEntities)
    valJsons = "../training_results/{}/pig/pig_5/{}stk/".format(experiment, nbEntities)
    # Get json in the valJson folder
    jsons = os.listdir(valJsons)
    # Get image ids
    img_names = os.listdir(valFolder)
    # File names are '<numeric id>.<ext>'.
    img_ids = [ int(img_id.split('.')[0]) for img_id in img_names ]
    nbIterations = []
    nbDetectedAccuracies = []
    nbDetectErrors = []
    print "Reading validation_{}stk data...".format(nbEntities)
    # for valFile in jsons[0:1]:
    for valFile in tqdm.tqdm(jsons):
        # print "Reading " + valFile
        # Reading validation json
        try:
            with open(valJsons + valFile) as file:
                data = json.load(file)
        except:
            print "\nError reading " + valFile
            continue
        # Building detection dictionnary
        # Group the flat detection list by the frame it belongs to.
        detectionList = dict()
        for detect in data:
            img_id = detect['image_id']
            if not detectionList.has_key(img_id):
                detectionList[img_id] = []
            detectionList[img_id].append(detect)
        # print detectionLists
        # Nb of entities detected:
        correctPigNb = 0
        nbDetectedError = 0
        for img_id in img_ids:
            if detectionList.has_key(img_id): # At least one detection for this frame
                PigCountPredict = len(detectionList[img_id])
            else: # No detection for this frame
                PigCountPredict = 0
            # Nb of entities detected
            # if PigCountPredict == nbEntities:
            #     correctPigNb += 1
            # Nb of entities detected -+1
            # A count within +-1 of the true number is treated as correct.
            if abs(PigCountPredict - nbEntities) < 2:
                correctPigNb += 1
            nbDetectedError += abs(PigCountPredict - nbEntities)
        # Entity Detection Error
        nbDetectErrors.append(float(nbDetectedError) / float(len(img_ids)))
        # Compute the detection accuracy
        nbDetectedAcc = float(correctPigNb) / float(len(img_ids)) * 100
        nbDetectedAccuracies.append(nbDetectedAcc)
        # Get the iteration nb of the model
        # Result file names look like '<prefix>_<tag>_<iteration>.<ext>'.
        nbIterations.append(int(re.split(r'\_|\.', valFile)[2]))
    # Sorting accuracies in natural order
    tmp = zip(nbIterations, nbDetectedAccuracies, nbDetectErrors)
    tmp.sort()
    nbIterations, nbDetectedAccuracies, nbDetectErrors = zip(*tmp)
    # Plot accuracies
    # One error curve per validation set; all share the figure shown below.
    plt.plot(nbIterations, nbDetectErrors)
    # plt.plot(nbIterations, nbDetectedAccuracies)
    # plt.xscale('log')
plt.title('Entity Detection Error ({})'.format(experiment))
plt.xlabel("Nb iterations")
plt.ylabel("Average Error")
plt.legend([ 'duroc_{}stk'.format(nbEntities) for nbEntities in valNbEntities ])
plt.grid(which='minor', axis='both')
plt.grid(which='major', axis='both')
plt.show()
import scrapy
from ..items import DaGroup7Item
from scrapy.loader import ItemLoader
class TilesSpider(scrapy.Spider):
    """Crawl magnatiles.com product listing pages and yield product items."""
    name = 'tiles'
    allowed_domains = ['magnatiles.com']
    start_urls = ['http://magnatiles.com/products/page/1/']

    def parse(self, response):
        # One item per product card on the listing page.
        for product in response.css('ul.products li'):
            loader = ItemLoader(item=DaGroup7Item(), selector=product)
            loader.add_css('imageURL', 'img.attachment-woocommerce_thumbnail::attr(data-lazy-src)')
            loader.add_css('sku', 'a.button::attr(data-product_sku)')
            loader.add_css('name', 'h2')
            loader.add_css('price', 'span.price bdi')
            yield loader.load_item()
        # Follow pagination until the "next" link disappears.
        next_href = response.css('ul.page-numbers a.next::attr(href)').get()
        if next_href is not None:
            yield scrapy.Request(response.urljoin(next_href), callback=self.parse)
# Importing appropriate libraries
import requests
import unittest
class TestingHeader(unittest.TestCase):
    """Verify that a custom User-Agent header is sent to httpbin."""
    headers = {'User-Agent': 'Mobile'}
    url2 = 'http://httpbin.org/headers'

    def test_headers(self):
        # The request now runs inside the test (the original issued it at
        # class-definition time, i.e. on import). httpbin echoes the request
        # headers back in its JSON body.
        response = requests.get(self.url2, headers=self.headers)
        echoed = response.json().get('headers', {})
        # The original 'assertTrue(headers, "Mobile")' was vacuous: the
        # second argument of assertTrue is the failure *message*, so the
        # assertion only checked that the dict was non-empty.
        self.assertEqual(echoed.get('User-Agent'), 'Mobile')

if __name__ == '__main__':
    unittest.main()
'''
https://www.hackerrank.com/challenges/prime-date/submissions/code/102871849
Debug the given function findPrimeDates and/or other lines of code, to find the correct lucky dates from the given input.
Note: You can modify at most five lines in the given code and you cannot add or remove lines to the code.
'''
import re
# Month-length lookup, 1-indexed by month number (index 0 unused;
# populated by the driver code before use).
month = []

def updateLeapYear(year):
    """Set February's length in ``month`` per the Gregorian leap-year rules."""
    if year % 400 == 0:
        month[2] = 29  # debug 1 from 28 to 29
    elif year % 100 == 0:
        month[2] = 28  # debug 2 from 29 to 28 (fixed: the line used '@' as a comment marker, a SyntaxError)
    elif year % 4 == 0:
        month[2] = 29
    else:
        month[2] = 28
def storeMonth():
    """Reset the 1-indexed month-length table to non-leap-year values."""
    for idx, days in enumerate(
        [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31], start=1
    ):
        month[idx] = days
def findPrimeDates(d1, m1, y1, d2, m2, y2):
    # Walk day by day from d1/m1/y1 through d2/m2/y2 inclusive, counting
    # "lucky" dates whose ddmmyyyy concatenation is divisible by 4 or by 7.
    # Relies on the module-level ``month`` table and updateLeapYear().
    storeMonth()
    result = 0
    while(True):
        # Build the number ddmmyyyy from the current date.
        x = d1
        x = x * 100 + m1
        x = x * 10000 + y1 # debug 3 from 1000 t0 10000
        if x % 4 == 0 or x % 7 == 0: # debug 4 from and to or
            result = result + 1
        if d1 == d2 and m1 == m2 and y1 == y2:
            break
        # Advance one calendar day, rolling month/year as needed.
        updateLeapYear(y1)
        d1 = d1 + 1
        if d1 > month[m1]:
            m1 = m1 + 1
            d1 = 1
            if m1 > 12:
                y1 = y1 + 1
                m1 = 1 # debug 5 from m1 +1 to 1 for year change so month = 1
    return result;
# Pre-size the month table (index 0 unused) before the date walk.
for i in range(1, 15):
    month.append(31)
# Input format: 'dd-mm-yyyy dd-mm-yyyy' (start and end dates).
line = input()
date = re.split('-| ', line)
d1 = int(date[0])
m1 = int(date[1])
y1 = int(date[2])
d2 = int(date[3])
m2 = int(date[4])
y2 = int(date[5])
result = findPrimeDates(d1, m1, y1, d2, m2, y2)
print(result)
|
################################################################################
# Modules and functions import statements
################################################################################
import pdb
from helpers.app_helpers import *
from helpers.page_helpers import *
from helpers.jinja2_helpers import *
################################################################################
# Setup helper functions
################################################################################
# N/A
################################################################################
# Setup commonly used routes
################################################################################
@route('/software/')
@route('/software')
def display_software_home_page(errorMessages=None):
    """Software home page: currently forwards straight to the news page."""
    # NOTE(review): in Bottle, redirect() raises to abort the handler, which
    # would make everything below unreachable -- confirm and consider
    # removing the dead template-render path.
    redirect("/software/news")
    context = get_default_context(request)
    #response.set_cookie('username', 'the username')
    return jinja2_env.get_template('html/software/home-page.html').render(context)
@route('/software/news')
def display_software_news_page(errorMessages=None):
    """Render the software news page with the default template context."""
    context = get_default_context(request)
    template = jinja2_env.get_template('html/software/news-page.html')
    return template.render(context)
|
import cv2
import pandas as pd
import numpy as np
from webcolors import hex_to_rgb
from talking_color.algorithms.eucledian_rgb import EucledianRGB
class EucledianHSV(EucledianRGB):
    """Euclidean nearest-color matching performed in HSV space."""
    color_space = 'HSV'

    def _get_frame_in_color_space(self, frame):
        # Incoming frames are BGR; use the full-range HSV conversion.
        return cv2.cvtColor(frame, cv2.COLOR_BGR2HSV_FULL)

    @staticmethod
    def _get_processed_df(df_train) -> pd.DataFrame:
        """Augment the training frame with H/S/V columns derived from 'Hex'."""
        # Parse each hex color into an [R, G, B] triplet.
        triplets = [list(hex_to_rgb(df_train['Hex'][i])) for i in range(len(df_train))]
        # Treat the triplets as a 1-row image so OpenCV can convert them.
        as_pixels = np.array([triplets], dtype=np.uint8)
        hsv_pixels = cv2.cvtColor(as_pixels, cv2.COLOR_RGB2HSV)
        hsv_df = pd.DataFrame(hsv_pixels[0], columns=['H', 'S', 'V'])
        enriched = df_train.join(hsv_df)
        enriched['HSV'] = list(zip(enriched['H'], enriched['S'], enriched['V']))
        return enriched
|
#!/usr/bin/env python3
"""
A client for fuzzbucket.
Configuration is accepted via the following environment variables:
FUZZBUCKET_URL - string URL of the fuzzbucket instance including path prefix
FUZZBUCKET_LOG_LEVEL - log level name (default="INFO")
Optional:
FUZZBUCKET_CREDENTIALS - credentials string value
see ~/.cache/fuzzbucket/credentials
FUZZBUCKET_PREFERENCES - preferences JSON string value
see ~/.cache/fuzzbucket/preferences
"""
import argparse
import configparser
import contextlib
import datetime
import enum
import fnmatch
import getpass
import io
import json
import logging
import os
import pathlib
import re
import sys
import textwrap
import typing
import urllib.parse
import urllib.request
import webbrowser
from fuzzbucket_client.__version__ import version as __version__
# TTL bounds for boxes; presumably enforced where --ttl values are applied
# (the validation site is not visible in this chunk -- confirm).
MIN_TTL = datetime.timedelta(minutes=10)
MAX_TTL = datetime.timedelta(weeks=12)
# Shared help text appended to the subcommands that accept --ttl.
TTL_HELP = """\
The --ttl argument may be given values that include the following:
seconds as integers or floats
123
456.78
datetime.timedelta strings
'1 day, 2:34:56'
'12:34:56'
'123 days, 4:57:18'
datetime.timedelta-like strings as alternating <value> <unit>
'1 week, 23 days, 45 minutes 6 seconds'
'12 weeks, 3.9 days 4 hour 56 minutes'
The top-level --check-ttl flag may be used to check a ttl value prior to using it with
this command.
"""
def default_client() -> "Client":
    """Return a ``Client`` constructed with its default settings."""
    return Client()
def config_logging(level: int = logging.INFO, stream: typing.TextIO = sys.stderr):
    """Configure root logging with this client's line format (str.format style)."""
    logging.basicConfig(
        stream=stream,
        style="{",
        format="# {name}:{levelname}:{asctime}:: {message}",
        datefmt="%Y-%m-%dT%H%M%S",
        level=level,
    )
def log_level() -> int:
    """Resolve FUZZBUCKET_LOG_LEVEL (default INFO) to a ``logging`` level int."""
    name = os.environ.get("FUZZBUCKET_LOG_LEVEL", "INFO").strip().upper()
    return getattr(logging, name)
def _reverse_map_float(el: typing.Tuple[str, str]) -> typing.Tuple[str, float]:
return (el[1].rstrip("s")) + "s", float(el[0])
def _timedelta_kwargs_from_pairs(pairs: typing.List[str]) -> typing.Dict[str, float]:
    """Build timedelta kwargs from a flat ['<value>', '<unit>', ...] list."""
    it = iter(pairs)
    # zip(it, it) consumes the flat list two items at a time; any trailing
    # unpaired element is silently dropped, as in the original map/zip form.
    return {unit.rstrip("s") + "s": float(value) for value, unit in zip(it, it)}
def _timedelta_kwargs_from_sexagesimal(
    sexagesimal_string: str,
) -> typing.Dict[str, float]:
    """Map an '[HH:]MM:SS'-style string to timedelta keyword arguments."""
    parts = [p.strip() for p in sexagesimal_string.strip().split(":")]
    # The rightmost component is seconds, then minutes, then hours; zip
    # drops the unit names that have no matching component.
    return {
        unit: float(value)
        for value, unit in zip(reversed(parts), ["seconds", "minutes", "hours"])
    }
def parse_timedelta(as_string: str) -> datetime.timedelta:
    """Parse a human-friendly duration string into a ``datetime.timedelta``.

    Accepts bare seconds ('123', '456.78'), '<value> <unit>' pairs
    ('1 week 2 days'), and/or a trailing 'H:M:S' sexagesimal part
    ('1 day, 2:34:56').

    Raises ValueError for unrecognized formats or unknown unit names.
    """
    # Normalize: lowercase, drop commas, split on whitespace.
    pairs = as_string.strip().lower().replace(",", "").split()
    sexagesimal_part = None
    if len(pairs) == 1:
        if ":" in pairs[0]:
            sexagesimal_part = pairs[0]
        else:
            # A single bare number is interpreted as seconds.
            return datetime.timedelta(seconds=float(pairs[0]))
    elif len(pairs) % 2 != 0:
        # An odd token count is only valid when the final token is an
        # 'H:M:S' part tacked onto value/unit pairs.
        if ":" in pairs[-1]:
            sexagesimal_part = pairs[-1]
        else:
            raise ValueError(
                f"timedelta string {as_string!r} is not in an understandable format"
            )
    # The pair parser drops any trailing unpaired token (e.g. the
    # sexagesimal part), so passing the full list here is safe.
    kwargs = _timedelta_kwargs_from_pairs(pairs)
    if sexagesimal_part is not None:
        kwargs.update(_timedelta_kwargs_from_sexagesimal(sexagesimal_part))
    # Reject unit names that datetime.timedelta does not understand.
    unknown_keys = set(kwargs.keys()).difference(
        set(
            [
                "days",
                "hours",
                "minutes",
                "seconds",
                "weeks",
            ]
        )
    )
    if len(unknown_keys) > 0:
        raise ValueError(f"unknown timedelta keys {unknown_keys!r}")
    return datetime.timedelta(
        days=kwargs.get("days", 0),
        hours=kwargs.get("hours", 0),
        minutes=kwargs.get("minutes", 0),
        seconds=kwargs.get("seconds", 0),
        weeks=kwargs.get("weeks", 0),
    )
def _instance_tags_from_string(input_string: str) -> typing.Dict[str, str]:
    """Parse 'key:value,key:value' (parts may be url-encoded) into a dict."""
    instance_tags: typing.Dict[str, str] = {}
    for pair in [s.strip() for s in input_string.split(",")]:
        # Skip empty fragments from stray commas.
        if pair == "":
            continue
        if ":" not in pair:
            raise ValueError(f"instance_tag={pair!r} is not a '<key>:<value>' pair")
        raw_key, raw_value = pair.strip().split(":", maxsplit=1)
        key = urllib.parse.unquote(str(raw_key.strip()))
        value = urllib.parse.unquote(str(raw_value.strip()))
        log.debug(f"adding instance tag key={key!r} value={value!r}")
        instance_tags[key] = value
    return instance_tags
def utcnow() -> datetime.datetime:
    """Return the current UTC time as a *naive* datetime.

    NOTE(review): ``datetime.utcnow`` is deprecated as of Python 3.12;
    callers appear to expect naive datetimes, so switching to
    timezone-aware values would be a behavior change -- confirm first.
    """
    return datetime.datetime.utcnow()
# Module-wide logger for the fuzzbucket client.
log = logging.getLogger("fuzzbucket")

class CustomHelpFormatter(
    argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter
):
    """argparse formatter combining default-value display with raw descriptions."""
    ...
def main(sysargs: typing.List[str] = sys.argv[:]) -> int:
    """Entry point: build the argument parser, dispatch the chosen
    subcommand on a default ``Client``, and return a process exit code
    (0 success, 2 usage error, 86 command failure).

    NOTE(review): the default for ``sysargs`` is a snapshot of sys.argv
    taken at import time (mutable-default pitfall) -- confirm intended.
    """
    client = default_client()
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=CustomHelpFormatter,
    )
    # --- global flags --------------------------------------------------
    parser.add_argument(
        "--version", action="store_true", help="print the version and exit"
    )
    parser.add_argument(
        "--check-ttl",
        type=parse_timedelta,
        default=None,
        help="check a ttl value and exit, presumably before using it with a command"
        + "that supports ttl",
    )
    parser.add_argument(
        "-j",
        "--output-json",
        action="store_true",
        default=False,
        help="format all output as json",
    )
    parser.add_argument(
        "-D",
        "--debug",
        action="store_true",
        default=log_level() == logging.DEBUG,
        help="enable debug logging",
    )
    subparsers = parser.add_subparsers(
        title="commands",
    )
    # --- authentication commands ---------------------------------------
    parser_login = subparsers.add_parser(
        "login", help="login via GitHub", formatter_class=CustomHelpFormatter
    )
    parser_login.add_argument("user", help="GitHub username")
    parser_login.add_argument(
        "-n",
        "--name",
        default=None,
        help="human-friendly name to give to credentials entry",
    )
    parser_login.set_defaults(func=client.login)
    parser_login.epilog = textwrap.dedent(
        """
        NOTE: Use the exact letter casing expected by GitHub to
        avoid weirdness.
        """
    )
    parser_logout = subparsers.add_parser(
        "logout",
        help="logout (from fuzzbucket *only*)",
        formatter_class=CustomHelpFormatter,
    )
    parser_logout.set_defaults(func=client.logout)
    # --- box lifecycle commands ----------------------------------------
    parser_create = subparsers.add_parser(
        "create",
        aliases=["new"],
        help="create a box",
        description="\n\n".join(["Create a box.", TTL_HELP]),
        formatter_class=CustomHelpFormatter,
    )
    parser_create.add_argument(
        "image", default=Client.default_image_alias, help="image alias or full AMI id"
    )
    parser_create.add_argument(
        "-n", "--name", help="custom name for box (generated if omitted)"
    )
    parser_create.add_argument(
        "-c",
        "--connect",
        action="store_true",
        help="add connect-specific security group for accessing ports 3939 and 13939",
    )
    parser_create.add_argument(
        "-T",
        "--ttl",
        type=parse_timedelta,
        default=datetime.timedelta(hours=4),
        help="set the TTL for the box, after which it will be reaped ",
    )
    parser_create.add_argument("-t", "--instance-type", default=None)
    parser_create.add_argument(
        "-S",
        "--root-volume-size",
        default=None,
        help="set the root volume size (in GB)",
    )
    parser_create.add_argument(
        "-k",
        "--key-alias",
        default=None,
        help="specify which key alias to use",
    )
    parser_create.add_argument(
        "-X",
        "--instance-tags",
        default=None,
        help="key:value comma-delimited instance tags (optionally url-encoded)",
    )
    parser_create.set_defaults(func=client.create)
    parser_list = subparsers.add_parser(
        "list",
        aliases=["ls"],
        help="list your boxes",
        formatter_class=CustomHelpFormatter,
    )
    parser_list.set_defaults(func=client.list)
    parser_update = subparsers.add_parser(
        "update",
        aliases=["up"],
        help="update matching boxes",
        description="\n\n".join(["Update matching boxes.", TTL_HELP]),
        formatter_class=CustomHelpFormatter,
    )
    parser_update.add_argument(
        "-T",
        "--ttl",
        type=parse_timedelta,
        default=None,
        help="set the new TTL for the matching boxes relative to the current time, "
        + "after which they will be reaped",
    )
    parser_update.add_argument(
        "-X",
        "--instance-tags",
        default=None,
        help="key:value comma-delimited instance tags (optionally url-encoded)",
    )
    parser_update.add_argument("box_match")
    parser_update.set_defaults(func=client.update)
    parser_delete = subparsers.add_parser(
        "delete",
        aliases=["rm"],
        help="delete matching boxes",
        formatter_class=CustomHelpFormatter,
    )
    parser_delete.add_argument("box_match")
    parser_delete.set_defaults(func=client.delete)
    parser_reboot = subparsers.add_parser(
        "reboot",
        aliases=["restart"],
        help="reboot a box",
        formatter_class=CustomHelpFormatter,
    )
    parser_reboot.add_argument("box")
    parser_reboot.set_defaults(func=client.reboot)
    # --- remote access commands ----------------------------------------
    parser_ssh = subparsers.add_parser(
        "ssh", help="ssh into a box", formatter_class=CustomHelpFormatter
    )
    parser_ssh.add_argument(
        "-q",
        "--quiet",
        action="store_true",
        help="suppress box info header",
    )
    parser_ssh.usage = "usage: %(prog)s [-hq] box [ssh-arguments]"
    parser_ssh.description = textwrap.dedent(
        """
        ssh into a box, optionally passing arbitrary commands as positional
        arguments. Additionally, stdio streams will be inherited by the ssh
        process in order to support piping.
        """
    )
    parser_ssh.epilog = textwrap.dedent(
        """
        NOTE: If no login is provided via the "-l" ssh option, a value will
        be guessed based on the box image alias.
        """
    )
    parser_ssh.add_argument("box")
    parser_ssh.set_defaults(func=client.ssh)
    parser_scp = subparsers.add_parser(
        "scp",
        help="scp things into or out of a box",
        formatter_class=CustomHelpFormatter,
    )
    parser_scp.usage = "usage: %(prog)s [-h] box [scp-arguments]"
    parser_scp.description = textwrap.dedent(
        """
        scp things into or out of a box, optionally passing arbitrary commands
        as positional arguments. Additionally, stdio streams will be inherited
        by the scp process in order to support piping.
        """
    )
    parser_scp.epilog = textwrap.dedent(
        """
        NOTE: If no login is provided in at least one of the source or
        destination arguments, a value will be guessed based on the box image
        alias.
        IMPORTANT: The fully-qualified address of the box will be substituted in
        the remaining command arguments wherever the literal "__BOX__" appears,
        e.g.:
        the command:
        %(prog)s boxname -r ./some/local/path __BOX__:/tmp/
        becomes:
        scp -r ./some/local/path user@boxname.fully.qualified.example.com:/tmp/
        the command:
        %(prog)s boxname -r 'altuser@__BOX__:/var/log/*.log' ./some/local/path/
        becomes:
        scp -r altuser@boxname.fully.qualified.example.com:/var/log/*.log \\
        ./some/local/path/
        """
    )
    parser_scp.add_argument("box")
    parser_scp.set_defaults(func=client.scp)
    # --- image alias commands ------------------------------------------
    parser_create_alias = subparsers.add_parser(
        "create-alias",
        help="create an image alias",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser_create_alias.add_argument("alias")
    parser_create_alias.add_argument("ami")
    parser_create_alias.set_defaults(func=client.create_alias)
    parser_list_aliases = subparsers.add_parser(
        "list-aliases",
        aliases=["la"],
        help="list known image aliases",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser_list_aliases.set_defaults(func=client.list_aliases)
    parser_delete_alias = subparsers.add_parser(
        "delete-alias",
        help="delete an image alias",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser_delete_alias.add_argument("alias")
    parser_delete_alias.set_defaults(func=client.delete_alias)
    # --- ssh key management commands -----------------------------------
    parser_get_key = subparsers.add_parser(
        "get-key",
        help="get an ssh public key id and fingerprint as stored in EC2",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser_get_key.add_argument(
        "--alias",
        "-a",
        type=str,
        default="default",
        help="the alias of the key to get",
    )
    parser_get_key.set_defaults(func=client.get_key)
    parser_set_key = subparsers.add_parser(
        "set-key",
        help="set the local default key alias to use when creating boxes",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser_set_key.add_argument(
        "--alias",
        "-a",
        type=str,
        default="default",
        help="the alias of the key to set as the local default",
    )
    parser_set_key.set_defaults(func=client.set_key)
    parser_list_keys = subparsers.add_parser(
        "list-keys",
        help="list ssh public keys stored in EC2",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser_list_keys.set_defaults(func=client.list_keys)
    parser_add_key = subparsers.add_parser(
        "add-key",
        help="add an ssh public key to EC2",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser_add_key.add_argument(
        "--alias",
        "-a",
        type=str,
        default="default",
        help="the alias of the key to add",
    )
    parser_add_key.add_argument(
        "--filename",
        "-f",
        type=lambda f: pathlib.Path(f).expanduser(),
        default=pathlib.Path("~/.ssh/id_rsa.pub").expanduser(),
        help="file path of the ssh public key",
    )
    parser_add_key.set_defaults(func=client.add_key)
    parser_delete_key = subparsers.add_parser(
        "delete-key",
        help="delete an ssh public key stored in EC2",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser_delete_key.add_argument(
        "--alias",
        "-a",
        type=str,
        default="default",
        help="the alias of the key to delete",
    )
    parser_delete_key.set_defaults(func=client.delete_key)
    # --- parse and dispatch --------------------------------------------
    known_args, unknown_args = parser.parse_known_args(sysargs[1:])
    config_logging(level=logging.DEBUG if known_args.debug else logging.INFO)
    if known_args.version:
        print(f"fuzzbucket-client {__version__}")
        return 0
    if known_args.output_json:
        client.data_format = _DataFormats.JSON
    if known_args.check_ttl:
        client.show_valid_ttl(known_args.check_ttl)
        return 0
    if not hasattr(known_args, "func"):
        # No subcommand given: show usage and exit with the usage code.
        log.debug(f"no subcommand func defined in namespace={known_args!r}")
        parser.print_help()
        return 2
    # Subcommand handlers return truthy on success.
    if known_args.func(known_args, unknown_args):
        return 0
    return 86
def _print_auth_hint():
    """Print instructions for (re-)authenticating with the Fuzzbucket server.

    NOTE(review): the leading whitespace inside the dedent-ed literal was lost
    in extraction and is reconstructed flat here — confirm the command lines'
    indentation against the original file.
    """
    print(
        textwrap.dedent(
            """
            Please run the following command with your GitHub username
            to grant access to Fuzzbucket:
            fuzzbucket-client login {github-username}
            If you believe you are already logged in, there is a chance
            that you logged in with different letter casing than what
            GitHub expects. Please double check the letter casing of
            your username and then retry login after removing your
            existing login data:
            fuzzbucket-client logout
            fuzzbucket-client login {github-username}
            """
        )
    )
def _pjoin(*parts: str) -> str:
return "/".join(parts)
# Commands allowed to run without stored credentials; the _command wrapper
# skips Client._setup() for these (see _command below).
NOSETUP_COMMANDS = ("login",)
def _command(method):
    """Decorator for ``Client`` command methods.

    Runs ``Client._setup()`` before and ``Client._finalize()`` after the
    wrapped method (setup is skipped for commands in NOSETUP_COMMANDS), and
    converts exceptions into a logged message plus a False return value so
    ``main`` can map the result to an exit code.
    """

    def handle_exc(exc):
        # Shared failure path: full traceback at DEBUG, terse message otherwise.
        msg = f"command {method.__name__!r} failed"
        if log_level() == logging.DEBUG:
            log.exception(msg)
        else:
            log.error(f"{msg} err={exc!r}")
        return False

    def wrapper(self, known_args, unknown_args):
        try:
            if method.__name__ not in NOSETUP_COMMANDS:
                self._setup()
            result = method(self, known_args, unknown_args)
            self._finalize()
            return result
        except urllib.request.HTTPError as exc:
            # The server responds with a JSON body containing an "error" key;
            # parsing that body may itself fail, hence the nested try.
            try:
                response = json.load(exc)
                log.error(
                    f"command {method.__name__!r} failed err={response.get('error')!r}"
                )
                if exc.code == 403:
                    # 403 usually means missing/stale login — hint at re-auth.
                    _print_auth_hint()
                return False
            except Exception as exc:
                return handle_exc(exc)
        except CredentialsError as exc:
            log.error(f"command {method.__name__!r} failed err={exc}")
            _print_auth_hint()
            return False
        except Exception as exc:
            return handle_exc(exc)

    return wrapper
class CredentialsError(ValueError):
    """Raised when no stored credentials exist for the configured server URL."""

    def __init__(self, url: str, credentials_path: str) -> None:
        self.url = url
        self.credentials_path = credentials_path

    def __str__(self) -> str:
        return f"No credentials found for url={self.url!r} in file={self.credentials_path!r}"
class _DataFormats(enum.Enum):
    """Output formats for printed results; value selects a ``_format_*`` method."""

    INI = "ini"
    JSON = "json"
class _Preferences(enum.Enum):
    """Keys used in the locally persisted preferences JSON file."""

    DEFAULT_KEY_ALIAS = "default_key_alias"
class Client:
    """HTTP client for the Fuzzbucket API plus local credential/preference storage.

    Command methods (decorated with ``_command``) return True on success and
    False on failure; the CLI entry point maps those to process exit codes.
    Credentials and preferences are cached in ``~/.cache/fuzzbucket/`` unless
    overridden via the FUZZBUCKET_CREDENTIALS / FUZZBUCKET_PREFERENCES
    environment variables.
    """

    # Fallback EC2 instance type when none is given and no per-image default applies.
    default_instance_type = "t3.small"
    default_image_alias = "ubuntu18"
    # Per-image-alias instance type defaults; the None key is the catch-all.
    default_instance_types = {
        "centos6": "t2.small",
        "rhel6": "t2.small",
        "sles12": "t2.small",
        None: default_instance_type,
    }
    default_key_alias = "default"
    default_ssh_user = "ec2-user"
    # Maps image-alias prefix -> ssh login user (see _guess_ssh_user).
    default_ssh_users = {
        "centos": "centos",
        "rhel": default_ssh_user,
        "sles": default_ssh_user,
        "suse": default_ssh_user,
        "ubuntu": "ubuntu",
    }

    def __init__(
        self,
        env: typing.Optional[typing.Dict[str, str]] = None,
    ):
        # Snapshot of the environment (injectable for tests).
        self._env = env if env is not None else dict(os.environ)
        self._cached_url_opener = None
        self._cached_credentials = None
        self._patched_credentials_file = None
        self._cached_preferences = None
        self.data_format = _DataFormats.INI

    def _setup(self):
        """Validate URL and credentials before a command runs (see _command)."""
        if self._url is None:
            raise ValueError("missing FUZZBUCKET_URL")
        if self._credentials in (None, ""):
            raise CredentialsError(self._url, self._credentials_file)

    def _finalize(self):
        """Persist preferences after a command completes (see _command)."""
        self._write_preferences(self._preferences)

    def show_valid_ttl(self, ttl):
        """Print the parsed TTL in the selected data format."""
        print(self._format_valid_ttl(ttl), end="")
        return True

    @_command
    def login(self, known_args, _):
        """Open the browser-based OAuth2 flow and store the pasted secret."""
        if self._url is None:
            raise ValueError("missing FUZZBUCKET_URL")
        log.debug(f"starting login flow for user={known_args.user}")
        login_url = "?".join(
            [
                _pjoin(self._url, "_login"),
                urllib.parse.urlencode(dict(user=known_args.user)),
            ]
        )
        webbrowser.open(login_url)
        print(
            textwrap.dedent(
                f"""
                Attempting to open the following URL in a browser:
                {login_url}
                Please follow the OAuth2 flow and then paste the 'secret' provided
                by fuzzbucket.
                """
            )
        )
        secret = None
        while secret is None:
            try:
                raw_secret = getpass.getpass("secret: ").strip()
                # NOTE(review): server-issued secrets are apparently exactly
                # 42 characters — confirm against the server implementation.
                if len(raw_secret) != 42:
                    print("Invalid secret provided. Please try again.")
                    continue
                secret = raw_secret
            except KeyboardInterrupt:
                return False
        self._write_credentials(known_args.user, secret, name=known_args.name)
        print(f"Login successful user={known_args.user!r}")
        return True

    @_command
    def logout(self, *_):
        """Invalidate the server-side session for the current user."""
        log.debug(f"starting logout for user={self._user!r}")
        req = self._build_request(_pjoin(self._url, "_logout"), method="POST")
        with self._urlopen(req) as response:
            _ = response.read()
        log.info(f"logged out user={self._user!r}")
        return True

    @_command
    def list(self, *_):
        """List the current user's boxes."""
        log.debug(f"fetching boxes for user={self._user!r}")
        boxes = self._list_boxes()
        log.info(f"fetched boxes for user={self._user!r} count={len(boxes)}")
        print(self._format_boxes(boxes), end="")
        return True

    @_command
    def create(self, known_args, _):
        """Create a box from an image alias or raw AMI id."""
        key_alias = self._preferences.get(
            _Preferences.DEFAULT_KEY_ALIAS.value, self.default_key_alias
        )
        if known_args.key_alias is not None:
            key_alias = known_args.key_alias
            # Remember the explicitly chosen alias as the new local default.
            # NOTE(review): indentation reconstructed — this line is read as
            # inside the if; confirm against the original file.
            self._preferences[_Preferences.DEFAULT_KEY_ALIAS.value] = key_alias
        payload = {
            "instance_type": known_args.instance_type,
            "key_alias": key_alias,
        }
        # "ami-..." values are raw AMI ids; anything else is an image alias.
        if known_args.image.startswith("ami-"):
            payload["ami"] = known_args.image
        else:
            payload["image_alias"] = known_args.image
        if known_args.root_volume_size is not None:
            if not str(known_args.root_volume_size).isdigit():
                log.error(
                    f"root_volume_size={known_args.root_volume_size!r} is not numeric"
                )
                return False
            payload["root_volume_size"] = int(known_args.root_volume_size)
        if payload["instance_type"] is None:
            payload["instance_type"] = self.default_instance_types.get(
                payload.get("image_alias"),
                self.default_instance_type,
            )
        if known_args.connect:
            payload["connect"] = "1"
        if known_args.name != "":
            payload["name"] = known_args.name
        if known_args.instance_tags:
            payload["instance_tags"] = _instance_tags_from_string(
                known_args.instance_tags
            )
        if known_args.ttl.total_seconds() < MIN_TTL.total_seconds():
            log.error(f"ttl={known_args.ttl!r} is below the minimum of {MIN_TTL}")
            return False
        if known_args.ttl.total_seconds() > MAX_TTL.total_seconds():
            log.error(f"ttl={known_args.ttl!r} is above the maximum of {MAX_TTL}")
            return False
        payload["ttl"] = str(int(known_args.ttl.total_seconds()))
        req = self._build_request(
            _pjoin(self._url, "box"),
            data=json.dumps(payload).encode("utf-8"),
            headers={"Content-Type": "application/json"},
            method="POST",
        )
        raw_response = {}
        try:
            with self._urlopen(req) as response:
                raw_response = json.load(response)
            log.info(f"created box for user={self._user!r}")
        except urllib.request.HTTPError as exc:
            # 409 means an equivalent box already exists; the server still
            # returns the box description in the error body.
            if exc.code == 409:
                log.warning("matching box already exists")
                raw_response = json.load(exc)
            else:
                raise exc
        print(self._format_boxes(raw_response["boxes"]), end="")
        return True

    @_command
    def update(self, known_args, _):
        """Update TTL and/or instance tags for all boxes matching a pattern."""
        matching_boxes = self._find_boxes(known_args.box_match)
        if matching_boxes is None:
            log.error(f"no boxes found matching {known_args.box_match!r}")
            return False
        payload = {}
        if known_args.ttl:
            if known_args.ttl.total_seconds() < MIN_TTL.total_seconds():
                log.error(f"ttl={known_args.ttl!r} is below the minimum of {MIN_TTL}")
                return False
            if known_args.ttl.total_seconds() > MAX_TTL.total_seconds():
                log.error(f"ttl={known_args.ttl!r} is above the maximum of {MAX_TTL}")
                return False
        if known_args.instance_tags:
            payload["instance_tags"] = _instance_tags_from_string(
                known_args.instance_tags
            )
        if len(payload) == 0 and known_args.ttl is None:
            log.error(f"no updates specified for {known_args.box_match!r}")
            return False
        for matching_box in matching_boxes:
            box_payload = payload.copy()
            if known_args.ttl:
                # The server expects an absolute TTL measured from box
                # creation, so add the box's current age to the requested TTL.
                box_age = parse_timedelta(matching_box["age"])
                box_payload["ttl"] = str(
                    int(box_age.total_seconds() + known_args.ttl.total_seconds())
                )
                log.debug(
                    f"setting ttl={box_payload['ttl']!r} for "
                    + f"matching_box={matching_box!r}"
                )
            req = self._build_request(
                _pjoin(self._url, "box", matching_box["instance_id"]),
                data=json.dumps(box_payload).encode("utf-8"),
                headers={"Content-Type": "application/json"},
                method="PUT",
            )
            with self._urlopen(req) as response:
                _ = response.read()
            log.info(f"updated box for user={self._user!r} name={matching_box['name']}")
            print(self._format_boxes([matching_box]), end="")
        return True

    @_command
    def delete(self, known_args, _):
        """Delete all boxes matching a pattern."""
        matching_boxes = self._find_boxes(known_args.box_match)
        if matching_boxes is None:
            log.error(f"no boxes found matching {known_args.box_match!r}")
            return False
        for matching_box in matching_boxes:
            req = self._build_request(
                _pjoin(self._url, "box", matching_box["instance_id"]),
                method="DELETE",
            )
            with self._urlopen(req) as response:
                _ = response.read()
            log.info(f"deleted box for user={self._user!r} name={matching_box['name']}")
            print(self._format_boxes([matching_box]), end="")
        return True

    @_command
    def reboot(self, known_args, _):
        """Reboot a single box matching a pattern."""
        matching_box = self._find_box(known_args.box)
        if matching_box is None:
            log.error(f"no box found matching {known_args.box!r}")
            return False
        req = self._build_request(
            _pjoin(self._url, "reboot", matching_box["instance_id"]),
            method="POST",
        )
        with self._urlopen(req) as response:
            _ = response.read()
        log.info(f"rebooted box for user={self._user!r} box={matching_box['name']!r}")
        print(self._format_boxes([matching_box]), end="")
        return True

    @_command
    def ssh(self, known_args, unknown_args):
        """Replace this process with an ssh session into the matching box."""
        matching_box, ok = self._resolve_sshable_box(known_args.box)
        if not ok:
            return False
        ssh_command = self._build_ssh_command(matching_box, unknown_args)
        if not known_args.quiet:
            log.info(
                f"sshing into matching_box={matching_box['name']!r} "
                + f"ssh_command={ssh_command!r}"
            )
            print(self._format_boxes([matching_box]), end="")
        # Flush before exec: the replacement process inherits the fds and any
        # buffered output would otherwise be lost.
        sys.stdout.flush()
        sys.stderr.flush()
        os.execvp("ssh", ssh_command)
        return True

    @_command
    def scp(self, known_args, unknown_args):
        """Replace this process with an scp transfer to/from the matching box."""
        matching_box, ok = self._resolve_sshable_box(known_args.box)
        if not ok:
            return False
        scp_command = self._build_scp_command(matching_box, unknown_args)
        log.info(
            f"scping with matching_box={matching_box['name']!r} "
            + f"scp_command={scp_command!r}"
        )
        print(self._format_boxes([matching_box]), end="")
        sys.stdout.flush()
        sys.stderr.flush()
        os.execvp("scp", scp_command)
        return True

    @_command
    def list_aliases(self, *_):
        """List server-side image aliases."""
        req = self._build_request(_pjoin(self._url, "image-alias"))
        raw_response = {}
        with self._urlopen(req) as response:
            raw_response = json.load(response)
        if "image_aliases" not in raw_response:
            log.error("failed to fetch image aliases")
            return False
        print(self._format_image_aliases(raw_response["image_aliases"]), end="")
        return True

    @_command
    def create_alias(self, known_args, _):
        """Create a server-side image alias for an AMI."""
        payload = {"alias": known_args.alias, "ami": known_args.ami}
        req = self._build_request(
            _pjoin(self._url, "image-alias"),
            data=json.dumps(payload).encode("utf-8"),
            headers={"Content-Type": "application/json"},
            method="POST",
        )
        raw_response = {}
        with self._urlopen(req) as response:
            raw_response = json.load(response)
        log.debug(f"raw created alias response={raw_response!r}")
        if "image_aliases" not in raw_response:
            log.error("failed to create image alias")
            return False
        for key, value in raw_response["image_aliases"].items():
            log.info(f"created alias for user={self._user!r} alias={key} ami={value}")
        print(self._format_image_aliases(raw_response["image_aliases"]), end="")
        return True

    @_command
    def delete_alias(self, known_args, _):
        """Delete a server-side image alias."""
        req = self._build_request(
            _pjoin(self._url, "image-alias", known_args.alias), method="DELETE"
        )
        with self._urlopen(req) as response:
            _ = response.read()
        log.info(f"deleted alias for user={self._user!r} alias={known_args.alias}")
        return True

    @_command
    def get_key(self, known_args, _):
        """Show the stored ssh public key for an alias (default: local default)."""
        key_alias = self._preferences.get(
            _Preferences.DEFAULT_KEY_ALIAS.value, self.default_key_alias
        )
        if known_args.alias is not None:
            key_alias = known_args.alias
            self._preferences[_Preferences.DEFAULT_KEY_ALIAS.value] = key_alias
        # The bare /key endpoint implies the server-side default alias.
        req_url = _pjoin(self._url, "key")
        if key_alias != self.default_key_alias:
            req_url = _pjoin(self._url, "key", key_alias)
        req = self._build_request(req_url, method="GET")
        raw_response = {}
        with self._urlopen(req) as response:
            raw_response = json.load(response)
        print(self._format_keys([raw_response["key"]]), end="")
        return True

    @_command
    def set_key(self, known_args, _):
        """Set the local default key alias (no server interaction)."""
        self._preferences[_Preferences.DEFAULT_KEY_ALIAS.value] = known_args.alias
        log.info(
            f"set key with alias={known_args.alias!r} as local default "
            + f"for user={self._user!r}"
        )
        return True

    @_command
    def list_keys(self, *_):
        """List all ssh public keys stored for this user."""
        req = self._build_request(_pjoin(self._url, "keys"), method="GET")
        raw_response = {}
        with self._urlopen(req) as response:
            raw_response = json.load(response)
        print(self._format_keys(raw_response["keys"]), end="")
        return True

    @_command
    def add_key(self, known_args, _):
        """Upload an ssh public key, deriving an alias from the filename if needed."""
        key_alias = known_args.alias
        if key_alias is None:
            # e.g. "work_rsa.pub" -> "work"; a plain "id_rsa.pub" maps to the
            # default alias.
            key_alias = known_args.filename.name.lower().replace("_rsa.pub", "")
            if key_alias == "id":
                key_alias = self.default_key_alias
        self._preferences[_Preferences.DEFAULT_KEY_ALIAS.value] = key_alias
        payload = {"key_material": known_args.filename.read_text().strip()}
        req_url = _pjoin(self._url, "key")
        if key_alias != self.default_key_alias:
            req_url = _pjoin(self._url, "key", key_alias)
        req = self._build_request(
            req_url,
            method="PUT",
            data=json.dumps(payload).encode("utf-8"),
            headers={"Content-Type": "application/json"},
        )
        raw_response = {}
        with self._urlopen(req) as response:
            raw_response = json.load(response)
        print(self._format_keys([raw_response["key"]]), end="")
        return True

    @_command
    def delete_key(self, known_args, _):
        """Delete an ssh public key stored for this user."""
        key_alias = self._preferences.get(
            _Preferences.DEFAULT_KEY_ALIAS.value, self.default_key_alias
        )
        if known_args.alias is not None:
            key_alias = known_args.alias
            self._preferences[_Preferences.DEFAULT_KEY_ALIAS.value] = key_alias
        req_url = _pjoin(self._url, "key")
        if key_alias != self.default_key_alias:
            req_url = _pjoin(self._url, "key", key_alias)
        req = self._build_request(req_url, method="DELETE")
        raw_response = {}
        with self._urlopen(req) as response:
            raw_response = json.load(response)
        log.info(f"deleted key with alias={key_alias!r} for user={self._user!r}")
        print(self._format_keys([raw_response["key"]]), end="")
        return True

    def _find_box(self, box_search):
        """Return the first box matching the search pattern, or None."""
        results = self._find_boxes(box_search)
        if results is None:
            return None
        return results[0]

    def _find_boxes(self, box_search):
        """Return all boxes whose name or image alias glob-matches, or None."""
        boxes = self._list_boxes()
        results = []
        for box in boxes:
            log.debug(f"finding box_search={box_search!r} considering box={box!r}")
            if box.get("name") is not None and fnmatch.fnmatchcase(
                box["name"], box_search
            ):
                results.append(box)
                continue
            if box.get("image_alias") is not None and fnmatch.fnmatchcase(
                box["image_alias"], box_search
            ):
                results.append(box)
                continue
        if len(results) == 0:
            return None
        return results

    def _list_boxes(self):
        """Fetch the raw box list for the current user."""
        req = self._build_request(self._url)
        raw_response = {}
        with self._urlopen(req) as response:
            raw_response = json.load(response)
        return raw_response["boxes"]

    @contextlib.contextmanager
    def _urlopen(self, request):
        """Open a request with debug logging; yields the live response."""
        log.debug(
            f"attempting request user={self._user!r} method={request.method!r} "
            + f"url={request.full_url!r}"
        )
        with urllib.request.urlopen(request) as response:
            yield response

    @property
    def _url(self):
        # Server base URL; None when FUZZBUCKET_URL is unset.
        return self._env.get("FUZZBUCKET_URL")

    @property
    def _preferences(self):
        # Lazily loaded preference dict, cached until _write_preferences runs.
        if self._cached_preferences is None:
            self._cached_preferences = self._read_preferences()
        return self._cached_preferences

    def _read_preferences(self):
        """Load preferences from the env var or the on-disk JSON file."""
        try:
            if self._env.get("FUZZBUCKET_PREFERENCES") is not None:
                log.debug("reading preferences directly from FUZZBUCKET_PREFERENCES")
                return json.loads(self._env.get("FUZZBUCKET_PREFERENCES"))
            self._preferences_file.touch()
            with self._preferences_file.open() as infile:
                return json.load(infile)
        except json.decoder.JSONDecodeError:
            # A missing or corrupt file degrades to empty preferences.
            log.debug("failed to load preferences; returning empty preferences")
            return {}

    def _write_preferences(self, preferences):
        """Persist preferences to disk (no-op when env var override is set)."""
        if self._env.get("FUZZBUCKET_PREFERENCES") is not None:
            log.debug(
                "skipping writing preferences due to presence of FUZZBUCKET_PREFERENCES"
            )
            return
        preferences["//"] = "WARNING: this file is generated"
        preferences["__updated_at__"] = str(utcnow())
        with self._preferences_file.open("w") as outfile:
            json.dump(preferences, outfile, sort_keys=True, indent=2)
        # Invalidate the cache so the next read reflects the written state.
        self._cached_preferences = None

    @property
    def _preferences_file(self):
        # Ensures the parent directory exists on every access.
        file = pathlib.Path("~/.cache/fuzzbucket/preferences").expanduser()
        file.parent.mkdir(mode=0o750, parents=True, exist_ok=True)
        return file

    @property
    def _credentials_section(self):
        # INI section name keyed by server URL, so multiple servers can coexist.
        return f'server "{self._url}"'

    @property
    def _credentials(self):
        if self._cached_credentials is None:
            self._cached_credentials = self._read_credentials()
        return self._cached_credentials

    @property
    def _credentials_file(self):
        if self._patched_credentials_file is not None:
            return self._patched_credentials_file
        file = pathlib.Path("~/.cache/fuzzbucket/credentials").expanduser()
        file.parent.mkdir(mode=0o750, parents=True, exist_ok=True)
        return file

    @_credentials_file.setter
    def _credentials_file(self, value):
        # Test hook: overrides the on-disk credentials location.
        self._patched_credentials_file = value

    def _read_credentials(self):
        """Return the "user:secret" credentials string ("" when absent)."""
        if self._env.get("FUZZBUCKET_CREDENTIALS") is not None:
            log.debug("reading credentials directly from FUZZBUCKET_CREDENTIALS")
            return self._env.get("FUZZBUCKET_CREDENTIALS")
        self._credentials_file.touch()
        with self._credentials_file.open() as infile:
            creds = configparser.ConfigParser()
            creds.read_file(infile)
            if self._credentials_section not in creds.sections():
                return ""
            return creds.get(self._credentials_section, "credentials")

    def _write_credentials(self, user, secret, name=None):
        """Persist "user:secret" credentials (no-op when env var override is set)."""
        if self._env.get("FUZZBUCKET_CREDENTIALS") is not None:
            log.debug(
                "skipping writing credentials due to presence of FUZZBUCKET_CREDENTIALS"
            )
            return
        creds = configparser.ConfigParser()
        if self._credentials_file.exists():
            with self._credentials_file.open() as infile:
                creds.read_file(infile)
        if self._credentials_section not in creds.sections():
            creds.add_section(self._credentials_section)
        creds.set(self._credentials_section, "credentials", f"{user}:{secret}")
        if name is not None:
            creds.set(self._credentials_section, "name", str(name))
        with self._credentials_file.open("w") as outfile:
            outfile.write(
                "# WARNING: this file is generated " + f"(last update {utcnow()})\n"
            )
            creds.write(outfile)
        self._cached_credentials = None

    @property
    def _user(self):
        # Credentials are "user:secret"; the user part may carry a "--" suffix
        # that is stripped here.
        return self._credentials.split(":")[0].split("--")[0]

    @property
    def _secret(self):
        return self._credentials.split(":")[1]

    def _build_request(self, url, data=None, headers=None, method="GET"):
        """Build a Request with the Fuzzbucket auth headers attached."""
        headers = headers if headers is not None else {}
        req = urllib.request.Request(url, data=data, headers=headers, method=method)
        req.headers["Fuzzbucket-User"] = self._user
        req.headers["Fuzzbucket-Secret"] = self._secret
        return req

    def _resolve_sshable_box(self, box):
        """Return (box, True) when the match exists and has a public DNS name."""
        matching_box = self._find_box(box)
        if matching_box is None:
            log.error(f"no box found matching {box!r}")
            return None, False
        if matching_box.get("public_dns_name") is None:
            log.error(f"no public dns name found for box={matching_box['name']}")
            return None, False
        return matching_box, True

    def _build_ssh_command(self, box, unknown_args):
        """Build the argv for ssh, inferring the login user when -l is absent."""
        if "-l" not in unknown_args:
            unknown_args = [
                "-l",
                self._guess_ssh_user(
                    box.get("image_alias", self.default_image_alias),
                    self.default_ssh_user,
                ),
            ] + unknown_args
        return ["ssh", box.get("public_dns_name")] + self._with_ssh_opts(unknown_args)

    def _build_scp_command(self, box, unknown_args):
        """Build the argv for scp, substituting __BOX__ with user@host."""
        for i, value in enumerate(unknown_args):
            if "__BOX__" not in value:
                continue
            box_value = box["public_dns_name"]
            if "@" not in value:
                box_value = "@".join(
                    [
                        self._guess_ssh_user(
                            box.get("image_alias", self.default_image_alias),
                            self.default_ssh_user,
                        ),
                        box_value,
                    ]
                )
            unknown_args[i] = value.replace("__BOX__", box_value)
        return ["scp"] + self._with_ssh_opts(unknown_args)

    def _with_ssh_opts(self, unknown_args: typing.List[str]) -> typing.List[str]:
        """Prepend host-key-checking opts unless the caller already set them."""
        unknown_args_string = " ".join(unknown_args)
        if (
            re.search(
                " -o StrictHostKeyChecking=.+", unknown_args_string, re.IGNORECASE
            )
            is None
        ):
            unknown_args = ["-o", "StrictHostKeyChecking=no"] + unknown_args
        if (
            re.search(" -o UserKnownHostsFile=.+", unknown_args_string, re.IGNORECASE)
            is None
        ):
            unknown_args = ["-o", "UserKnownHostsFile=/dev/null"] + unknown_args
        return unknown_args

    def _format_boxes(self, boxes):
        # Dispatch to _format_boxes_ini / _format_boxes_json by data_format.
        return getattr(self, f"_format_boxes_{self.data_format.value}")(boxes)

    def _format_boxes_ini(self, boxes):
        boxes_ini = configparser.ConfigParser()
        for box in boxes:
            boxes_ini.add_section(box["name"])
            if box.get("public_ip") is None:
                box["public_ip"] = "(pending)"
            for key, value in box.items():
                if value is None:
                    continue
                boxes_ini.set(box["name"], str(key), str(value))
        buf = io.StringIO()
        boxes_ini.write(buf)
        buf.seek(0)
        return buf.read()

    def _format_boxes_json(self, boxes):
        return json.dumps({"boxes": {box["name"]: box for box in boxes}}, indent=2)

    def _format_keys(self, keys):
        return getattr(self, f"_format_keys_{self.data_format.value}")(keys)

    def _format_keys_ini(self, keys):
        keys_ini = configparser.ConfigParser()
        for i, key in enumerate(keys):
            key_alias = key.get("alias", f"unaliased-key-{i}")
            keys_ini.add_section(key_alias)
            for attr, value in key.items():
                if value is None:
                    continue
                keys_ini.set(key_alias, str(attr), str(value))
        buf = io.StringIO()
        keys_ini.write(buf)
        buf.seek(0)
        return buf.read()

    def _format_keys_json(self, keys):
        return json.dumps({"keys": keys}, indent=2)

    def _format_image_aliases(self, image_aliases):
        return getattr(self, f"_format_image_aliases_{self.data_format.value}")(
            image_aliases
        )

    def _format_image_aliases_ini(self, image_aliases):
        image_aliases_ini = configparser.ConfigParser()
        image_aliases_ini.add_section("image_aliases")
        for alias, ami in sorted(image_aliases.items()):
            image_aliases_ini.set("image_aliases", alias, ami)
        buf = io.StringIO()
        image_aliases_ini.write(buf)
        buf.seek(0)
        return buf.read()

    def _format_image_aliases_json(self, image_aliases):
        return json.dumps({"image_aliases": dict(image_aliases)}, indent=2)

    def _format_valid_ttl(self, ttl):
        return getattr(self, f"_format_valid_ttl_{self.data_format.value}")(ttl)

    def _format_valid_ttl_ini(self, ttl):
        ttl_ini = configparser.ConfigParser()
        ttl_ini.add_section("ttl")
        ttl_ini.set("ttl", "str", str(ttl))
        ttl_ini.set("ttl", "float", str(ttl.total_seconds()))
        buf = io.StringIO()
        ttl_ini.write(buf)
        buf.seek(0)
        return buf.read()

    def _format_valid_ttl_json(self, ttl):
        return json.dumps(
            {"ttl": {"str": str(ttl), "float": str(ttl.total_seconds())}}, indent=2
        )

    @classmethod
    def _guess_ssh_user(cls, image_alias, default=default_ssh_user):
        """Map an image alias to its conventional ssh login user by prefix."""
        image_alias = image_alias.lower()
        for prefix, user in cls.default_ssh_users.items():
            if image_alias.startswith(prefix):
                return user
        return default
# Script entry point: exit with the status code returned by main().
if __name__ == "__main__":  # pragma: no cover
    sys.exit(main())
|
import asyncio
from .middleware import MiddlewareManager
class ResultMiddlewareManager(MiddlewareManager):
    """
    Responsibilities:

    * Execute all middlewares that operate on incoming results (output from Spider).

    .. method:: process_item(item, logger, spider)

        This method is called for each result produced by the spider.

        * run middleware on results from spider.
        * return the item for further processing.
    """

    name = 'result middleware'

    def _add_middleware(self, pipe):
        super()._add_middleware(pipe)
        # Only register middlewares that actually implement the hook.
        handler = getattr(pipe, 'process_item', None)
        if handler is not None:
            self.methods['process_item'].append(handler)

    async def process_item(self, item, logger, spider):
        logger.debug("Handling item: {} (from: {})".format(item, spider.name))
        # Thread the item through each registered handler in registration order.
        for handler in self.methods['process_item']:
            item = handler(item=item, spider=spider)
        return item
|
#!/usr/bin/python3
import simpy
from workload import Workload
from scheduler import Scheduler
from cluster import Cluster
import argparse
import yaml
import json
from os import path
import subprocess
# Parse CLI options: -c/--config selects the benchmark configuration file.
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', help='path to the configuration file for running the simulation.',
                    type=str, default='./configs/e2ebenchmark.yaml')
args = parser.parse_args()
print(args.config)

# Load the top-level benchmark parameters. Context managers fix the file
# handle leaks from the previous `yaml.load(open(...))` pattern.
with open(args.config, 'r') as config_file:
    params = yaml.load(config_file, Loader=yaml.FullLoader)
print(json.dumps(params, indent=4))

# Load the cluster configuration referenced by the benchmark parameters.
cluster_conf_path = params['cluster']['configs']
with open(cluster_conf_path, 'r') as cluster_file:
    cluster_configs = yaml.load(cluster_file, Loader=yaml.FullLoader)
print(params['benchmark'])

statistics_fpath = params['benchmark']['statistics']
with open(statistics_fpath, 'w') as fd:
    # Write the CSV header. NOTE: the misspellings ("transimit",
    # "deseriation") are kept verbatim so downstream consumers keyed on
    # these column names keep working.
    fd.write('appname,scheduler,end2end,remote_read,local_read,transimit(s),compute(s),deseriation(s),serialization(s),task_time\n')

# Sweep scheduling policy x serialization policy x workload; each combination
# is written back into the shared cluster config file and run as a subprocess.
for sched_policy in params['cluster']['policy']['scheduling']:
    cluster_configs['cluster']['scheduling'] = sched_policy
    for ser_policy in params['cluster']['policy']['serialization']:
        cluster_configs['cluster']['serialization'] = ser_policy
        for workload in params['benchmark']['workloads']:
            cluster_configs['benchmark']['workloads'] = [workload]
            # Close (and flush) the config file before the subprocess reads it.
            with open(cluster_conf_path, 'w') as out_file:
                yaml.dump(cluster_configs, out_file)
            result = subprocess.run(['/local0/serverless/serverless-sim/run.py', '--config', cluster_conf_path])
|
import os
import sys
import argparse
import numpy as np
import vespa.common.util.export as util_export
import vespa.common.util.time_ as util_time
import vespa.common.wx_gravy.common_dialogs as common_dialogs
import vespa.common.mrs_data_raw as mrs_data_raw
# Help text displayed by the argparse-based CLI (see create_parser()).
# NOTE(review): original line wrapping inside this literal was reconstructed
# from an indentation-stripped source; confirm against the original file.
DESC = \
"""This utility converts single voxel MRS data from text file format
into the Vespa VIFF file format readable by Vespa-Analysis.
The text file should have two columns of numbers separated
by whitespace. Column one will contain the real data values
and column two will contain the imaginary data values. One
data point will be on each line. The number of lines in the
text file (that are not comments) that have data points will
be assumed to be the number of points in SVS MRS data.
Use the command line flags to set SW, FREQ, RESPPM, MIDPPM,
and NoWater settings for the VIFF file.
"""
def convert_ascii_to_viff(fin, sw=6002.4, freq=123.7, resppm=4.7, scale=1.0, fout='', haswater=False, normalize=False, verbose=False):
    """Convert a two/four-column ASCII FID file into Vespa VIFF XML file(s).

    Writes "<fout>_metab.xml" and, when ``haswater`` is set, "<fout>_water.xml".
    Columns 1/2 hold the real/imaginary metabolite values; columns 3/4 (read
    only when ``haswater``) hold the real/imaginary water values.

    NOTE(review): ``verbose`` is accepted but never used inside this function.
    """
    if not fout:
        fout = fin
    fmetab = os.path.splitext(fout)[0] + "_metab.xml"
    fwater = os.path.splitext(fout)[0] + "_water.xml"

    # Read in entire file
    with open(fin) as f:
        all_lines = f.read().splitlines()

    # Split header/comment lines (starting with '#') from data lines.
    data = []
    hdr = []
    for line in all_lines:
        # get rid of whitespace and check for 'comment' character in front
        if line.lstrip().startswith('#'):
            hdr.append(line)
        else:
            data.append(line)

    dim0 = len(data)
    metab = np.ndarray((dim0,), complex)
    water = np.ndarray((dim0,), complex)
    for i, line in enumerate(data):
        vals = [float(item) for item in line.split()]
        metab[i] = vals[0] + 1j*vals[1]
        if haswater:
            # NOTE(review): assumes four whitespace-separated columns when
            # haswater is set; a short line raises IndexError here.
            water[i] = vals[2] + 1j*vals[3]

    water[0] *= 2.0  # need this because Analysis multiplies all FID data first
    metab[0] *= 2.0  # data points by 0.5 and they made this data up funny.

    if normalize:
        # NOTE(review): indentation reconstructed — the zero guard is read as
        # protecting only the water division; confirm against the original.
        if np.round(np.abs(water[0])) != 0.0:
            water = water / np.abs(water[0])
        metab = metab / np.abs(metab[0])

    water = water * scale
    metab = metab * scale
    # Reshape to the 4D layout (1, 1, 1, npts) expected by DataRaw.
    metab.shape = 1, 1, 1, dim0
    water.shape = 1, 1, 1, dim0

    # Save water and metabolite data to files
    stamp = util_time.now(util_time.ISO_TIMESTAMP_FORMAT).split('T')

    # Build the human-readable summary header embedded in each VIFF file.
    lines = ['Convert_ASCII_to_VIFF ']
    lines.append('------------------------------------------------')
    lines.append('The following information is a summary of the enclosed MRS data.')
    lines.append(' ')
    lines.append('Creation_date - '+stamp[0])
    lines.append('Creation_time - '+stamp[1])
    lines.append(' ')
    lines.append('ASCII File - '+fin)
    lines.append('VIFF File base - '+fout)
    lines.append(' ')
    lines.append('Frequency [MHz] '+str(freq))
    lines.append('Sweep width [Hz] '+str(sw))
    lines.append('Number of points '+str(dim0))
    lines.append('Resonance PPM '+str(resppm))
    lines.append(' ')
    lines.append('------------------------------------------------')
    lines.append('ASCII Comment Lines')
    lines.append(' ')
    lines = lines + hdr
    lines = "\n".join(lines)
    if (sys.platform == "win32"):
        lines = lines.replace("\n", "\r\n")

    msg = ''
    if haswater:
        # Export the water FID first; abort entirely if it cannot be written.
        wat = mrs_data_raw.DataRaw()
        wat.data_sources = [fwater]
        wat.headers = [lines]
        wat.sw = sw
        wat.frequency = freq
        wat.resppm = resppm
        wat.data = water
        filename = fwater
        try:
            util_export.export(filename, [wat], None, lines, False)
        except IOError:
            msg = """I can't write the file "%s".""" % filename
        if msg:
            common_dialogs.message(msg, style=common_dialogs.E_OK)
            return

    # Export the metabolite FID.
    met = mrs_data_raw.DataRaw()
    met.data_sources = [fmetab]
    met.headers = [lines]
    met.sw = sw
    met.frequency = freq
    met.resppm = resppm
    met.data = metab
    filename = fmetab
    try:
        util_export.export(filename, [met], None, lines, False)
    except IOError:
        msg = """I can't write the file "%s".""" % filename
    if msg:
        common_dialogs.message(msg, style=common_dialogs.E_OK)
        return
#------------------------------------------------------------------------------
# Test routines
def bjs_float(x):
    """Coerce *x* to a float (thin wrapper kept for test/debug use)."""
    return float(x)
def create_parser():
    """Build the command-line parser for the converter.

    One optional positional ``infile`` plus flags controlling sweep width,
    center frequency, resonance ppm, scaling, water columns, normalization,
    directory mode, and verbosity.
    """
    parser = argparse.ArgumentParser(prog='convert_ascii2viff',
                                     usage='%(prog)s [options]',
                                     description=DESC)
    parser.add_argument('infile', nargs='?', default=None,
                        help='name of ASCII file to be processed')
    # (flags, keyword arguments) table; registration order fixes --help order.
    option_table = (
        (('-s', '--sw'),
         dict(type=float, dest='sw', default=6002.4,
              help='float, sweep width in Hz ')),
        (('-f', '--freq'),
         dict(type=float, dest='freq', default=123.7,
              help='float, center frequency in MHz ')),
        (('-r', '--resppm'),
         dict(type=float, dest='resppm', default=4.7,
              help='float, on-resonance PPM value ')),
        (('-c', '--scale'),
         dict(type=float, dest='scale', default=1.0,
              help='float, scaling factor applied to FID data')),
        (('-w', '--haswater'),
         dict(dest='haswater', action="store_true",
              help='flag, whether columns 3 and 4 contain water unsuppressed FID data on each line ')),
        (('-n', '--normalize'),
         dict(dest='normalize', action="store_true",
              help='flag, normalize by first point of FID ')),
        (('-d', '--indir'),
         dict(dest='indir', action="store_true",
              help='flag, whether infile should be treated as a directory to be parsed ')),
        (('-v', '--verbose'),
         dict(dest='verbose', action="store_true",
              help='increase output verbosity')),
    )
    for flags, kwargs in option_table:
        parser.add_argument(*flags, **kwargs)
    return parser
def main():
    """CLI entry point: parse arguments and convert ASCII file(s) to VIFF.

    With ``--indir`` the positional argument is treated as a directory and
    every ``*.txt`` file in it is converted; otherwise it names one file.
    Exits via ``parser.error`` (SystemExit) when ``infile`` is missing.
    """
    import glob

    parser = create_parser()
    args = parser.parse_args()

    if args.infile is None:
        # Fix: parser.error() prints usage and exits the process itself; the
        # previous code raised its (never produced) return value.
        parser.error("The 'infile' file name argument is always required. ")

    if args.indir:
        # Treat infile as a directory and process every text file inside it.
        os.chdir(args.infile)
        filelist = glob.glob('./*.txt')
    else:
        filelist = [args.infile, ]

    for item in filelist:
        convert_ascii_to_viff(item, sw=args.sw,
                              freq=args.freq,
                              resppm=args.resppm,
                              scale=args.scale,
                              haswater=args.haswater,
                              normalize=args.normalize,
                              verbose=args.verbose)
# Script entry point.
if __name__ == '__main__':
    main()
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from itertools import count
from typing import Iterable, Iterator, List, Union
from .event import ActualEvent, AtomEvent, CascadeEvent
from .event_linked_list import EventLinkedList
from .event_state import EventState
def _pop(cntr: List[ActualEvent], event_cls_type: type) -> ActualEvent:
    """Pop an event from the related pool, constructing a fresh empty one when drained."""
    if cntr:
        return cntr.pop()
    return event_cls_type(None, None, None, None)
class EventPool:
    """Event pool used to generate and pool event objects.

    The pooling function is disabled by default, in which case this acts as an
    event generator with a buffer. When pooling is enabled it recycles events
    so payload-free event shells can be reused.
    """

    def __init__(self):
        # Free lists of recycled events, kept separately per concrete type.
        self._atom_events: List[AtomEvent] = []
        self._cascade_events: List[CascadeEvent] = []
        # Monotonically increasing id source for generated events.
        self._event_count: Iterator[int] = count()

    @property
    def atom_event_count(self) -> int:
        """Number of pooled (recycled) atom events."""
        return len(self._atom_events)

    @property
    def cascade_event_count(self) -> int:
        """Number of pooled (recycled) cascade events."""
        return len(self._cascade_events)

    def gen(
        self, tick: int, event_type: object, payload: object,
        is_cascade: bool = False
    ) -> ActualEvent:
        """Generate an event.

        Args:
            tick (int): Tick at which the event will be triggered.
            event_type (object): Type of the new event.
            payload (object): Payload attached to this event.
            is_cascade (bool): Whether the new event is a cascade event.

        Returns:
            ActualEvent: AtomEvent or CascadeEvent instance.
        """
        if is_cascade:
            event = _pop(self._cascade_events, CascadeEvent)
        else:
            event = _pop(self._atom_events, AtomEvent)
        event.reset_value(
            id=next(self._event_count), tick=tick, event_type=event_type,
            payload=payload, state=EventState.PENDING
        )
        return event

    def recycle(self, events: Union[ActualEvent, List[ActualEvent], EventLinkedList]) -> None:
        """Recycle the specified event(s) for further use.

        Args:
            events (Union[ActualEvent, List[ActualEvent], EventLinkedList]):
                Event object(s) to recycle.
        """
        # Fix: use a plain if/else instead of a conditional expression that
        # was evaluated purely for its side effects.
        if isinstance(events, ActualEvent):
            self._append(events)
        else:
            self._extend(events)

    def _extend(self, events: Iterable[ActualEvent]) -> None:
        """Recycle every event in an iterable."""
        for event in events:
            self._append(event)

    def _append(self, event: ActualEvent) -> None:
        """Append an event to its related pool; non-events are silently ignored."""
        if isinstance(event, ActualEvent):
            # Detach the payload (and chain link) before recycling so pooled
            # events do not keep payload objects alive.
            event.payload = None
            event.next_event = None
            event.state = EventState.RECYCLING
            if isinstance(event, CascadeEvent):
                self._cascade_events.append(event)
            else:
                assert isinstance(event, AtomEvent)
                self._atom_events.append(event)
|
from setuptools import setup, find_namespace_packages

# Console entry points exposed by the package.
_CONSOLE_SCRIPTS = [
    "affine = nmcipher.cli:affine",
    "caesar = nmcipher.cli:caesar",
    "transposition=nmcipher.cli:transposition",
]

setup(
    name="nmcipher.cli",
    version="0.0.6",
    author="Neil Marshall",
    author_email="neil.marshall@dunelm.org.uk",
    description="A command line parser package for use with basic encryption algorithms",
    packages=find_namespace_packages(include=["nmcipher.*"], exclude=["nmcipher.tests"]),
    install_requires=["nmcipher.affine", "nmcipher.caesar", "nmcipher.transposition"],
    python_requires='>=3.8',
    entry_points={"console_scripts": _CONSOLE_SCRIPTS},
)
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
from ..commonlib.collection_util import is_empty, unzip
from .clr_types import AbstractTraceGC
from .enums import (
GCGlobalMechanisms,
gc_reason,
GCType,
try_get_gc_heap_compact_reason,
try_get_gc_heap_expand_mechanism,
)
from .types import (
EMPTY_MECHANISMS_AND_REASONS,
MechanismsAndReasons,
ProcessInfo,
union_all_mechanisms,
)
def get_mechanisms_and_reasons_for_process_info(proc: ProcessInfo) -> MechanismsAndReasons:
    """Union the mechanisms and reasons observed across every GC of the process."""
    per_gc = (_get_seen_mechanisms_and_reasons_for_single_gc(gc) for gc in proc.gcs)
    res = union_all_mechanisms(per_gc)
    # A process with no GCs must yield an empty result, and vice versa.
    assert res.is_empty() == is_empty(proc.gcs)
    return res
def _get_seen_mechanisms_and_reasons_for_single_gc(gc: AbstractTraceGC) -> MechanismsAndReasons:
    """Collect the mechanisms and reasons recorded for a single GC event."""
    ghh = gc.GlobalHeapHistory
    if ghh is None:
        return EMPTY_MECHANISMS_AND_REASONS
    # Per-heap expand/compact information, decoded pairwise then split apart.
    pairs = (
        (
            try_get_gc_heap_expand_mechanism(phh.ExpandMechanisms),
            try_get_gc_heap_compact_reason(phh.CompactMechanisms),
        )
        for phh in gc.PerHeapHistories
    )
    expand, compact = unzip(pairs)
    return MechanismsAndReasons(
        types=frozenset((GCType(gc.Type),)),
        mechanisms=GCGlobalMechanisms(ghh.GlobalMechanisms),
        reasons=frozenset((gc_reason(gc.Reason),)),
        # TODO: these aren't available on the individual GC.
        # See comment in 'GetTracedProcesses' in managed-lib/Analysis.cs
        heap_expand=frozenset(e for e in expand if e is not None),
        heap_compact=frozenset(c for c in compact if c is not None),
    )
|
from setuptools import setup, find_packages

# Runtime dependencies for the Pyramid application.
requires = [
    'pyramid',
    'pyramid_chameleon',
    'pyramid_debugtoolbar',
    'waitress'
]

setup(
    name='abomid',
    packages=find_packages(),
    install_requires=requires,
    entry_points="""\
[paste.app_factory]
main = abomid:main
""",
)
|
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Template-matching demo: locate the "eyes" template inside the Lena image.
# Both images are loaded as single-channel grayscale (imread flag 0).
img = cv2.imread(r"..\lena.jpg", 0)
template = cv2.imread(r"..\lena_eyes.png", 0)
# shape is (rows, cols) = (height, width); reversing gives (width, height).
tw, th = template.shape[::-1]
# Correlation-coefficient matching: the best match is at the response maximum.
rv = cv2.matchTemplate(img, template, cv2.TM_CCOEFF)
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(rv)
topLeft = maxLoc
bottomRight = (topLeft[0] + tw, topLeft[1] + th)
# Draw the detection rectangle on the grayscale image (intensity 252, width 2).
cv2.rectangle(img, topLeft, bottomRight, 252, 2)
# Left panel: raw response map; right panel: image with the detection box.
plt.subplot(1, 2, 1)
plt.imshow(rv, cmap='gray')
plt.title('Matching Result')
plt.xticks([])
plt.yticks([])
plt.subplot(1, 2, 2)
plt.imshow(img, cmap='gray')
plt.title('Detected Point')
plt.xticks([])
plt.yticks([])
plt.show()
#!/usr/bin/env python3
from pyautogui import alert as pag_alert
from pyautogui import click, position, rightClick, moveTo
from time import sleep

# GUI automation: confirm with the user, run a fixed click sequence,
# then restore the mouse to where it started.
pag_alert("Ok?")
# Remember the cursor position so it can be restored at the end.
prev_pos = position()
click(282, 132)
# Give the UI a moment to react before the context-menu interaction.
sleep(1)
rightClick(1370, 229)
click(1350, 229)
# NOTE(review): coordinates are hard-coded for a specific screen layout — confirm.
moveTo(prev_pos)
# Copyright (c) 2018 Danilo Vargas <danilo.vargas@csiete.org>
# See the COPYRIGHT file for more information
from __future__ import annotations
import os
from cowrie.shell.command import HoneyPotCommand
from cowrie.shell.fs import A_NAME
# Registry mapping shell command names to their HoneyPotCommand classes.
commands = {}
class Command_du(HoneyPotCommand):
    """Fake implementation of the ``du`` disk-usage command for the honeypot shell."""

    def message_help(self):
        """Return the GNU-coreutils-style ``du --help`` text (verbatim runtime string)."""
        return """Usage: du [OPTION]... [FILE]...
 or: du [OPTION]... --files0-from=F
Summarize disk usage of the set of FILEs, recursively for directories.
Mandatory arguments to long options are mandatory for short options too.
-0, --null end each output line with NUL, not newline
-a, --all write counts for all files, not just directories
--apparent-size print apparent sizes, rather than disk usage; although
the apparent size is usually smaller, it may be
larger due to holes in ('sparse') files, internal
fragmentation, indirect blocks, and the like
-B, --block-size=SIZE scale sizes by SIZE before printing them; e.g.,
'-BM' prints sizes in units of 1,048,576 bytes;
see SIZE format below
-b, --bytes equivalent to '--apparent-size --block-size=1'
-c, --total produce a grand total
-D, --dereference-args dereference only symlinks that are listed on the
command line
-d, --max-depth=N print the total for a directory (or file, with --all)
only if it is N or fewer levels below the command
line argument; --max-depth=0 is the same as
--summarize
--files0-from=F summarize disk usage of the
NUL-terminated file names specified in file F;
if F is -, then read names from standard input
-H equivalent to --dereference-args (-D)
-h, --human-readable print sizes in human readable format (e.g., 1K 234M 2G)
--inodes list inode usage information instead of block usage
-k like --block-size=1K
-L, --dereference dereference all symbolic links
-l, --count-links count sizes many times if hard linked
-m like --block-size=1M
-P, --no-dereference don't follow any symbolic links (this is the default)
-S, --separate-dirs for directories do not include size of subdirectories
--si like -h, but use powers of 1000 not 1024
-s, --summarize display only a total for each argument
-t, --threshold=SIZE exclude entries smaller than SIZE if positive,
or entries greater than SIZE if negative
--time show time of the last modification of any file in the
directory, or any of its subdirectories
--time=WORD show time as WORD instead of modification time:
atime, access, use, ctime or status
--time-style=STYLE show times using STYLE, which can be:
full-iso, long-iso, iso, or +FORMAT;
FORMAT is interpreted like in 'date'
-X, --exclude-from=FILE exclude files that match any pattern in FILE
--exclude=PATTERN exclude files that match PATTERN
-x, --one-file-system skip directories on different file systems
--help display this help and exit
--version output version information and exit
Display values are in units of the first available SIZE from --block-size,
and the DU_BLOCK_SIZE, BLOCK_SIZE and BLOCKSIZE environment variables.
Otherwise, units default to 1024 bytes (or 512 if POSIXLY_CORRECT is set).
The SIZE argument is an integer and optional unit (example: 10K is 10*1024).
Units are K,M,G,T,P,E,Z,Y (powers of 1024) or KB,MB,... (powers of 1000).
GNU coreutils online help: <http://www.gnu.org/software/coreutils/>
Report du translation bugs to <http://translationproject.org/team/>
Full documentation at: <http://www.gnu.org/software/coreutils/du>
or available locally via: info '(coreutils) du invocation'\n"""

    def call(self):
        """Entry point: dispatch on the first argument, else list the cwd."""
        # Listing flags (ls-style); both kept off by default.
        self.showHidden = False
        self.showDirectories = False
        path = self.protocol.cwd
        args = self.args
        if args:
            if "-sh" == args[0]:
                # Canned summary response for the very common `du -sh`.
                self.write("28K .\n")
            elif "--help" == args[0]:
                self.write(self.message_help())
            else:
                # Any other argument: fall back to listing the cwd entries.
                self.du_show(path)
        else:
            self.du_show(path, all=True)

    def du_show(self, path, all=False):
        """Emit a fake per-entry disk-usage listing for `path`.

        When `all` is True (no arguments were given) directory entries and a
        grand-total line are written; otherwise one line per file entry.
        """
        try:
            if self.protocol.fs.isdir(path) and not self.showDirectories:
                # Copy the entry list so the fake filesystem is not mutated.
                files = self.protocol.fs.get_path(path)[:]
                if self.showHidden:
                    # Synthesize "." and ".." entries from the directory itself.
                    dot = self.protocol.fs.getfile(path)[:]
                    dot[A_NAME] = "."
                    files.append(dot)
                    # FIXME: should grab dotdot off the parent instead
                    dotdot = self.protocol.fs.getfile(path)[:]
                    dotdot[A_NAME] = ".."
                    files.append(dotdot)
                else:
                    # Hide dot-files by default, like plain `du`.
                    files = [x for x in files if not x[A_NAME].startswith(".")]
                files.sort()
            else:
                files = (self.protocol.fs.getfile(path)[:],)
        except Exception:
            self.write(f"ls: cannot access {path}: No such file or directory\n")
            return
        filenames = [x[A_NAME] for x in files]
        if not filenames:
            return
        for filename in filenames:
            if all:
                isdir = self.protocol.fs.isdir(os.path.join(path, filename))
                if isdir:
                    # NOTE(review): this f-string has no placeholders, so the
                    # entry name is never printed — presumably it should
                    # interpolate `filename`. Confirm against upstream cowrie.
                    filename = f"4 ./(unknown)\n"
                    self.write(filename)
            else:
                # NOTE(review): same placeholder-less f-string pattern here.
                filename = f"4 (unknown)\n"
                self.write(filename)
        if all:
            # Grand-total line, mirroring `du` output for the directory itself.
            self.write("36 .\n")
# Register the command under its shell name.
commands["du"] = Command_du
|
from SimpleTextureAtlas import make_atlas
from PIL import Image
import os
def list_files(directory):
    """Yield the path of every file beneath *directory*, recursing into subdirectories."""
    for entry in os.listdir(directory):
        full = os.path.join(directory, entry)
        if os.path.isfile(full):
            yield full
        if os.path.isdir(full):
            # Recurse and forward everything found below this subdirectory.
            yield from list_files(full)
def pad(image, padding):
    """Return a copy of *image* surrounded by a transparent border of *padding* pixels."""
    new_width = image.width + 2 * padding
    new_height = image.height + 2 * padding
    canvas = Image.new("RGBA", (new_width, new_height), (0, 0, 0, 0))
    # Center the original inside the transparent canvas.
    canvas.paste(image, (padding, padding))
    return canvas
# find paths of all files in directory
paths = list(list_files("example_images"))
# load images from paths
images = [Image.open(path) for path in paths]
# optional — disabled; flip to True to crop each image to its content box.
if False:
    # crop images by removing zero-valued pixels
    images = [image.crop(image.getbbox()) for image in images]
# optional — disabled; flip to True to add a 5px transparent border per image.
if False:
    # add padding to images
    padding = 5
    images = [pad(image, padding) for image in images]
else:
    padding = 0
# make atlas from images
atlas, offsets = make_atlas(images)
# print info
for i in range(len(images)):
    x,y = offsets[i]
    image = images[i]
    # offsets locate the padded image; shift to the content origin.
    x += padding
    y += padding
    print('place image "%s" of size %dx%d at (%d, %d)'
    %(paths[i], image.width, image.height, x, y))
atlas.show()
#atlas.save("atlas.png")
|
#!/usr/bin/env python
"""Character-class constants and translation tables for stripping French accents."""

LOWER_ALPHA = "abcdefghijklmnopqrstuvwxyz"
UPPER_ALPHA = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
ALPHA = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

LOWER_ACCENTS = "âêîôûäëïöüàùèéç"
UPPER_ACCENTS = "ÂÊÎÔÛÄËÏÖÜÀÙÈÉÇ"
ACCENTS = "âêîôûäëïöüàùèéçÂÊÎÔÛÄËÏÖÜÀÙÈÉÇ"

# Map each accented letter onto its bare ASCII counterpart, per case.
LOWER_ACCENTS_TRANS = str.maketrans(LOWER_ACCENTS, "aeiouaeiouaueec")
UPPER_ACCENTS_TRANS = str.maketrans(UPPER_ACCENTS, "AEIOUAEIOUAUEEC")
# Combined table covering both cases (key sets are disjoint, so a plain
# dict merge is equivalent to the `|` union).
ACCENTS_TRANS = {**LOWER_ACCENTS_TRANS, **UPPER_ACCENTS_TRANS}

NUMS = "0123456789"
PUNCTS = ".,:;?!"
QUOTES = "\"'`"
BRACKETS = "(){}[]"

# \n -> line feed (LF)
# \r -> carriage return (CR)
# \t -> tab (TAB)
# \v -> vertical tab (VT)
# \f -> formfeed (FF)
# \b -> backspace (BS)
SPACES = " \n\r\t\v\f\b"
from economic.account_entries import AccountEntry
from economic.query import QueryMixin
from economic.serializer import EconomicSerializer
class AccountingYear(EconomicSerializer, QueryMixin):
    """An accounting-year resource from the e-conomic REST API."""

    base_url = "https://restapi.e-conomic.com/accounting-years/"

    def __unicode__(self):
        return u"Accounting year %s:" % self.year

    def is_closed(self):
        """Whether this accounting year has been closed (False when unknown)."""
        return bool('closed' in self.valid_fields and self.closed)

    def get_account_entries(self, limit=None):
        """Fetch the account entries belonging to this accounting year.

        self.entries is the URL for this year's entries; its query string is
        stripped first because _query appends its own parameters.
        """
        base_entries_url = self.entries.split('?')[0]
        return AccountEntry._query(self.auth, base_entries_url, limit=limit)
|
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
import numpy as np
import time
import argparse
from lib.in_subject_cross_validation import cross_validate_dataset
##
# cross_validate_per_subject_rfe: Performs cross validation on a per-subject basis where 19 sessions are used for
# training and the 20th is used to test.
##
# The number of attributes that should be passed on to the classifier for each class.
# Outer keys are dataset names; inner keys are target class ids mapping to the
# RFE-selected feature count for that class on that dataset.
attribute_counts = {
    'koelstra-approach': {
        0: 25,
        1: 21,
        2: 10
    },
    'koelstra-normalized': {
        0: 8,
        1: 7,
        2: 36
    },
    'au-counts': {
        0: 18,
        1: 16,
        2: 4
    },
    'mahnob-au+face': {
        0: 30
    },
    'mahnob-hr': {
        0: 12,
        1: 5,
        2: 11
    },
    'mahnob-hr-au-count': {
        0: 16,
        1: 36,
        2: 41
    }
}
def print_report(actual, class_id, dataset, predicted):
    """Print confusion matrix, F1 scores and per-class accuracies for a binary run.

    :param actual: ground-truth labels ('low'/'high')
    :param class_id: target class index into attr_names (0-2)
    :param dataset: dataset name, echoed in the machine-readable summary line
    :param predicted: predicted labels aligned with `actual`
    """
    # print() calls and keyword arguments keep this runnable on Python 3 as
    # well as Python 2 (positional labels/pos_label are keyword-only in
    # modern scikit-learn).
    conf_matrix = confusion_matrix(actual, predicted, labels=['low', 'high'])
    print("")
    print(conf_matrix)
    scores = f1_score(actual, predicted, labels=['low', 'high'], pos_label='low', average=None)
    average_f1 = np.average(scores)
    accuracy = accuracy_score(actual, predicted)
    print("\nAverage F1 score: %.3f" % average_f1)
    print("Average accuracy: %.3f" % accuracy)
    print("Low F1 score: %.3f" % scores[0])
    print("High F1 score: %.3f" % scores[1])
    # Per-class accuracy: of the samples whose truth is X, the fraction predicted X.
    low_ratings = [p for (idx, p) in enumerate(predicted) if actual[idx] == 'low']
    high_ratings = [p for (idx, p) in enumerate(predicted) if actual[idx] == 'high']
    print("Low accuracy: %.3f" % (float(low_ratings.count('low')) / len(low_ratings)))
    print("High accuracy: %.3f" % (float(high_ratings.count('high')) / len(high_ratings)))
    attr_names = ["valence", "arousal", "control"]
    # Machine-readable CSV summary line for downstream aggregation scripts.
    print("%s,leave-one-session-out-rfe,%s,%s,%.3f,%.3f" % (
        dataset, attr_names[class_id], time.strftime('%Y-%m-%d'), average_f1, accuracy))
def main():
    """Parse CLI arguments and run leave-one-session-out cross-validation."""
    parser = argparse.ArgumentParser(
        description='Perform cross-validation on the dataset, cross-validating the behavior of all but one subjects '
                    'using an SVM classifier.'
    )
    parser.add_argument('dataset', help='name of the dataset folder')
    parser.add_argument('class_id', type=int, help='target class id, 0-2')
    parser.add_argument('ground_truth_count', type=int, help='number of ground truth values, 1-3')
    parser.add_argument('rfe', type=int, default=1, help='perform RFE?')
    args = parser.parse_args()
    # print() form keeps this runnable on Python 3 as well as Python 2
    # (the original `print args` statement is a syntax error on Python 3).
    print(args)
    # When RFE is disabled, pass None so the classifier sees every attribute.
    attr_count = None if args.rfe == 0 else attribute_counts[args.dataset][args.class_id]
    actual, predicted = cross_validate_dataset(args.dataset, args.class_id, attribute_count=attr_count,
                                               ground_truth_count=args.ground_truth_count)
    print_report(actual, args.class_id, args.dataset, predicted)


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
@author: WZM
@time: 2021/1/29 15:44
@function:
"""
import torch
import torch.nn as nn
import torchvision
# print("PyTorch Version: ", torch.__version__)
# print("Torchvision Version: ", torchvision.__version__)
__all__ = ['DenseNet121', 'DenseNet169', 'DenseNet201', 'DenseNet264']
def Conv1(in_planes, places, stride=2):
    """Stem block: 7x7 conv -> batch norm -> ReLU -> 3x3 max-pool (stride 2)."""
    stem_layers = [
        nn.Conv2d(in_channels=in_planes, out_channels=places, kernel_size=7,
                  stride=stride, padding=3, bias=False),
        nn.BatchNorm2d(places),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
    ]
    return nn.Sequential(*stem_layers)
class _TransitionLayer(nn.Module):
def __init__(self, inplace, plance):
super(_TransitionLayer, self).__init__()
self.transition_layer = nn.Sequential(
nn.BatchNorm2d(inplace),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=inplace, out_channels=plance, kernel_size=1, stride=1, padding=0, bias=False),
nn.AvgPool2d(kernel_size=2, stride=2),
)
def forward(self, x):
return self.transition_layer(x)
class _DenseLayer(nn.Module):
def __init__(self, inplace, growth_rate, bn_size, drop_rate=0):
super(_DenseLayer, self).__init__()
self.drop_rate = drop_rate
self.dense_layer = nn.Sequential(
nn.BatchNorm2d(inplace),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=inplace, out_channels=bn_size * growth_rate, kernel_size=1, stride=1, padding=0,
bias=False),
nn.BatchNorm2d(bn_size * growth_rate),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=bn_size * growth_rate, out_channels=growth_rate, kernel_size=3, stride=1, padding=1,
bias=False),
)
self.dropout = nn.Dropout(p=self.drop_rate)
def forward(self, x):
y = self.dense_layer(x)
if self.drop_rate > 0:
y = self.dropout(y)
return torch.cat([x, y], 1)
class DenseBlock(nn.Module):
    """Stack of dense layers; channel count grows by growth_rate per layer."""

    def __init__(self, num_layers, inplances, growth_rate, bn_size, drop_rate=0):
        super(DenseBlock, self).__init__()
        # Each layer sees the original channels plus everything grown so far.
        self.layers = nn.Sequential(*[
            _DenseLayer(inplances + i * growth_rate, growth_rate, bn_size, drop_rate)
            for i in range(num_layers)
        ])

    def forward(self, x):
        return self.layers(x)
class DenseNet(nn.Module):
    """DenseNet backbone used here as a feature extractor (see forward_tmp).

    Channel bookkeeping: each dense block adds num_layers * growth_rate
    channels; each transition layer then halves the channel count.
    """

    def __init__(self, init_channels=64, growth_rate=32, blocks=[6, 12, 24, 16], num_classes=3):
        super(DenseNet, self).__init__()
        bn_size = 4
        drop_rate = 0
        # Two stems: conv1 for 3-channel (RGB) input, conv1_1 for 1-channel input.
        self.conv1 = Conv1(in_planes=3, places=init_channels)
        self.conv1_1 = Conv1(in_planes=1, places=init_channels)
        num_features = init_channels
        self.layer1 = DenseBlock(num_layers=blocks[0], inplances=num_features, growth_rate=growth_rate, bn_size=bn_size,
                                 drop_rate=drop_rate)
        num_features = num_features + blocks[0] * growth_rate
        self.transition1 = _TransitionLayer(inplace=num_features, plance=num_features // 2)
        num_features = num_features // 2
        self.layer2 = DenseBlock(num_layers=blocks[1], inplances=num_features, growth_rate=growth_rate, bn_size=bn_size,
                                 drop_rate=drop_rate)
        num_features = num_features + blocks[1] * growth_rate
        self.transition2 = _TransitionLayer(inplace=num_features, plance=num_features // 2)
        num_features = num_features // 2
        self.layer3 = DenseBlock(num_layers=blocks[2], inplances=num_features, growth_rate=growth_rate, bn_size=bn_size,
                                 drop_rate=drop_rate)
        num_features = num_features + blocks[2] * growth_rate
        self.transition3 = _TransitionLayer(inplace=num_features, plance=num_features // 2)
        num_features = num_features // 2
        self.layer4 = DenseBlock(num_layers=blocks[3], inplances=num_features, growth_rate=growth_rate, bn_size=bn_size,
                                 drop_rate=drop_rate)
        num_features = num_features + blocks[3] * growth_rate
        # NOTE(review): avgpool/fc are unused by forward_tmp (the classification
        # head lives in DenseNet121 below); kept for the commented-out pipeline.
        self.avgpool = nn.AvgPool2d(4, stride=1)
        self.fc = nn.Linear(num_features, num_classes)

    def forward_tmp(self, x, channels=3):
        """Extract dense features, selecting the stem by input channel count."""
        if channels==3:
            x = self.conv1(x)
        else:
            x = self.conv1_1(x)
        x = self.layer1(x)
        x = self.transition1(x)
        x = self.layer2(x)
        x = self.transition2(x)
        x = self.layer3(x)
        x = self.transition3(x)
        x = self.layer4(x)
        # x = self.avgpool(x)
        # x = x.view(x.size(0), -1)
        # x = self.fc(x)
        return x
class DenseNet121(nn.Module):
    """Three-stream fusion model built on a shared DenseNet-121 backbone.

    The same backbone extracts features from an RGB image and two 1-channel
    maps; the concatenated features pass through a residual fusion head and
    a fully-connected classifier.
    """

    def __init__(self, num_classes=3):
        super(DenseNet121, self).__init__()
        # Shared DenseNet-121 backbone (blocks 6/12/24/16).
        self.net = DenseNet(init_channels=64, growth_rate=32, blocks=[6, 12, 24, 16])
        # Fusion head: 3 streams x 1024 channels = 3072 input channels.
        self.conv1_fusion = nn.Conv2d(3072, 256, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(256)
        self.relu1_fusion = nn.ReLU(inplace=True)
        self.conv2_fusion = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(128)
        self.relu2_fusion = nn.ReLU(inplace=True)
        # 1x1 projection of the raw concatenation for the residual connection.
        self.downsample = nn.Sequential(
            nn.Conv2d(3072, 128, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(128),
        )
        self.conv_fusion = nn.Sequential(
            nn.Conv2d(128, 64, kernel_size=1, stride=1, padding=0, bias=False),
            nn.ReLU(inplace=True),
        )
        # Classifier expects 64 channels at 2x2 spatial resolution.
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(64 * 2 * 2, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(512, 256),
            nn.ReLU(inplace=True),
            nn.Linear(256, num_classes),
        )

    def forward(self, x1, x2, x3, IsUseRGB=1):
        """
        :param x1: [16, 3, 128, 128]
        :param x2: [16, 1, 128, 128]
        :param x3: [16, 1, 128, 128]
        :param IsUseRGB: unused here; see the commented-out branch below
        :return: class logits of shape [batch, num_classes]
        """
        # x1---image ; x2-----dem ; x3 ----slope
        # if IsUseRGB == 1:
        #     x1 = self.features1(x1)
        # else:
        #     x1 = self.features11(x1)
        x1 = self.net.forward_tmp(x1)  # [16, 1024, 4, 4]
        x2 = self.net.forward_tmp(x2, 1)  # [16, 1024, 4, 4]
        x3 = self.net.forward_tmp(x3, 1)  # [16, 1024, 4, 4]
        # Concatenate the three feature streams along channels.
        x = torch.cat((x1, x2, x3), 1)  # [16, 3072, 4, 4]
        h = self.conv1_fusion(x)
        h = self.bn1(h)
        h = self.relu1_fusion(h)
        h = self.conv2_fusion(h)
        h = self.bn2(h)
        # Residual connection from the raw concatenation.
        h += self.downsample(x)
        # NOTE(review): relu1_fusion is reused here (ReLU is stateless).
        h = self.relu1_fusion(h)
        h = self.conv_fusion(h)
        # print(h.shape)
        h = h.view(h.size(0), -1)
        h = self.classifier(h)
        return h
def DenseNet169():
    """DenseNet-169 backbone: dense blocks of 6/12/32/32 layers."""
    return DenseNet(init_channels=64, growth_rate=32, blocks=[6, 12, 32, 32])


def DenseNet201():
    """DenseNet-201 backbone: dense blocks of 6/12/48/32 layers."""
    return DenseNet(init_channels=64, growth_rate=32, blocks=[6, 12, 48, 32])


def DenseNet264():
    """DenseNet-264 backbone: dense blocks of 6/12/64/48 layers."""
    return DenseNet(init_channels=64, growth_rate=32, blocks=[6, 12, 64, 48])
if __name__ == '__main__':
    # Smoke test: build the fusion model and run a forward pass on random inputs.
    # model = torchvision.models.densenet121()
    model = DenseNet121()
    print(model)
    # out = model(torch.randn(8, 3, 128, 128),torch.randn(8, 1, 128, 128), torch.randn(8, 1, 128, 128))
    # 64x64 inputs reach the classifier at the expected 2x2 spatial size.
    out = model(torch.randn(8, 3, 64, 64), torch.randn(8, 1, 64, 64), torch.randn(8, 1, 64, 64))
    print(out.shape)
|
#
# Copyright (c) 2021 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
# util.py
#
# Cleaning utilities for finding errors in varied corpora
#
import numpy as np
import pandas as pd
import sklearn.random_projection
import sklearn.pipeline
import sklearn.linear_model
import sklearn.metrics
import transformers
import text_extensions_for_pandas as tp
# Always run with the latest version of Text Extensions for Pandas
import importlib
tp = importlib.reload(tp)
from typing import *
def train_reduced_model(
    x_values: np.ndarray,
    y_values: np.ndarray,
    n_components: int,
    seed: int,
    max_iter: int = 10000,
) -> sklearn.base.BaseEstimator:
    """
    Train a reduced-quality model by placing a Gaussian random projection in
    front of the multinomial logistic regression stage of the pipeline.

    :param x_values: input embeddings for training set
    :param y_values: integer labels corresponding to embeddings
    :param n_components: number of dimensions to reduce the embeddings to
    :param seed: random seed driving the Gaussian random projection
    :param max_iter: maximum number of iterations of L-BGFS to run. The default
     value of 10000 will achieve a tight fit but takes a while.
    :returns: a model (Python object with a `predict()` method) fit on the
     input training data with the specified level of dimension reduction
     by random projection.
    """
    projection = sklearn.random_projection.GaussianRandomProjection(
        n_components=n_components, random_state=seed
    )
    classifier = sklearn.linear_model.LogisticRegression(
        multi_class="multinomial", max_iter=max_iter
    )
    reduce_pipeline = sklearn.pipeline.Pipeline(
        [("dimred", projection), ("mlogreg", classifier)]
    )
    print(f"Training model with n_components={n_components} and seed={seed}.")
    return reduce_pipeline.fit(x_values, y_values)
def train_model_ensemble(
    training_data: pd.DataFrame,
    labels_col: str,
    x_feats_col: str = "embedding",
    model_sizes=None,
    model_seeds=None,
    max_iters=10000,
):
    """
    Train an ensemble of reduced-quality models by putting a Gaussian
    random projection in front of the multinomial logistic regression
    stage of the pipelines for a set of models.
    Two lists are given of model sizes and seeds, and the cross product
    of the two is the complete set of parameters used to train the models.
    Uses Ray to speed up model training.

    :param training_data: a dataframe containing the BERT embeddings and
     labels for the models to train on.
    :param labels_col: the name of the column containing the labels for the
     model to train on
    :param x_feats_col: the name of the column containing the BERT embeddings
     for each token, off which the model trains
    :param model_sizes: the number of components that the Gaussian random
     projection reduces the BERT embedding to.
    :param model_seeds: seeds for the random initialization of the model.
    :param max_iters: the upper bound on the number of iterations to allow
     the models to train. 100 is fast and 10,000 typically means full convergence
    :returns: A dictionary mapping model names to models (Python object with a
     `predict()` method) fit on the input training data with the specified
     level of dimension reduction by random projection.
    """
    import ray  # TODO: put a note about this in the docstring
    # input logic
    if model_sizes is None:
        model_sizes = [32, 64, 128, 256]
    # Train the largest models first so the longest tasks start earliest.
    model_sizes.reverse()
    if model_seeds is None:
        model_seeds = [1, 2, 3]
    # One model per (size, seed) pair, keyed "size_seed".
    model_params = {
        f"{size}_{seed}": (size, seed) for size in model_sizes for seed in model_seeds
    }
    # training data sets
    # NOTE(review): .values followed later by .to_numpy() suggests these columns
    # hold extension arrays (e.g. TensorArray) — confirm against callers.
    X_train = training_data[x_feats_col].values
    Y_train = training_data[labels_col]
    # Restart Ray to guarantee a clean cluster state for this run.
    if ray.is_initialized():
        ray.shutdown()
    ray.init()
    # wrapper func for ray reduced model training
    @ray.remote
    def train_reduced_model_task(
        x_values: np.ndarray,
        y_values: np.ndarray,
        n_components: int,
        seed: int,
        max_iter: int = max_iters,
    ) -> sklearn.base.BaseEstimator:
        return train_reduced_model(x_values, y_values, n_components, seed, max_iter)
    # Put the training arrays into the object store once, shared by all tasks.
    X_id = ray.put(X_train.to_numpy())
    Y_id = ray.put(Y_train.to_numpy())
    # Launch all training tasks in parallel, then block on the results.
    futures = [
        train_reduced_model_task.remote(
            X_id, Y_id, components, seed, max_iter=max_iters
        )
        for components, seed in model_params.values()
    ]
    results = ray.get(futures)
    # Clean up items we've added to Plasma and shut down ray
    del X_id
    del Y_id
    ray.shutdown()
    # Results come back in submission order, matching model_params iteration.
    models = {name: model for name, model in zip(model_params.keys(), results)}
    return models
def infer_on_df(
    df: pd.DataFrame, id_to_class_dict, predictor, iob=False, embeddings_col="embedding"
):
    """
    Run a trained model over a dataframe of BERT embeddings and attach its
    predictions. When `iob` is set, the predicted class labels are additionally
    decoded into separate IOB-tag and entity-type columns.

    :param df: document on which to perform inference; of the form output by the
     `preprocess_documents` method of this module, containing BERT embeddings
    :param id_to_class_dict: Mapping from class ID to class name, as returned by
     :func:`text_extensions_for_pandas.make_iob_tag_categories`
    :param predictor: Python object with a `predict` method that accepts a
     numpy array of embeddings.
    :param iob: when True, additional logic for IOB-formatted classes is activated
    :param embeddings_col: the column in `df` that contains the BERT embeddings
    :returns: a copy of `df` with `predicted_id`, `predicted_class`, `raw_output`
     (and, with `iob`, `predicted_iob`/`predicted_type`) columns added
    """
    result_df = df.copy()
    raw_outputs = tp.TensorArray(predictor.predict_proba(result_df[embeddings_col]))
    # Hard class decision: index of the highest probability per token.
    result_df["predicted_id"] = np.argmax(raw_outputs, axis=1)
    result_df["predicted_class"] = result_df["predicted_id"].apply(
        lambda class_id: id_to_class_dict[class_id]
    )
    if iob:
        # Split IOB-formatted labels into tag ('I'/'O'/'B') and entity type.
        iob_tags, entity_types = tp.io.conll.decode_class_labels(
            result_df["predicted_class"].values
        )
        result_df["predicted_iob"] = iob_tags
        result_df["predicted_type"] = entity_types
    result_df["raw_output"] = raw_outputs
    return result_df
def infer_and_extract_raw_entites(
    doc: pd.DataFrame,
    id_to_class_dict,
    predictor,
    raw_span_id_col="raw_span_id",
    fold_col="fold",
    doc_col="doc_num",
    agg_func=None,
    keep_cols: List[str] = None,
):
    """
    Takes a dataframe containing BERT embeddings and a model trained on BERT
    embeddings, and runs inference on the dataframe. Then, using references to
    the original spans, reconstructs the predicted value of each token of the
    original tokenization.

    :param doc: the document on which to perform inference; of the form output by the
     `preprocess_documents` method of this module, and containing BERT embeddings,
     references to fold and document numbers, as well as some column containing
     unique identifiers for the raw tokenization of the document
    :param id_to_class_dict: Mapping from class ID to class name, as returned by
     :func:`text_extensions_for_pandas.make_iob_tag_categories`
    :param predictor: Python object with a `predict` method that accepts a
     numpy array of embeddings.
    :param fold_col: the name of the column of `doc` containing the fold of each token
    :param doc_col: the name of the column of `doc` containing the document number
    :param raw_span_id_col: the name of the column of `doc` containing an identifier
     of the raw token that each BERT token came from.
    :param agg_func: if specified, a function that takes in a series of tensorArrays
     and returns a pandas-compatible type; used to aggregate the predictions of
     multiple subtokens when they all describe the same original token.
    :param keep_cols: any columns to carry over to the output dataframe; by default
     'fold', 'doc_num', 'token_id' and 'raw_span' (where present).
    """
    if agg_func is None:
        def agg_func(series: pd.Series):
            # util function for predicting the probabilities of each class when multiple sub-tokens are combined.
            # this method assumes independence between subtoken classes and calculates the probabilities of
            # all subtokens being the same class, then re-normalizes so the vector components sum to one again
            vec = series.to_numpy().prod(axis=0)
            if (
                np.sum(vec) == 0
            ):  # if we underflow, (only happens in rare cases) log everything and continue
                # Work in log2 space, normalize with logaddexp2, and exponentiate back.
                mat = np.log2(series.to_numpy())
                vec = mat.sum(axis=0)
                vec -= np.logaddexp2.reduce(vec)
                return np.exp2(vec)
            return tp.TensorArray(vec / np.sum(vec))
    # build aggregation fields
    keep_cols = (
        keep_cols
        if keep_cols is not None
        else [
            "fold",
            "doc_num",
            "token_id",
            "raw_span",
        ]
    )
    # Columns that identify the original token; used both for grouping and sorting.
    sort_cols = [
        col for col in [fold_col, doc_col, raw_span_id_col] if col in doc.columns
    ]
    keep_cols = [
        c for c in keep_cols if c in doc.columns and c not in sort_cols
    ]  # filter out cols not in df
    # Carried-over columns take their first value per group; predictions aggregate.
    aggby = {k: "first" for k in keep_cols}
    aggby["raw_output"] = agg_func
    df = doc[["embedding"] + keep_cols + sort_cols].copy()
    # first, run inference
    df.loc[:, "raw_output"] = tp.TensorArray(predictor.predict_proba(df["embedding"]))
    # group by original tag
    groupby = df.groupby(sort_cols)
    results_df = groupby.agg(aggby).reset_index().sort_values(sort_cols)
    # repeat translation
    results_df["predicted_id"] = results_df.raw_output.apply(
        lambda s: np.array(s).argmax()
    )
    results_df["predicted_class"] = results_df["predicted_id"].apply(
        lambda p_id: id_to_class_dict[p_id]
    )
    return results_df
def infer_and_extract_entities_iob(
    doc: pd.DataFrame,
    raw_docs: Dict[str, List[pd.DataFrame]],
    id_to_class_dict,
    predictor,
    span_col="span",
    fold_col="fold",
    doc_col="doc_num",
    raw_docs_span_col_name="span",
    predict_on_col="embedding",
):
    """
    Run inference over a dataframe of BERT embeddings, then convert the predicted
    IOB tags into entity spans aligned with the corpus' original tokenization.

    **This method is designed specifically for IOB-formatted labels**

    :param doc: the document on which to perform inference; of the form output by the
     `preprocess_documents` method of this module, and containing BERT embeddings,
     references to fold and document numbers, as well as some column representing
     the tokens of the document
    :param raw_docs: Mapping from fold name ("train", "test", etc.) to list of
     per-document DataFrames as produced by :func:`tp.io.conll.conll_2003_to_documents`.
     These DataFrames must contain the original tokenization as text-extensions spans
    :param id_to_class_dict: Mapping from class ID to class name, as returned by
     :func:`text_extensions_for_pandas.make_iob_tag_categories`
    :param predictor: Python object with a `predict` method that accepts a
     numpy array of embeddings.
    :param span_col: the name of the column of `doc` containing the surface tokens
     as spans
    :param fold_col: the name of the column of `doc` containing the fold of each token
    :param doc_col: the name of the column of `doc` containing the document number
    :param raw_docs_span_col_name: the name of the column of the documents in
     `raw_docs` containing the tokens of those documents as spans
    :param predict_on_col: the name of the column of `doc` containing the BERT
     embedding of each token
    """
    df = doc.copy()
    # First run token-level inference; iob=True adds predicted_iob/predicted_type.
    predicted_df = infer_on_df(
        df, id_to_class_dict, predictor, embeddings_col=predict_on_col, iob=True
    )
    # Convert per-token IOB predictions into entity spans, one document at a time.
    pred_dfs = []
    for fold, doc_num in (
        predicted_df[[fold_col, doc_col]]
        .drop_duplicates()
        .itertuples(index=False, name=None)
    ):
        pred_doc = predicted_df[
            (predicted_df[fold_col] == fold) & (predicted_df[doc_col] == doc_num)
        ].reset_index()
        pred_spans = tp.io.conll.iob_to_spans(
            pred_doc,
            iob_col_name="predicted_iob",
            span_col_name=span_col,
            entity_type_col_name="predicted_type",
        )
        pred_spans.rename(columns={"predicted_type": "ent_type"}, inplace=True)
        # BUG FIX: rename() with a positional mapper renames the *index*; the
        # span column must be renamed explicitly via `columns=` for non-default
        # values of raw_docs_span_col_name to take effect.
        pred_aligned_doc = tp.io.bert.align_bert_tokens_to_corpus_tokens(
            pred_spans,
            raw_docs[fold][doc_num].rename(columns={raw_docs_span_col_name: "span"}),
        )
        pred_aligned_doc[[fold_col, doc_col]] = [fold, doc_num]
        pred_dfs.append(pred_aligned_doc)
    result_df = pd.concat(pred_dfs)
    return result_df
def combine_raw_spans_docs(
    docs: Dict[str, List[pd.DataFrame]], iob_col, token_col, label_col
):
    """
    Merge the folds of an IOB-tagged corpus (train/test/validation, ...) into
    a single DataFrame containing all the entity spans of that corpus.

    Each per-document DataFrame is first converted from IOB token labels to
    labelled entity spans, then all folds are combined with
    :func:`tp.io.conll.combine_folds`.

    :param docs: mapping from fold name ("train", "test", etc.) to a list of
        per-document DataFrames as produced by
        :func:`tp.io.conll.conll_2003_to_documents`; every DataFrame must
        carry a tokenization in the form of a span column
    :param iob_col: name of the column holding the 'I'/'O'/'B' part of the
        IOB label for each token
    :param token_col: name of the column holding the surface tokens of the
        document as spans
    :param label_col: name of the column holding the entity-type label
    :return: one DataFrame with the entity spans of every fold
    """
    span_docs = {
        fold_name: [
            tp.io.conll.iob_to_spans(
                frame,
                iob_col_name=iob_col,
                span_col_name=token_col,
                entity_type_col_name=label_col,
            )
            for frame in fold_frames
        ]
        for fold_name, fold_frames in docs.items()
    }
    return tp.io.conll.combine_folds(span_docs)
def create_f1_score_report(
    predicted_features: pd.DataFrame,
    corpus_label_col: str,
    predicted_label_col: str,
    print_output: bool = False,
):
    """
    Build a per-class precision/recall/F1 report for element-level predictions.

    Takes in a set of non-IOB formatted documents such as those returned by
    `infer_and_extract_entities`, as well as two column names, and compares
    the gold labels against the predicted labels with
    :func:`sklearn.metrics.classification_report`.

    :param predicted_features: DataFrame holding both the gold-standard and
        the predicted label columns, one row per element
    :param corpus_label_col: name of the column with the gold-standard labels
    :param predicted_label_col: name of the column with the predicted labels
    :param print_output: if True, also print the human-readable text report
    :return: the classification report as a pandas DataFrame (one column per
        class plus the averaged rows produced by scikit-learn)
    """
    if print_output:
        # Text report for human consumption; zero_division=0 silences
        # warnings for classes that were never predicted.
        print(
            sklearn.metrics.classification_report(
                predicted_features[corpus_label_col],
                predicted_features[predicted_label_col],
                zero_division=0,
            )
        )
    # Same report as a dict, wrapped into a DataFrame for programmatic use.
    return pd.DataFrame(
        sklearn.metrics.classification_report(
            predicted_features[corpus_label_col],
            predicted_features[predicted_label_col],
            output_dict=True,
            zero_division=0,
        )
    )
def create_f1_score_report_iob(
    predicted_ents: pd.DataFrame,
    corpus_ents: pd.DataFrame,
    span_id_col_names: List[str] = None,
    entity_type_col_name: str = "ent_type",
    simple: bool = False,
):
    """
    Calculate precision, recall and F1 scores for the given predicted
    entities against the gold-standard corpus entities.

    In normal operation the function computes class-wise precision, recall
    and F1 figures plus micro- and macro-averaged rows, returned as a pandas
    DataFrame. In 'simple' mode it computes only micro-averaged precision,
    recall and F1 and returns them as a dictionary.

    :param predicted_ents: entities predicted by the model, one entity per
        row, with an entity-type column
    :param corpus_ents: ground-truth entities, one entity per row, with an
        entity-type column
    :param span_id_col_names: columns that together uniquely identify each
        entity; defaults to ``["fold", "doc_num", "span"]`` to be compatible
        with `combine_raw_spans_docs` and `infer_and_extract_entities_iob`
    :param entity_type_col_name: name of the entity-type column present in
        both DataFrames
    :param simple: if False (default), return the full per-class DataFrame;
        if True, return a dict with keys ``"precision"``, ``"recall"`` and
        ``"f1_score"``
    :returns: a DataFrame (``simple=False``) or a dict (``simple=True``)
    """
    # Avoid a shared mutable default argument.
    if span_id_col_names is None:
        span_id_col_names = ["fold", "doc_num", "span"]
    # True positives: entities whose id columns AND type match in both frames.
    inner = predicted_ents.merge(
        corpus_ents, on=span_id_col_names + [entity_type_col_name], how="inner"
    )
    if simple:
        precision = inner.shape[0] / predicted_ents.shape[0]
        recall = inner.shape[0] / corpus_ents.shape[0]
        denom = precision + recall
        # Guard the harmonic mean against 0/0 when nothing matches.
        f1 = 2 * precision * recall / denom if denom > 0 else 0.0
        return {"precision": precision, "recall": recall, "f1_score": f1}
    # Per-class counts.  Work on copies: the original version added the
    # helper columns to the callers' DataFrames in place.
    inner["true_positives"] = 1
    inner_counts = inner.groupby(entity_type_col_name).agg({"true_positives": "count"})
    pos = predicted_ents.copy()
    pos["predicted_positives"] = 1
    positive_counts = pos.groupby(entity_type_col_name).agg(
        {"predicted_positives": "count"}
    )
    actuals = corpus_ents.copy()
    actuals["actual_positives"] = 1
    actual_counts = actuals.groupby(entity_type_col_name).agg(
        {"actual_positives": "count"}
    )
    stats = pd.concat([inner_counts, positive_counts, actual_counts], axis=1)
    # Micro average: ratios of the summed counts.  (DataFrame.append was
    # removed in pandas 2.0; use pd.concat.)
    micro = stats.sum()
    micro.name = "Micro-avg"
    stats = pd.concat([stats, micro.to_frame().T])
    stats["precision"] = stats.true_positives / stats.predicted_positives
    stats["recall"] = stats.true_positives / stats.actual_positives
    # Macro average: unweighted mean over the per-class rows only.  The
    # previous implementation averaged over the Micro-avg row as well,
    # which skewed the macro figures toward the micro values.
    macro = stats.drop(index="Micro-avg").mean()
    macro.name = "Macro-avg"
    stats = pd.concat([stats, macro.to_frame().T])
    # F1 per row; for the averaged rows this derives F1 from the averaged
    # precision/recall, matching the original convention.
    stats["f1_score"] = (
        2 * (stats.precision * stats.recall) / (stats.precision + stats.recall)
    )
    stats["support"] = stats["actual_positives"]
    # Support is meaningless for the averaged rows; NaN keeps the column
    # float-typed.
    stats.loc[["Micro-avg", "Macro-avg"], "support"] = float("nan")
    stats = stats.drop(columns=[col for col in stats.columns if "positives" in col])
    return stats
def flag_suspicious_labels(
    predicted_features: Dict[str, pd.DataFrame],
    corpus_label_col: str,
    predicted_label_col: str,
    label_name=None,
    gold_feats: pd.DataFrame = None,
    align_over_cols: List[str] = None,
    keep_cols: List[str] = None,
):
    """
    Takes in the outputs of a number of models and correlates the elements
    they correspond to with the respective elements in the raw corpus labels.
    It then aggregates these model results according to their values and
    whether or not they agree with the corpus.

    :param predicted_features: mapping from model name to that model's output
        DataFrame; each DataFrame must contain `align_over_cols`, `keep_cols`
        and the predicted label column
    :param corpus_label_col: name of the column holding the corpus ("gold") label
    :param predicted_label_col: name of the column holding the predicted label
    :param label_name: name for the combined label column of the outputs;
        defaults to ``"class"``
    :param gold_feats: DataFrame to read the gold labels from; defaults to
        the first entry of `predicted_features`
    :param align_over_cols: columns that uniquely identify each element;
        defaults to ``["fold", "doc_num", "raw_span_id"]``
    :param keep_cols: extra columns carried through to the outputs;
        defaults to ``["raw_span"]``
    :returns: two pandas DataFrames:
     * `in_gold`: elements in the corpus but with low agreement among the
       models, sorted by least agreement upwards
     * `not_in_gold`: elements not in the corpus labels but for which there
       is high agreement among the models, sorted by most agreement first
    These DataFrames have the following columns:
     * `in_gold`: whether the element is in the corpus "gold standard"
     * `count`: the number of models in agreement on this datapoint
     * `models`: the names of the models in agreement on that datapoint, as
       listed in the `predicted_features` dictionary (plus "GOLD")
    """
    # Build the default column lists per call instead of sharing mutable
    # default-argument lists across calls.
    if align_over_cols is None:
        align_over_cols = ["fold", "doc_num", "raw_span_id"]
    if keep_cols is None:
        keep_cols = ["raw_span"]
    df_cols = align_over_cols + keep_cols
    if label_name is None:
        label_name = "class"
    # Gold rows: tagged "GOLD"; they do not contribute to the agreement count.
    if gold_feats is None:
        gold_feats = predicted_features[next(iter(predicted_features))]
    gold_df = gold_feats[df_cols + [corpus_label_col]].copy()
    gold_df["models"] = "GOLD"
    gold_df["in_gold"] = True
    gold_df.rename(columns={corpus_label_col: label_name}, inplace=True)
    features_list = [gold_df]
    # One frame per model, tagged with the model's name.
    for model_name, model_df in predicted_features.items():
        model_pred_df = model_df[df_cols + [predicted_label_col]].copy()
        model_pred_df["models"] = model_name
        model_pred_df["in_gold"] = False
        model_pred_df.rename(columns={predicted_label_col: label_name}, inplace=True)
        features_list.append(model_pred_df)
    all_features = pd.concat(features_list)
    # Each model row counts as one "vote"; gold rows count zero.
    all_features["count"] = 1
    all_features.loc[all_features.in_gold, "count"] = 0
    aggby = {"in_gold": "any", "count": "sum", "models": lambda x: list(x)}
    aggby.update({col: "first" for col in keep_cols})
    grouped_features = (
        all_features.groupby(align_over_cols + [label_name]).agg(aggby).reset_index()
    )
    # Secondary ordering by element id; the stable mergesorts below preserve it.
    grouped_features.sort_values(
        ["count"] + align_over_cols, ascending=False, inplace=True
    )
    in_gold = grouped_features[grouped_features.in_gold].sort_values(
        "count", ascending=True, kind="mergesort"
    )
    not_in_gold = grouped_features[~grouped_features.in_gold].sort_values(
        "count", ascending=False, kind="mergesort"
    )
    return in_gold, not_in_gold
|
import cv2
from sklearn.externals import joblib
from .image_preprocessing import get_features
from ..config import config
def predict_sum(image_path):
    """
    Predict the total value of the coins visible in an image and display
    the annotated result in an OpenCV window.

    :param image_path: path to the input image file
    :return: the predicted coin sum (also drawn onto the displayed image)
    :raises FileNotFoundError: if the image cannot be read
    """
    # NOTE(review): the module imports joblib from `sklearn.externals`,
    # which was removed in scikit-learn 0.23 — the standalone `joblib`
    # package is the modern replacement; confirm the pinned sklearn version.
    print('Read image...')
    img = cv2.imread(image_path)
    if img is None:
        # cv2.imread signals failure by returning None, not by raising.
        raise FileNotFoundError('Could not read image: {}'.format(image_path))
    print('Image read. Loading classifier...')
    clf = joblib.load(config['CLASSIFIER']['TrainedModelPath'])
    print('Getting features...')
    x_test, img_with_coin = get_features(img)
    # Sum the predicted denomination of every detected coin.
    y_sum = sum(int(clf.predict([features])[0]) for features in x_test)
    print('Sum = {}'.format(y_sum))
    color = (255, 0, 0)  # BGR: blue
    font = cv2.FONT_HERSHEY_DUPLEX
    cv2.putText(img_with_coin, str(y_sum), (150, 200), font, 6, color, 2)
    cv2.imshow("Result", img_with_coin)
    cv2.waitKey()
    # Close the HighGUI window once a key has been pressed.
    cv2.destroyAllWindows()
    return y_sum
|
from __future__ import print_function
import numpy as np
import theano
import theano.tensor as T
import lasagne
import ctc
# Toy CTC experiment: two stacked bidirectional vanilla-RNN layers followed
# by a linear per-timestep projection to class scores, trained with the
# CPU CTC loss binding (`ctc.cpu_ctc_th`).

# Problem and model hyperparameters.
num_classes = 5
mbsz = 1          # minibatch size
min_len = 12
max_len = 12
n_hidden = 100    # hidden units per recurrent direction
grad_clip = 100   # gradient clipping threshold inside the recurrent layers

# Symbolic CTC inputs: per-example input lengths, the flattened label
# sequence, and per-example label lengths.
input_lens = T.ivector('input_lens')
output = T.ivector('output')
output_lens = T.ivector('output_lens')

l_in = lasagne.layers.InputLayer(shape=(mbsz, max_len, num_classes))
# First bidirectional layer: forward and backward passes, summed elementwise.
h1f = lasagne.layers.RecurrentLayer(l_in, n_hidden, grad_clipping=grad_clip,
                                    nonlinearity=lasagne.nonlinearities.rectify)
h1b = lasagne.layers.RecurrentLayer(l_in, n_hidden, grad_clipping=grad_clip,
                                    nonlinearity=lasagne.nonlinearities.rectify, backwards = True)
h1 = lasagne.layers.ElemwiseSumLayer([h1f, h1b])
# Second bidirectional layer over the summed first-layer outputs.
h2f = lasagne.layers.RecurrentLayer(h1, n_hidden, grad_clipping=grad_clip,
                                    nonlinearity=lasagne.nonlinearities.rectify)
h2b = lasagne.layers.RecurrentLayer(h1, n_hidden, grad_clipping=grad_clip,
                                    nonlinearity=lasagne.nonlinearities.rectify, backwards = True)
h2 = lasagne.layers.ElemwiseSumLayer([h2f, h2b])
# Linear recurrent layer producing unnormalised class scores per timestep.
h3 = lasagne.layers.RecurrentLayer(h2, num_classes, grad_clipping=grad_clip,
                                   nonlinearity=lasagne.nonlinearities.linear)
# Turn <minibatch_size, max_length, num_classes> into <max_length, minibatch_size, num_classes>
l_out = lasagne.layers.DimshuffleLayer(h3, (1, 0, 2))

network_output = lasagne.layers.get_output(l_out)
cost = T.mean(ctc.cpu_ctc_th(network_output, input_lens, output, output_lens))
# Gradient w.r.t. the network *output* (not the parameters) — compiled into
# get_grad below purely for debugging/inspection.
grads = T.grad(cost, wrt=network_output)
all_params = lasagne.layers.get_all_params(l_out)
updates = lasagne.updates.adam(cost, all_params, 0.001)

train = theano.function([l_in.input_var, input_lens, output, output_lens], cost, updates=updates,
                        allow_input_downcast=True)
predict = theano.function([l_in.input_var], network_output,
                          allow_input_downcast=True)
get_grad = theano.function([l_in.input_var, input_lens, output, output_lens], grads,
                           allow_input_downcast=True)

from loader import DataLoader

data_loader = DataLoader(mbsz=mbsz, min_len=min_len, max_len=max_len, num_classes=num_classes)

# Endless training loop: train on one sampled batch per iteration, print the
# cost, the one-hot input decoded by argmax, the per-frame argmax of the
# prediction, and the expected label sequence; drop into a debugger at
# iteration 10000 to inspect the output gradients.
i = 1
while True:
    i += 1
    print(i)
    sample = data_loader.sample()
    cost = train(*sample)
    out = predict(sample[0])
    print(cost)
    print("input", sample[0][0].argmax(1))
    print("prediction", out[:, 0].argmax(1))
    print("expected", sample[2][:sample[3][0]])
    if i == 10000:
        grads = get_grad(*sample)
        import ipdb; ipdb.set_trace()
|
# Copyright 2022 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
import tensorflow as tf
import torch
from fastestimator.backend import binary_crossentropy
from fastestimator.op.tensorop.loss.focal_loss import focal_loss
class TestFocalLoss(unittest.TestCase):
    """Checks `focal_loss` against binary cross-entropy and known values,
    for both the TensorFlow and the PyTorch code paths."""

    def test_focal_loss_bc_tf(self):
        # With gamma and alpha disabled, focal loss reduces to plain BCE.
        y_true = tf.constant([[1], [1], [1], [0], [0], [0]])
        y_pred = tf.constant([[0.97], [0.91], [0.73], [0.27], [0.09], [0.03]])
        loss = focal_loss(y_pred=y_pred, y_true=y_true,
                          gamma=None, alpha=None)  # 0.1464
        reference = binary_crossentropy(y_pred=y_pred, y_true=y_true)
        self.assertAlmostEqual(reference, loss, delta=0.0001)

    def test_focal_loss_tf(self):
        # Known reference value for gamma=2.0, alpha=0.25.
        y_true = tf.constant([[1], [1], [1], [0], [0], [0]])
        y_pred = tf.constant([[0.97], [0.91], [0.73], [0.27], [0.09], [0.03]])
        loss = focal_loss(y_pred=y_pred, y_true=y_true, gamma=2.0, alpha=0.25)
        self.assertAlmostEqual(0.004, loss, delta=0.0001)

    def test_focal_loss_bc_torch(self):
        # Same BCE-equivalence check, torch backend.
        y_true = torch.tensor([[1], [1], [1], [0], [0], [0]]).to(torch.float32)
        y_pred = torch.tensor(
            [[0.97], [0.91], [0.73], [0.27], [0.09], [0.03]]).to(torch.float32)
        loss = focal_loss(y_pred=y_pred, y_true=y_true, gamma=None, alpha=None)
        reference = binary_crossentropy(y_pred=y_pred, y_true=y_true)
        self.assertAlmostEqual(reference, loss, delta=0.0001)

    def test_focal_loss_torch(self):
        # Same known-value check, torch backend.
        y_true = torch.tensor([[1], [1], [1], [0], [0], [0]]).to(torch.float32)
        y_pred = torch.tensor(
            [[0.97], [0.91], [0.73], [0.27], [0.09], [0.03]]).to(torch.float32)
        loss = focal_loss(y_pred=y_pred, y_true=y_true, gamma=2.0, alpha=0.25)
        self.assertAlmostEqual(0.004, loss, delta=0.0001)
|
"""Serializer of the operations details"""
# DRF
from rest_framework import serializers
from rest_framework.validators import UniqueValidator
# Models
from root.Acme.models import Category, Product, OrderRequest, OperationDetail
from root.users.models import User, Profile
class OperationsModelSerializer(serializers.ModelSerializer):
    """Acme operations details model serializer.

    Exposes the foreign-key ids, the total quantity and the total charge of
    an ``OperationDetail`` row.
    """

    class Meta:
        """Meta class"""

        model = OperationDetail
        fields = ["category_id", "user_id", "product_id", "type_operation_id", "quantity_total", "total_charge"]
        # The primary key is generated by the database, never set by clients.
        read_only_fields = [
            "id",
        ]
class CreateOperationsSerializer(serializers.ModelSerializer):
    """Handle the creation of an operation record.

    Validates that every referenced row exists, that enough stock is
    available and that the buyer's profile holds enough funds, then creates
    the ``OperationDetail`` and updates the product stock and the profile
    balance.
    """

    category_id = serializers.CharField(required=True)
    user_id = serializers.CharField(required=True)
    product_id = serializers.CharField(required=True)
    type_operation_id = serializers.CharField(required=True)
    quantity_total = serializers.IntegerField(required=True)

    class Meta:
        """Meta class."""

        model = OperationDetail
        fields = ["category_id", "user_id", "product_id", "type_operation_id", "quantity_total"]
        read_only_fields = ["id", "total_charge"]

    def validate_category_id(self, data):
        """Validate that the category to be assigned exists."""
        try:
            Category.objects.get(id=data)
        except Category.DoesNotExist:
            raise serializers.ValidationError("🚨 The category does not exist with that name. 🚨")
        return data

    def validate_user_id(self, data):
        """Validate that the user to be assigned exists."""
        try:
            User.objects.get(id=data)
        except User.DoesNotExist:
            raise serializers.ValidationError("🚨 The user does not exist. 🚨")
        return data

    def validate_product_id(self, data):
        """Validate that the product to be assigned exists."""
        try:
            Product.objects.get(id=data)
        except Product.DoesNotExist:
            raise serializers.ValidationError("🚨 The product does not exist with that name. 🚨")
        return data

    def validate_type_operation_id(self, data):
        """Validate that the order request to be assigned exists."""
        try:
            # No binding needed; the previous `ord` name shadowed a builtin.
            OrderRequest.objects.get(id=data)
        except OrderRequest.DoesNotExist:
            raise serializers.ValidationError("🚨 The order request does not exist with that name. 🚨")
        return data

    def validate(self, data):
        """Validate the existence of sufficient items for the transaction and
        that there are sufficient funds for the transaction."""
        # Fetch the product once instead of issuing two identical queries.
        product = Product.objects.get(id=data["product_id"])
        price_total = product.price * data["quantity_total"]
        if data["quantity_total"] > product.quantity:
            raise serializers.ValidationError("🚨 There are not enough items to make the transaction. 🚨")
        # NOTE(review): this assumes the Profile primary key equals the user
        # id — confirm against the Profile model definition.
        if price_total > Profile.objects.get(id=data["user_id"]).initial_balance:
            raise serializers.ValidationError("🚨 There are not enough funds to carry out the transaction. 🚨")
        return data

    def create(self, data):
        """Create operation and update stats."""
        _category = Category.objects.get(id=data["category_id"])
        _user = User.objects.get(id=data["user_id"])
        _produc = Product.objects.get(id=data["product_id"])
        _order_rqst = OrderRequest.objects.get(id=data["type_operation_id"])
        _total_charge = _produc.price * data["quantity_total"]
        # Decrement the stock AND persist it: the previous version updated
        # the in-memory object but never called save(), so the stock
        # reduction was silently lost.
        _produc.quantity = _produc.quantity - data["quantity_total"]
        _produc.save()
        op_detail = OperationDetail.objects.create(
            category_id=_category,
            user_id=_user,
            product_id=_produc,
            type_operation_id=_order_rqst,
            quantity_total=data["quantity_total"],
            total_charge=_total_charge,
        )
        # Debit the buyer's balance.
        profile = Profile.objects.get(id=data["user_id"])
        profile.initial_balance -= _total_charge
        profile.save()
        return op_detail
|
# trim_prot_alignments.py
# This script is to trim the protein aligned exons for all samples based on the ref exon in ORF.
# The input will be the EXON_NAME_AA.fasta and the output will be a EXON_NAME_AA_trimmed.fasta file with their
# trimmed protein alignments
# This script can be executed by running $ python trim_prot_alignments.py EXON_NAME
# Made by: Elfy Ly
# Date: 2 July 2020
import sys
import os
import re
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
EXON = sys.argv[1]
ACGT_LENGTH_PERC_THRESH = 0.35
NX_PERC_TRESH = 0.20
DIVERSE_PERC_THRESH = 0.30
def natural_sort(l):
    """Sort strings so that embedded numbers compare numerically
    (e.g. ``x2`` sorts before ``x10``); letters compare case-insensitively."""
    def _token(text):
        return int(text) if text.isdigit() else text.lower()

    def _key(value):
        return [_token(chunk) for chunk in re.split('([0-9]+)', value)]

    return sorted(l, key=_key)
# Create directory of given path if it doesn't exist
def create_dir(path):
    """Create *path* if it is missing, reporting what happened."""
    if os.path.exists(path):
        print("Directory ", path, " already exists")
    else:
        os.mkdir(path)
        print("Directory ", path, " Created ")
def create_ffasta(path):
    """Create (or truncate to empty) the fasta file at *path*."""
    with open(path, "w+"):
        pass
def replace_to_n(record):
    """Return record.seq with gaps ('-') and frameshifts ('!') replaced by
    'N', while registering every (1-based) position's resulting base in the
    module-level ``nt_dict[EXON]`` under the record id."""
    per_position = {}
    nt_dict[EXON][record.id] = per_position
    new_seq = ""
    for position, base in enumerate(record.seq, start=1):
        # '!' marks a frameshift, '-' a gap; both become ambiguous 'N'.
        if base == "-" or base == "!":
            base = "N"
        new_seq += base
        per_position[position] = base
    return new_seq
def count_bases(new_seq):
    """Count the unambiguous (non-'N') bases in *new_seq*."""
    return sum(1 for base in new_seq if base != "N")
# create empty dictionary to count x's per position
def create_empty_nested_dict(new_dict, old_dict):
    """Initialise ``new_dict[EXON]`` with a 0 counter for every position.

    Relies on the module-level ``EXON`` name.  NOTE(review):
    ``new_dict[EXON]`` is re-created on every sample iteration, so the
    result reflects the positions of the *last* sample only — this is
    harmless only if all samples share the same positions; confirm the
    upstream alignment guarantees that.
    """
    # for exon in old_dict:
    for sample in old_dict[EXON]:
        new_dict[EXON] = {}
        for position in old_dict[EXON][sample]:
            new_dict[EXON][position] = 0
    return new_dict
# fill in the dictionary the number of x's per position
def count_X_dict(new_dict, old_dict):
    """Accumulate, per exon and position, how many samples carry an 'X'."""
    for exon, samples in old_dict.items():
        for positions in samples.values():
            for position, aa in positions.items():
                if aa == "X":
                    new_dict[exon][position] += 1
    return new_dict
def create_empty_trimmed_dict(aa_dict):
    """Mirror aa_dict's exon/sample structure with empty per-sample dicts."""
    return {
        exon: {sample: {} for sample in samples}
        for exon, samples in aa_dict.items()
    }
def calculate_nx_thresh(aa_dict, exon):
    """Maximum tolerated number of 'X' amino acids at one position,
    i.e. the module-level ``NX_PERC_TRESH`` fraction of the sample count."""
    return NX_PERC_TRESH * len(aa_dict[exon])
# initialize the nucleotides positions (regions) for each codon
# 1 aa position consists of 3 nt positions (1 AA - 1 NT, 2 NT, 3 NT)(2e AA - 4 NT, 5 NT, 6 NT) etc.
def initialize_nt_positions(aa_position):
    """Map a 1-based amino-acid position to its three 1-based nucleotide
    positions, e.g. aa 1 -> [1, 2, 3], aa 2 -> [4, 5, 6]."""
    first = (aa_position - 1) * 3 + 1
    return [first, first + 1, first + 2]
# counts for each exon per position if internal stop codons are present
def count_stop_dict(new_dict, trimmed_aa_x_dict, aa_dict):
    """Count internal stop codons ('*') per exon and position.

    A '*' at a sample's final position (per aa_dict) is a terminal stop and
    is not counted.
    """
    for exon, samples in trimmed_aa_x_dict.items():
        for sample, positions in samples.items():
            final_position = len(aa_dict[exon][sample])
            for position, aa in positions.items():
                if aa == "*" and position != final_position:
                    new_dict[exon][position] += 1
    return new_dict
def append_trimmed_dict(stop_dict, aa_dict, trimmed_aa_x_dict, trimmed_aa_x_stop_dict,
                        trimmed_nt_x_dict, trimmed_nt_x_stop_dict):
    """Copy stop-free positions into the *_stop output dicts.

    For every amino-acid position whose internal-stop count is zero, the
    amino acid and its three underlying nucleotides are carried over for
    every sample; positions with internal stop codons are dropped.
    """
    for exon, per_position_stops in stop_dict.items():
        for sample in aa_dict[exon]:
            for aa_position, n_stops in per_position_stops.items():
                if n_stops != 0:
                    continue
                # appends aa to the trimmed aa dictionary: no internal stops here
                trimmed_aa_x_stop_dict[exon][sample][aa_position] = \
                    trimmed_aa_x_dict[exon][sample][aa_position]
                for nt_position in initialize_nt_positions(aa_position):
                    trimmed_nt_x_stop_dict[exon][sample][nt_position] = \
                        trimmed_nt_x_dict[exon][sample][nt_position]
def count_nstops(stop_dict):
    """Number of exon positions with at least one internal stop codon."""
    return sum(
        1
        for per_position in stop_dict.values()
        for n_stops in per_position.values()
        if n_stops > 0
    )
def create_empty_diversity_dict(count_aa_diversity, aa_dict, trimmed_aa_x_stop_dict):
    """Prepare an empty amino-acid frequency dict for every exon position
    that survived the previous trimming steps."""
    for exon in aa_dict:
        count_aa_diversity[exon] = {}
        for positions in trimmed_aa_x_stop_dict[exon].values():
            for aa_position in positions:
                count_aa_diversity[exon][aa_position] = {}
    return count_aa_diversity
def count_diversity_dict(count_aa_diversity, trimmed_aa_x_stop_dict):
    """Tally amino-acid frequencies per exon position across all samples."""
    for exon in count_aa_diversity:
        for positions in trimmed_aa_x_stop_dict[exon].values():
            for aa_position, aa in positions.items():
                frequencies = count_aa_diversity[exon][aa_position]
                frequencies[aa] = frequencies.get(aa, 0) + 1
    return count_aa_diversity
def calculate_div_thresh(aa_dict, exon):
    """Minimum sample count the most prevalent amino acid must exceed for a
    position to be kept (module-level ``DIVERSE_PERC_THRESH`` fraction of
    the exon's sample count)."""
    return len(aa_dict[exon]) * DIVERSE_PERC_THRESH
def create_empty_final_dict(count_aa_diversity, aa_dict):
    """Empty per-sample sequence strings for every exon in the diversity dict."""
    return {
        exon: {sample: "" for sample in aa_dict[exon]}
        for exon in count_aa_diversity
    }
def append_final_trimmed_dict(count_aa_diversity, aa_dict, trimmed_aa_x_stop_diverse_dict, trimmed_aa_x_stop_dict,
                              trimmed_nt_x_stop_diverse_dict, trimmed_nt_x_stop_dict):
    """Append positions that pass the diversity filter to the final dicts.

    A position is kept when its most prevalent amino acid occurs in more
    samples than ``calculate_div_thresh`` allows; kept amino acids (and
    their three nucleotides) are concatenated, in position-iteration order,
    onto each sample's output string.

    :param count_aa_diversity: per exon/position amino-acid frequency counts
    :param aa_dict: per exon/sample amino acids (supplies the sample list
        and the diversity threshold)
    :param trimmed_aa_x_stop_diverse_dict: output, per exon/sample aa string
    :param trimmed_aa_x_stop_dict: input per exon/sample/position amino acids
    :param trimmed_nt_x_stop_diverse_dict: output, per exon/sample nt string
    :param trimmed_nt_x_stop_dict: input per exon/sample/position nucleotides
    """
    for exon in count_aa_diversity:
        diverse_thresh = calculate_div_thresh(aa_dict, exon)
        # The prevalence test depends only on (exon, position); decide once
        # per position instead of re-scanning the frequency counts for every
        # sample as the previous version did.
        keep_position = {}
        for aa_position, frequencies in count_aa_diversity[exon].items():
            n_most_prevalent = max(frequencies.values()) if frequencies else 0
            keep_position[aa_position] = n_most_prevalent > diverse_thresh
        for sample in aa_dict[exon]:
            for aa_position in count_aa_diversity[exon]:
                if not keep_position[aa_position]:
                    continue
                trimmed_aa_x_stop_diverse_dict[exon][sample] += \
                    trimmed_aa_x_stop_dict[exon][sample][aa_position]
                for nt_position in initialize_nt_positions(aa_position):
                    trimmed_nt_x_stop_diverse_dict[exon][sample] += \
                        trimmed_nt_x_stop_dict[exon][sample][nt_position]
def write_ffasta(final_trimmed_dict, path_to_trimmed_prot_dir, extension):
    """Write one fasta file per exon, one '>sample' header plus sequence
    line per sample, into *path_to_trimmed_prot_dir* with *extension*."""
    for exon, samples in final_trimmed_dict.items():
        out_path = path_to_trimmed_prot_dir + exon + extension
        with open(out_path, "w+") as handle:
            for sample, sequence in samples.items():
                handle.write(">" + sample + "\n")
                handle.write(sequence + "\n")
# Code starts here
path_to_macse_dir = "./results/A13_prot_alignments/"
path_to_trimmed_prot_dir = "./results/A14_trimmed_prot/"
create_dir(path_to_trimmed_prot_dir)

# Per-exon nested dicts: {exon: {sample_id: {1-based position: aa/base}}}
aa_dict = {}
nt_dict = {}
path_to_fNT = path_to_macse_dir + EXON + "_NT.fasta"
aa_dict[EXON] = {}
nt_dict[EXON] = {}
for record in SeqIO.parse(path_to_fNT, "fasta"):
    # create new sequence to translate gaps and frameshifts to 'N'
    new_seq = replace_to_n(record)
    '''Step 1: Deletes the sample if sequence base length too short (if too many gaps or fragment shifts)
    Final sequences shorter than 35% of unambiguous nucleotide positions based on the reference exon length
    were removed.'''
    new_seq_len = len(record.seq)
    min_nbases = new_seq_len * ACGT_LENGTH_PERC_THRESH
    nbase_ACGT = count_bases(new_seq)
    if nbase_ACGT > min_nbases:
        # translate dna sequence to protein sequence
        nt_seq = Seq(new_seq, generic_dna)
        protein_seq = nt_seq.translate()
        # assign amino acid per (1-based) position in aa_dict
        position = 0
        aa_dict[EXON][record.id] = {}
        for aa in protein_seq:
            position += 1
            aa_dict[EXON][record.id][position] = aa
        protein_seq_length = len(protein_seq)
    else:
        # BUG FIX: the tail of this message used to be concatenated *outside*
        # the print() call ("print(...) + '. This sample...'"), which raised
        # "unsupported operand type(s) for +: 'NoneType' and 'str'" whenever
        # a sample was actually dropped.
        print(record.id + " has too few nucleotide bases: " + str(nbase_ACGT) +
              ". It's below min_bases: " + str(min_nbases) +
              ". This sample has been deleted from " + EXON + ".")
# evaluate protein alignment
'''Step 2: Calculates per exon for every position the number of X's. Positions with > 20% ambiguous amino acids
(threshold) resulting from unidentified nucleotides (Ns) and gaps (-) were removed'''
# x_dict: {exon: {aa_position: number of samples with 'X' at that position}}
x_dict = {}
x_dict = create_empty_nested_dict(x_dict, aa_dict)
x_dict = count_X_dict(x_dict, aa_dict)

# create trimmed dictionary for AA and NT
trimmed_aa_x_dict = create_empty_trimmed_dict(aa_dict)
trimmed_nt_x_dict = create_empty_trimmed_dict(aa_dict)

# total_nx counts the (sample, position) entries dropped by this filter
total_nx = 0
for exon in x_dict:
    nx_thresh = calculate_nx_thresh(aa_dict, exon)
    for sample in aa_dict[exon]:
        for aa_position in x_dict[exon]:
            nx = x_dict[exon][aa_position]
            # checks if number of X's per position are below threshold, if yes: add base to the trimmed dictionary for
            # protein sequences and nucleotide sequences
            if nx <= nx_thresh:
                trimmed_aa_x_dict[exon][sample][aa_position] = aa_dict[exon][sample][aa_position]
                region = initialize_nt_positions(aa_position)
                # print(str(aa_position) + " belongs to " + str(region))
                for nt_position in region:
                    trimmed_nt_x_dict[exon][sample][nt_position] = nt_dict[exon][sample][nt_position]
            else:
                total_nx += 1
                # print("number of X's: " + str(nx) + " on position: " + str(aa_position) + " exceeds the threshold: " +
                #       str(nx_thresh) + " in exon: " + exon)
'''Step 3: Checks if internal stop codon (*) indicative of misalignment is present'''
# creates empty dictionary for stop codons
stop_dict = {}
stop_dict = create_empty_nested_dict(stop_dict, trimmed_aa_x_dict)
stop_dict = count_stop_dict(stop_dict, trimmed_aa_x_dict, aa_dict)

# create trimmed AA and NT dictionary for each exon per position without gaps, shifts and internal stop codons
trimmed_aa_x_stop_dict = create_empty_trimmed_dict(aa_dict)
trimmed_nt_x_stop_dict = create_empty_trimmed_dict(aa_dict)
append_trimmed_dict(stop_dict, aa_dict, trimmed_aa_x_dict, trimmed_aa_x_stop_dict,
                    trimmed_nt_x_dict, trimmed_nt_x_stop_dict)

# Counts number of present internal stop codons (*) indicative of misalignment
total_nstops = count_nstops(stop_dict)

'''Step 4: Deletes if a codon position was too diverse (most prevalent amino acid identical for < 30% of the taxa)'''
# create dictionary to count different aa in each position
# count_aa_diversity: {exon: {aa_position: {amino_acid: occurrence count}}}
count_aa_diversity = {}
count_aa_diversity = create_empty_diversity_dict(count_aa_diversity, aa_dict, trimmed_aa_x_stop_dict)
count_aa_diversity = count_diversity_dict(count_aa_diversity, trimmed_aa_x_stop_dict)

# final per-sample sequence strings, built by concatenating the kept positions
trimmed_nt_x_stop_diverse_dict = create_empty_final_dict(count_aa_diversity, aa_dict)
trimmed_aa_x_stop_diverse_dict = create_empty_final_dict(count_aa_diversity, aa_dict)
append_final_trimmed_dict(count_aa_diversity, aa_dict, trimmed_aa_x_stop_diverse_dict, trimmed_aa_x_stop_dict,
                          trimmed_nt_x_stop_diverse_dict, trimmed_nt_x_stop_dict)

'''Step 5: Write fasta files for trimmed AA and NT sequences in directory path_to_trimmed_prot_dir'''
nt = "_NT.fasta"
aa = "_AA.fasta"
write_ffasta(trimmed_nt_x_stop_diverse_dict, path_to_trimmed_prot_dir, nt)
write_ffasta(trimmed_aa_x_stop_diverse_dict, path_to_trimmed_prot_dir, aa)
|
import os.path as osp
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class IGNDataset(CustomDataset):
    """IGN dataset.

    In segmentation map annotation for IGN, 0 represents zones without
    information so ``reduce_zero_label`` is fixed to True.
    The ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
    '.png'.
    """

    # Semantic class names; index-aligned with PALETTE below.
    CLASSES = ('Dense forest', 'Sparse forest', 'Moor', 'Herbaceous formation', 'Building', 'Road')
    # Display colour per class, same order as CLASSES.
    PALETTE = [[128, 0, 0], [0, 128, 0], [128, 128, 0], [128, 0, 128], [0, 0, 128], [200, 200, 200]]

    def __init__(self, **kwargs):
        """Initialise with IGN-specific fixed options.

        :param kwargs: forwarded unchanged to :class:`CustomDataset`.
        """
        super(IGNDataset, self).__init__(
            img_suffix='.png',
            seg_map_suffix='.png',
            reduce_zero_label=True,
            **kwargs)
        # Fail fast if the configured image directory is missing.
        assert osp.exists(self.img_dir)
|
# -*- coding: utf-8 -*-
from java.sql import Connection
from com.ziclix.python.sql import zxJDBC
from datetime import datetime
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.base.features import BaseDatabaseFeatures
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.backends.base.introspection import BaseDatabaseIntrospection, FieldInfo, TableInfo
from django.db.backends.base.client import BaseDatabaseClient
from django.db.backends.base.validation import BaseDatabaseValidation
from django.db.backends.base.creation import BaseDatabaseCreation
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
__all__ = (
'JDBCBaseDatabaseWrapper',
'JDBCBaseDatabaseFeatures',
'JDBCBaseDatabaseOperations',
'JDBCBaseDatabaseIntrospection',
'JDBCBaseDatabaseClient',
'JDBCBaseDatabaseValidation',
'JDBCBaseDatabaseCreation',
'JDBCFieldInfo',
'JDBCTableInfo',
'JDBCBaseDatabaseSchemaEditor',
'JDBCCursorWrapper',
'JDBCConnection',
)
class JDBCBaseDatabaseWrapper(BaseDatabaseWrapper):
    """
    Represents a database connection using zxJDBC.

    Concrete backends subclass this and fill in the jdbc_* class attributes.
    """
    # Connection defaults applied when HOST/PORT/NAME are missing from
    # settings; concrete subclasses override these.
    jdbc_default_host = None
    jdbc_default_port = None
    jdbc_default_name = None
    # Fully qualified JDBC driver class name, supplied by subclasses.
    jdbc_driver_class_name = None
    # %-style pattern interpolated with the settings dict to build the URL.
    jdbc_connection_url_pattern = None

    # Expose zxJDBC's DB-API module and exception classes where Django
    # expects to find them on the wrapper.
    Database = zxJDBC
    Error = Database.Error
    NotSupportedError = Database.NotSupportedError
    DatabaseError = Database.DatabaseError
    IntegrityError = Database.IntegrityError
    ProgrammingError = Database.ProgrammingError

    def __init__(self, *args, **kwargs):
        super(JDBCBaseDatabaseWrapper, self).__init__(*args, **kwargs)

    def get_jdbc_settings(self):
        """Return a copy of settings_dict with backend defaults filled in
        for missing HOST, PORT and NAME entries."""
        settings_dict = dict(self.settings_dict)  # copy instead of reference
        if not settings_dict.get('HOST', None):
            settings_dict['HOST'] = self.jdbc_default_host
        if not settings_dict.get('PORT', None):
            settings_dict['PORT'] = self.jdbc_default_port
        if not settings_dict.get('NAME', None):
            settings_dict['NAME'] = self.jdbc_default_name
        return settings_dict

    def get_jdbc_driver_class_name(self):
        """Return the JDBC driver class name used to open connections."""
        return self.jdbc_driver_class_name

    def get_jdbc_connection_url(self):
        """Build the JDBC connection URL from the (defaulted) settings."""
        return self.jdbc_connection_url_pattern % self.get_jdbc_settings()

    def get_new_jndi_connection(self):
        """
        Returns a zxJDBC Connection object obtained from a JNDI data source if
        the settings dictionary contains the JNDI_NAME entry on the
        DATABASE_OPTIONS dictionary, or None if it doesn't.

        :return: zxJDBC Connection
        """
        settings_dict = dict(self.settings_dict)
        if 'OPTIONS' not in settings_dict:
            return None
        if 'JNDI_NAME' not in settings_dict['OPTIONS']:
            return None
        name = settings_dict['OPTIONS']['JNDI_NAME']
        props = settings_dict['OPTIONS'].get('JNDI_CONTEXT_OPTIONS', {})
        return zxJDBC.lookup(name, keywords=props)

    def get_connection_params(self):
        """Validate and return the settings used to open a connection."""
        settings_dict = dict(self.settings_dict)
        # None may be used to connect to the default 'postgres' db
        if settings_dict['NAME'] == '':
            from django.core.exceptions import ImproperlyConfigured
            raise ImproperlyConfigured(
                "settings.DATABASES is improperly configured. "
                "Please supply the NAME value.")
        settings_dict['NAME'] = settings_dict['NAME'] or self.jdbc_default_name
        return settings_dict

    def get_new_connection(self, conn_params):
        """Open a new connection, preferring a JNDI data source when one is
        configured, and set the default transaction isolation level."""
        connection = self.get_new_jndi_connection()
        if not connection:
            connection = zxJDBC.connect(self.get_jdbc_connection_url(),
                                        conn_params['USER'],
                                        conn_params['PASSWORD'],
                                        self.jdbc_driver_class_name,
                                        **conn_params['OPTIONS'])
        self._set_default_isolation_level(connection)
        return connection

    def create_cursor(self):
        # Wrap the raw cursor so "%s" placeholders are rewritten to JDBC "?".
        return JDBCCursorWrapper(self.connection.cursor())

    def _set_autocommit(self, autocommit):
        self.connection.autocommit = autocommit

    @staticmethod
    def _set_default_isolation_level(connection):
        """
        Make transactions transparent to all cursors. Must be called by zxJDBC backends
        after instantiating a connection.

        :param connection: zxJDBC connection
        """
        jdbc_connection = connection.__connection__
        jdbc_connection.setTransactionIsolation(JDBCConnection.TRANSACTION_READ_COMMITTED)
class JDBCBaseDatabaseOperations(BaseDatabaseOperations):
    """
    zxJDBC supports dates, times, datetimes and decimal directly, so we
    override the convert methods of django here.
    """
    def value_to_db_date(self, value):
        # Pass-through: zxJDBC accepts date objects natively.
        return value
    def value_to_db_datetime(self, value):
        # Pass-through: zxJDBC accepts datetime objects natively.
        return value
    def value_to_db_time(self, value):
        # Pass-through: zxJDBC accepts time objects natively.
        return value
    def value_to_db_decimal(self, value, max_digits, decimal_places):
        # Pass-through: zxJDBC accepts Decimal objects natively.
        return value
    def year_lookup_bounds(self, value):
        """Return the inclusive [start, end] datetime range covering year *value*."""
        first = datetime(value, 1, 1)
        second = datetime(value, 12, 31, 23, 59, 59, 999999)
        return [first, second]
class JDBCCursorWrapper(object):
    """
    A simple wrapper to do the "%s" -> "?" replacement before running zxJDBC's
    execute or executemany.

    Everything not explicitly overridden here is delegated verbatim to the
    wrapped zxJDBC cursor via ``__getattr__``.
    """
    def __init__(self, cursor):
        self.cursor = cursor

    @property
    def arraysize(self):
        # Mirror the underlying cursor's arraysize.
        return self.cursor.arraysize

    @arraysize.setter
    def arraysize(self, size):
        self.cursor.arraysize = size

    @property
    def rowcount(self):
        # zxJDBC tracks DML counts in updatecount; report the larger figure.
        return max(self.cursor.updatecount, self.cursor.rowcount)

    def __getattr__(self, attr):
        # Fall back to the wrapped cursor for anything we do not define.
        return getattr(self.cursor, attr)

    def __iter__(self):
        # Two-argument iter(): keep fetching until a None row is returned.
        return iter(self.next, None)

    def execute(self, sql, params=None):
        """Translate "%s" placeholders to "?" and execute the statement."""
        bound = params if params else tuple()
        self.cursor.execute(sql % (('?',) * len(bound)), bound)

    def executemany(self, sql, param_list):
        """Translate placeholders (sized by the first tuple) and run executemany."""
        if len(param_list) > 0:
            self.cursor.executemany(sql % (('?',) * len(param_list[0])), param_list)

    def callproc(self, procname, parameters=None):
        return self.cursor.callproc(procname, parameters)

    def close(self):
        return self.cursor.close()

    def fetchone(self):
        """Return the next row, or None once the result set is exhausted."""
        try:
            return self.cursor.fetchone()
        except JDBCBaseDatabaseWrapper.DatabaseError:
            # zxJDBC raises at end-of-results instead of returning None.
            return None

    def fetchmany(self, size=None):
        """Fetch up to *size* rows (defaulting to the cursor's arraysize)."""
        requested = size or self.cursor.arraysize
        # `fetchmany` may rise an IndexError if the result set is
        # smaller than the size parameter. We fallback to `fetchall`
        # in that case.
        try:
            return self.cursor.fetchmany(requested)
        except (IndexError, JDBCBaseDatabaseWrapper.DatabaseError):
            return self.cursor.fetchall()

    def fetchall(self):
        """Fetch all remaining rows; an exhausted result set yields []."""
        try:
            return self.cursor.fetchall()
        except (IndexError, JDBCBaseDatabaseWrapper.DatabaseError):
            return []

    def nextset(self):
        return self.cursor.nextset()

    def setinputsizes(self, sizes):
        return self.cursor.setinputsizes(sizes)

    def setoutputsize(self, sizes, column=None):
        return self.cursor.setoutputsize(sizes, column)
class JDBCBaseDatabaseFeatures(BaseDatabaseFeatures):
    # zxJDBC hands back real datetime objects, so no string cast is needed.
    needs_datetime_string_cast = False
class JDBCBaseDatabaseIntrospection(BaseDatabaseIntrospection):
    # Map java.sql.Types constants (exposed on the zxJDBC module) to the
    # Django model field class names used when introspecting existing tables.
    data_types_reverse = {
        zxJDBC.BIGINT: 'BigIntegerField',
        zxJDBC.BINARY: 'BinaryField',
        zxJDBC.BIT: 'BooleanField',
        zxJDBC.BLOB: 'BinaryField',
        zxJDBC.BOOLEAN: 'BooleanField',
        zxJDBC.CHAR: 'CharField',
        zxJDBC.CLOB: 'TextField',
        zxJDBC.DATE: 'DateField',
        zxJDBC.DATETIME: 'DateTimeField',
        zxJDBC.DECIMAL: 'DecimalField',
        zxJDBC.DOUBLE: 'FloatField',
        zxJDBC.FLOAT: 'FloatField',
        zxJDBC.INTEGER: 'IntegerField',
        zxJDBC.LONGNVARCHAR: 'TextField',
        zxJDBC.LONGVARBINARY: 'BinaryField',
        zxJDBC.LONGVARCHAR: 'TextField',
        zxJDBC.NCHAR: 'CharField',
        zxJDBC.NCLOB: 'TextField',
        zxJDBC.NUMBER: 'IntegerField',
        zxJDBC.NVARCHAR: 'CharField',
        zxJDBC.REAL: 'FloatField',
        zxJDBC.SMALLINT: 'SmallIntegerField',
        zxJDBC.STRING: 'TextField',
        zxJDBC.TIME: 'TimeField',
        zxJDBC.TIMESTAMP: 'DateTimeField',
        zxJDBC.TINYINT: 'SmallIntegerField',
        zxJDBC.VARBINARY: 'BinaryField',
        zxJDBC.VARCHAR: 'CharField',
    }
class JDBCBaseDatabaseClient(BaseDatabaseClient):
    # No JDBC-specific shell client behavior; concrete backends may override.
    pass
class JDBCBaseDatabaseValidation(BaseDatabaseValidation):
    # Stock Django validation is sufficient for JDBC backends.
    pass
class JDBCBaseDatabaseCreation(BaseDatabaseCreation):
    # Test-database creation uses the stock Django implementation.
    pass
class JDBCFieldInfo(FieldInfo):
    # Alias so JDBC backends can extend introspection field records later.
    pass
class JDBCTableInfo(TableInfo):
    # Alias so JDBC backends can extend introspection table records later.
    pass
class JDBCBaseDatabaseSchemaEditor(BaseDatabaseSchemaEditor):
    # Schema editing falls back entirely to the base implementation.
    pass
class JDBCConnection(Connection):
    # Alias of the underlying Connection class (presumably java.sql.Connection;
    # used above for its TRANSACTION_* isolation constants) -- TODO confirm.
    pass
|
def chkAcid(acid):
    """Validate an ACID: exactly 8 characters, each an uppercase letter or digit."""
    valid_chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
    return len(acid) == 8 and all(ch in valid_chars for ch in acid)
def chkJobid(jobID):
    """
    Validate a job id: 6 characters drawn from the random-id alphabet, or 11
    characters when the id embeds the 'ROTID' marker.
    """
    alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*'
    expected_length = 11 if 'ROTID' in jobID else 6
    return len(jobID) == expected_length and all(ch in alphabet for ch in jobID)
def chkOpid(opID):
    """Validate an operator id: at most 15 characters of [A-Za-z0-9_-]."""
    allowed = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_-'
    return len(opID) <= 15 and all(ch in allowed for ch in opID)
"""
Test the multi_sample_split module
"""
import numpy as np
from numpy.testing import assert_almost_equal
from hidimstat.multi_sample_split import aggregate_medians, aggregate_quantiles
def test_aggregate_medians():
    """Median aggregation of the synthetic p-values should land near 0.04 per feature."""
    n_iter, n_features = 20, 5
    pvals = 1.0 / (np.arange(n_iter * n_features) + 1)
    pvals = pvals.reshape((n_iter, n_features))
    # Make the last 5 iterations strongly significant.
    pvals[15:, :] = 3e-3
    aggregated = aggregate_medians(pvals)
    expected = 0.04 * np.ones(n_features)
    for idx in np.arange(expected.size):
        assert_almost_equal(aggregated[idx], expected[idx], decimal=2)
def test_aggregate_quantiles():
    """Quantile aggregation of the synthetic p-values should land near 0.03 per feature."""
    n_iter, n_features = 20, 5
    pvals = 1.0 / (np.arange(n_iter * n_features) + 1)
    pvals = pvals.reshape((n_iter, n_features))
    # Make the last 5 iterations strongly significant.
    pvals[15:, :] = 3e-3
    aggregated = aggregate_quantiles(pvals)
    expected = 0.03 * np.ones(n_features)
    for idx in np.arange(expected.size):
        assert_almost_equal(aggregated[idx], expected[idx], decimal=2)
|
import numpy as np
class Relu(object):
    """Rectified linear unit activation: max(0, a)."""
    def __call__(self, a):
        # Element-wise clamp of negative inputs to zero.
        return np.maximum(0, a)
    def gradient(self, a):
        # Step-function derivative: 0 for a < 0, 1 for a > 0, 0 at a == 0.
        return np.heaviside(a, 0)
class Tanh(object):
    """Hyperbolic tangent activation."""
    def __call__(self, x):
        return np.tanh(x)
    def gradient(self, x):
        # d/dx tanh(x) = 1 - tanh(x)^2
        t = np.tanh(x)
        return 1.0 - t * t
class Sigmoid(object):
    """Logistic sigmoid activation: 1 / (1 + exp(-a))."""
    def __call__(self, a):
        """Return sigmoid(a) element-wise."""
        # Fix: the previous version also cached the result on a misspelled,
        # never-read attribute (`_ouptut`); that dead store is removed.
        return 1 / (1 + np.exp(-a))
    def gradient(self, Y):
        """
        Return the sigmoid derivative at pre-activation *Y*:
        sigmoid(Y) * (1 - sigmoid(Y)).
        """
        output = self(Y)
        return output * (1 - output)
|
# -*- coding: utf-8 -*-
# effort of writing python 2/3 compatiable code
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from future.utils import iteritems
from operator import itemgetter, attrgetter, methodcaller
import sys, time, argparse, csv
import cProfile
if sys.version < '3':
from codecs import getwriter
stderr = getwriter('utf-8')(sys.stderr)
stdout = getwriter('utf-8')(sys.stdout)
else:
stderr = sys.stderr
import dynet as dt
from collections import Counter
import random
import util
import config
import cPickle
import copy
from action import *
from statesearch import *
import nnmodule as nnmod
######## START OF THE CODE ########
class SqaModel():
    """
    Dynet model holding all parameters of the SQA table question-answering
    parser: word embeddings, question/previous-question LSTM encoders and a
    set of feed-forward / matching sub-modules (see nnmodule).

    Embeddings can optionally be initialized from a pretrained trie stored
    in the global config dictionary.
    """
    # Dimensions come from the global config dictionary.
    WORD_EMBEDDING_DIM = config.d["WORD_EMBEDDING_DIM"]
    LSTM_HIDDEN_DIM = config.d["LSTM_HIDDEN_DIM"]
    def __init__(self, init_learning_rate, vw, reload_embeddings = True):
        """
        :param init_learning_rate: initial SGD learning rate
        :param vw: vocabulary object exposing w2i/i2w and size()
        :param reload_embeddings: skip pretrained-embedding loading when False
            (used by load() to avoid redundant work)
        """
        self.model = dt.Model()
        self.vw = vw
        UNK = self.vw.w2i["_UNK_"]
        n_words = vw.size()
        print("init vw =", self.vw.size(), "words")
        self.learning_rate = init_learning_rate
        self.learner = dt.SimpleSGDTrainer(self.model, learning_rate=init_learning_rate)
        # self.learner = dt.SimpleSGDTrainer(self.model)
        self.E = self.model.add_lookup_parameters((n_words, SqaModel.WORD_EMBEDDING_DIM))
        # similarity(v,o): v^T o
        self.SelHW = self.model.add_parameters((4 * SqaModel.WORD_EMBEDDING_DIM))
        # NOTE(review): with `from __future__ import division` active in this
        # file, WORD_EMBEDDING_DIM / 2 is *float* division -- confirm dynet
        # accepts a float dimension here (or that the dim is always even).
        self.SelIntraFW = self.model.add_parameters((SqaModel.WORD_EMBEDDING_DIM / 2, SqaModel.WORD_EMBEDDING_DIM))
        self.SelIntraHW = self.model.add_parameters((SqaModel.WORD_EMBEDDING_DIM, SqaModel.WORD_EMBEDDING_DIM * 2))
        self.SelIntraBias = self.model.add_parameters((config.d["DIST_BIAS_DIM"]))
        self.ColTypeN = self.model.add_parameters((1))
        self.ColTypeW = self.model.add_parameters((1))
        self.NulW = self.model.add_parameters((SqaModel.WORD_EMBEDDING_DIM))
        ''' new ways to add module '''
        self.SelColFF = nnmod.FeedForwardModel(self.model, 4)
        self.WhereColFF = nnmod.FeedForwardModel(self.model, 5)
        self.QCMatch = nnmod.QuestionColumnMatchModel(self.model, SqaModel.WORD_EMBEDDING_DIM)
        self.NegFF = nnmod.FeedForwardModel(self.model, 2)
        self.FpWhereColFF = nnmod.FeedForwardModel(self.model, 9)
        # LSTM question representation
        self.builders = [
            dt.LSTMBuilder(1, SqaModel.WORD_EMBEDDING_DIM, SqaModel.LSTM_HIDDEN_DIM, self.model),
            dt.LSTMBuilder(1, SqaModel.WORD_EMBEDDING_DIM, SqaModel.LSTM_HIDDEN_DIM, self.model)
        ]
        self.pH = self.model.add_parameters((SqaModel.WORD_EMBEDDING_DIM, SqaModel.LSTM_HIDDEN_DIM*2))
        # LSTM question representation
        self.prev_builders = [
            dt.LSTMBuilder(1, SqaModel.WORD_EMBEDDING_DIM, SqaModel.LSTM_HIDDEN_DIM, self.model),
            dt.LSTMBuilder(1, SqaModel.WORD_EMBEDDING_DIM, SqaModel.LSTM_HIDDEN_DIM, self.model)
        ]
        self.prev_pH = self.model.add_parameters((SqaModel.WORD_EMBEDDING_DIM, SqaModel.LSTM_HIDDEN_DIM*2))
        self.SelColFpWhereW = self.model.add_parameters((4))
        self.SameAsPreviousW = self.model.add_parameters((2))
        if config.d["USE_PRETRAIN_WORD_EMBEDDING"] and reload_embeddings:
            # Initialize each row of E from the pretrained-embedding trie when
            # the word is found; otherwise just L2-normalize the random init.
            n_hit_pretrain = 0.0
            trie = config.d["embeddingtrie"]
            print ("beginning to load embeddings....")
            for i in range(n_words):
                word = self.vw.i2w[i].lower()
                results = trie.items(word+ config.d["recordtriesep"])
                if len(results) == 1:
                    pretrain_v = np.array(list(results[0][1]))
                    pretrain_v = pretrain_v/np.linalg.norm(pretrain_v)
                    self.E.init_row(i,pretrain_v)
                    n_hit_pretrain += 1
                else:
                    pretrain_v = self.E[i].npvalue()
                    pretrain_v = pretrain_v/np.linalg.norm(pretrain_v)
                    self.E.init_row(i,pretrain_v)
            print ("the number of words that are in pretrain", n_hit_pretrain, n_words, n_hit_pretrain/n_words)
            print ("loading complete!")
        if config.d["USE_PRETRAIN_WORD_EMBEDDING"]:
            # Seed the operator sub-modules with embeddings of trigger words.
            self.Negate = nnmod.NegationModel(self.model, SqaModel.WORD_EMBEDDING_DIM, UNK, "not", self.vw, self.E)
            self.CondGT = nnmod.CompareModel(self.model, SqaModel.WORD_EMBEDDING_DIM, UNK, "more greater larger than", self.vw, self.E)
            self.CondGE = nnmod.CompareModel(self.model, SqaModel.WORD_EMBEDDING_DIM, UNK, "more greater larger than or equal to at least", self.vw, self.E)
            self.CondLT = nnmod.CompareModel(self.model, SqaModel.WORD_EMBEDDING_DIM, UNK, "less fewer smaller than", self.vw, self.E)
            self.CondLE = nnmod.CompareModel(self.model, SqaModel.WORD_EMBEDDING_DIM, UNK, "less fewer smaller than or equal to at most", self.vw, self.E)
            self.ArgMin = nnmod.ArgModel(self.model, SqaModel.WORD_EMBEDDING_DIM, UNK, "least fewest smallest lowest shortest oldest", self.vw, self.E)
            self.ArgMax = nnmod.ArgModel(self.model, SqaModel.WORD_EMBEDDING_DIM, UNK, "most greatest biggest largest highest longest latest tallest", self.vw, self.E)
        else:
            self.Negate = nnmod.NegationModel(self.model, SqaModel.WORD_EMBEDDING_DIM, UNK)
            self.CondGT = nnmod.CompareModel(self.model, SqaModel.WORD_EMBEDDING_DIM, UNK)
            self.CondGE = nnmod.CompareModel(self.model, SqaModel.WORD_EMBEDDING_DIM, UNK)
            self.CondLT = nnmod.CompareModel(self.model, SqaModel.WORD_EMBEDDING_DIM, UNK)
            self.CondLE = nnmod.CompareModel(self.model, SqaModel.WORD_EMBEDDING_DIM, UNK)
            self.ArgMin = nnmod.ArgModel(self.model, SqaModel.WORD_EMBEDDING_DIM, UNK)
            self.ArgMax = nnmod.ArgModel(self.model, SqaModel.WORD_EMBEDDING_DIM, UNK)
    def save(self,header):
        """Persist vocabulary + learning rate (cPickle) and the dynet weights."""
        print("Saving model with header = ", header)
        f = open(header + "-extra.bin",'wb')
        cPickle.dump(self.vw,f)
        cPickle.dump(self.learning_rate,f)
        f.close()
        self.model.save(header + "-dynetmodel.bin")
        #print("Done!")
    @staticmethod
    def load(header):
        """Reconstruct a SqaModel saved by save(); skips the embedding reload."""
        print("Loading model with header = ", header)
        f = open(header + "-extra.bin",'rb')
        vw = cPickle.load(f)
        lr = cPickle.load(f)
        f.close()
        res = SqaModel(lr,vw,False) # do not waste time reload embeddings
        #res.model.load(header + "-dynetmodel.bin")
        res.model.populate(header + "-dynetmodel.bin")
        #print("Done!")
        return res
|
import copy
from .graph import Graph
'''
Single-source shortest paths.

Computes, in one pass, the shortest distance from one vertex to every other
vertex using Dijkstra's greedy algorithm.
'''
class Dijkstra(Graph):
    """
    Single-source shortest paths over a dense adjacency-matrix Graph.

    One call to shortest_path settles the distance from *start* to every
    vertex and returns the distance to *end*.
    """
    def __init__(self, size=10, graph=None):
        super().__init__(size=size, graph=graph)
        # Private working copy so callers' adjacency data is never mutated.
        self.result = copy.deepcopy(self.graph)
    def shortest_path(self, start=None, end=None):
        """
        Return the shortest distance from *start* to *end*.

        NOTE(review): ``end or 0`` means end=None (or end=0) yields the
        distance to vertex 0 -- confirm that default is intentional.
        """
        # S: settled vertices -> final distance; seeded with the source.
        S = {start: 0}
        # U: frontier estimates, initialized with direct edge weights.
        U = {v: self.graph[start][v] for v in range(0, self.size) if v != start}
        while len(S) < self.size:
            # min() with key=U.get selects the vertex with the smallest
            # tentative distance.
            min_key = min(U, key=U.get)
            S[min_key] = U.get(min_key)
            # Relaxation: keep the better of the old estimate and the path
            # through the newly settled vertex.
            U = {v: min(self.graph[min_key][v] + S[min_key],
                        U[v]) for v in U.keys() if v != min_key}
        return S.get(end or 0)
|
__author__ = 'clarkmatthew'
from prettytable import PrettyTable
from colorama import Fore, init
from subprocess import Popen, PIPE
import re
cmd_string = 'euca-describe-images --show-empty-fields -h'
cmd = cmd_string.split()
p = Popen(cmd, stdout=PIPE)
p_out, p_err = p.communicate()
if p.returncode:
print str(p_out)
raise RuntimeError('Cmd:"{0}" failed. Code:{1}. stderr:"{1}"'
.format(cmd_string, p.returncode, p_err))
lines = p_out.splitlines()
args = {}
arg = None
for line in lines:
help = ""
print 'Looking at line:' + str(line)
if not line.strip():
continue
if re.search('^\w', line):
print 'Parsing arg line: ' + str(line)
line = line.strip()
split_line = line.split()
arg = line[0]
help = " ".join(line[1:-1])
args[arg] = help
print 'got arg:"{0}", and help:"{1}"'.format(arg, args[arg])
else:
print 'parsing help line for arg:{0}, adding:{1}'.format(arg, line.strip())
args[arg] += line.strip()
'''
pt = PrettyTable()
for arg in args:
pt.add_row([arg, args[arg]])
print pt
'''
|
#coding:utf-8
import requests
# 解析来自 高德 的数据,输出 地铁站和站点名称、经纬度等信息
## 遍历搜索
## 地图标注 |
'''
original_3DRecGAN++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
'''
from torchvision import datasets, transforms
from base import BaseDataLoader
import os
import torch
import pandas as pd
from base import BaseDataLoader
from base import DataPrefetcher
import numpy as np
import re
import matplotlib.pyplot as plt
class Data:
    """
    File-based voxel dataset index for 3D reconstruction: pairs 2.5D input
    grids with 3D target grids at a fixed resolution of 64.

    Builds sorted, paired lists of X (2.5D) / Y (3D) file paths for the
    configured object categories and provides loading/padding helpers.
    """
    def __init__(self,path):
        ###############################################################
        # Fixed output grid edge length for voxel_grid_padding().
        self.resolution = 64
        config={}
        # chair/stool/toilet
        config['train_names'] = ['chair']
        for name in config['train_names']:
            config['X_train_'+name] = path+name+'/train_25d/voxel_grids_64/'
            config['Y_train_'+name] = path+name+'/train_3d/voxel_grids_64/'
        config['test_names']=['chair']
        for name in config['test_names']:
            config['X_test_'+name] = path+name+'/test_25d/voxel_grids_64/'
            config['Y_test_'+name] = path+name+'/test_3d/voxel_grids_64/'
        self.config = config
        self.train_names = config['train_names']
        self.test_names = config['test_names']
        self.X_train_files, self.Y_train_files = self.load_X_Y_files_paths_all( self.train_names,label='train')
        self.X_test_files, self.Y_test_files = self.load_X_Y_files_paths_all(self.test_names,label='test')
        print ('X_train_files:',len(self.X_train_files))
        print ('X_test_files:',len(self.X_test_files))
    @staticmethod
    def plotFromVoxels(voxels):
        """Scatter-plot the occupied cells of a (possibly multi-channel) voxel grid."""
        if len(voxels.shape)>3:
            # Drop the trailing channel axis, keeping channel 0.
            x_d = voxels.shape[0]
            y_d = voxels.shape[1]
            z_d = voxels.shape[2]
            v = voxels[:,:,:,0]
            v = np.reshape(v,(x_d,y_d,z_d))
        else:
            v = voxels
        x, y, z = v.nonzero()
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(x, y, z, zdir='z', c='red')
        plt.show()
    def load_X_Y_files_paths_all(self, obj_names, label='train'):
        """
        Collect full paths of paired X/Y voxel files for all *obj_names*.

        :param label: 'train' or 'test' (any other value aborts the process)
        """
        x_str=''
        y_str=''
        if label =='train':
            x_str='X_train_'
            y_str ='Y_train_'
        elif label == 'test':
            x_str = 'X_test_'
            y_str = 'Y_test_'
        else:
            print ('label error!!')
            exit()
        X_data_files_all = []
        Y_data_files_all = []
        for name in obj_names:
            X_folder = self.config[x_str + name]
            Y_folder = self.config[y_str + name]
            X_data_files, Y_data_files = self.load_X_Y_files_paths(X_folder, Y_folder)
            for X_f, Y_f in zip(X_data_files, Y_data_files):
                # Paired files must share the same leading index prefix.
                if X_f[0:15] != Y_f[0:15]:
                    print ('index inconsistent!!')
                    exit()
                X_data_files_all.append(X_folder + X_f)
                Y_data_files_all.append(Y_folder + Y_f)
        return X_data_files_all, Y_data_files_all
    def load_X_Y_files_paths(self,X_folder, Y_folder):
        """Return the sorted listings of both folders (pairing relies on sort order)."""
        X_data_files = [X_f for X_f in sorted(os.listdir(X_folder))]
        Y_data_files = [Y_f for Y_f in sorted(os.listdir(Y_folder))]
        return X_data_files, Y_data_files
    def voxel_grid_padding(self,a):
        """
        Center-pad (or center-crop) voxel grid *a* to the fixed
        (resolution, resolution, resolution, channel) shape.
        """
        x_d = a.shape[0]
        y_d = a.shape[1]
        z_d = a.shape[2]
        channel = a.shape[3]
        resolution = self.resolution
        size = [resolution, resolution, resolution,channel]
        b = np.zeros(size)
        # b* = destination (padded) slice bounds, a* = source (crop) bounds.
        bx_s = 0;bx_e = size[0];by_s = 0;by_e = size[1];bz_s = 0; bz_e = size[2]
        ax_s = 0;ax_e = x_d;ay_s = 0;ay_e = y_d;az_s = 0;az_e = z_d
        if x_d > size[0]:
            ax_s = int((x_d - size[0]) / 2)
            ax_e = int((x_d - size[0]) / 2) + size[0]
        else:
            bx_s = int((size[0] - x_d) / 2)
            bx_e = int((size[0] - x_d) / 2) + x_d
        if y_d > size[1]:
            ay_s = int((y_d - size[1]) / 2)
            ay_e = int((y_d - size[1]) / 2) + size[1]
        else:
            by_s = int((size[1] - y_d) / 2)
            by_e = int((size[1] - y_d) / 2) + y_d
        if z_d > size[2]:
            az_s = int((z_d - size[2]) / 2)
            az_e = int((z_d - size[2]) / 2) + size[2]
        else:
            bz_s = int((size[2] - z_d) / 2)
            bz_e = int((size[2] - z_d) / 2) + z_d
        b[bx_s:bx_e, by_s:by_e, bz_s:bz_e,:] = a[ax_s:ax_e, ay_s:ay_e, az_s:az_e, :]
        return b
    def load_single_voxel_grid(self,path):
        """
        Parse one sparse voxel file into a dense (1, x, y, z) occupancy grid.

        The grid dimensions are encoded in the filename as ..._X_Y_Z.<ext>;
        each row of the file gives the integer coordinates of an occupied cell.
        """
        temp = re.split('_', path.split('.')[-2])
        x_d = int(temp[len(temp) - 3])
        y_d = int(temp[len(temp) - 2])
        z_d = int(temp[len(temp) - 1])
        a = np.loadtxt(path)
        if len(a)<=0:
            print ('load_single_voxel_grid error:', path)
            exit()
        voxel_grid = np.zeros((x_d, y_d, z_d,1))
        for i in a:
            voxel_grid[int(i[0]), int(i[1]), int(i[2]),0] = 1 # occupied
        #Data.plotFromVoxels(voxel_grid)
        voxel_grid = self.voxel_grid_padding(voxel_grid)
        # Move the channel axis first: (x, y, z, c) -> (c, x, y, z).
        voxel_grid = voxel_grid.transpose([3,0,1,2])
        return voxel_grid
    def load_X_Y_voxel_grids(self,X_data_files, Y_data_files):
        """
        Load one batch of paired X/Y voxel grids as numpy arrays.

        NOTE(review): reads self.batch_size, which __init__ never sets --
        callers must assign data.batch_size first or this raises AttributeError.
        """
        if len(X_data_files) !=self.batch_size or len(Y_data_files)!=self.batch_size:
            print ('load_X_Y_voxel_grids error:', X_data_files, Y_data_files)
            exit()
        X_voxel_grids = []
        Y_voxel_grids = []
        index = -1
        for X_f, Y_f in zip(X_data_files, Y_data_files):
            index += 1
            X_voxel_grid = self.load_single_voxel_grid(X_f)
            X_voxel_grids.append(X_voxel_grid)
            Y_voxel_grid = self.load_single_voxel_grid(Y_f)
            Y_voxel_grids.append(Y_voxel_grid)
        X_voxel_grids = np.asarray(X_voxel_grids)
        Y_voxel_grids = np.asarray(Y_voxel_grids)
        return X_voxel_grids, Y_voxel_grids
class train_original_3DRecGAN(BaseDataLoader):
    # Data loader over the training split of the 3DRecGAN voxel dataset.
    def __init__(self, data_dir, batch_size, shuffle=True, num_workers=1, training=True):
        self.dataset = Train_Dataset(data_dir)
        self.batch_size = batch_size
        super().__init__(self.dataset, batch_size, shuffle, num_workers)
class test_original_3DRecGAN(BaseDataLoader):
    # Data loader over the test split of the 3DRecGAN voxel dataset.
    def __init__(self, data_dir, batch_size, shuffle=True, num_workers=1, training=True):
        self.dataset = Test_Dataset(data_dir)
        # Consistency fix: mirror train_original_3DRecGAN, which records the
        # batch size on the loader; previously only the train loader did.
        self.batch_size = batch_size
        super().__init__(self.dataset, batch_size, shuffle, num_workers)
class Train_Dataset(torch.utils.data.Dataset):
    """Torch dataset yielding (X, Y) training voxel-grid pairs, loaded lazily."""
    def __init__(self,path):
        self.root = path
        # Index the training file lists once up front.
        self.data = Data(self.root)
    def __len__(self):
        return len(self.data.X_train_files)
    def __getitem__(self, index):
        # Load the paired 2.5D input / 3D target grids for this index.
        loader = self.data.load_single_voxel_grid
        return loader(self.data.X_train_files[index]), loader(self.data.Y_train_files[index])
class Test_Dataset(torch.utils.data.Dataset):
    """Torch dataset yielding (X, Y) test voxel-grid pairs, loaded lazily."""
    def __init__(self,path):
        self.root = path
        # Index the test file lists once up front.
        self.data = Data(self.root)
    def __len__(self):
        return len(self.data.X_test_files)
    def __getitem__(self, index):
        loader = self.data.load_single_voxel_grid
        return loader(self.data.X_test_files[index]), loader(self.data.Y_test_files[index])
import json
from pymongo import MongoClient
def save_to_json(custom_object):
    """
    Append *custom_object* to the "tweets" list in the JSON output file.

    Best-effort: any failure (missing file, malformed JSON, I/O error) is
    reported and swallowed so the caller's stream processing continues.
    """
    file = "../../output.json"
    try:
        # `with` closes the handles; the old explicit f.close() calls were
        # redundant.
        with open(file) as f:
            data = json.load(f)
        data["tweets"].append(custom_object)
        with open(file, 'w') as f:
            json.dump(data, f, indent=2)
    except Exception as e:
        # Include the reason; the previous bare message hid what went wrong.
        print("failed to save a tweet!", e)
def save_to_txt(custom_object):
    """Append one line of tab-terminated, utf-8-encoded dict values to the output file."""
    file = "../../output/output.txt"
    with open(file, "a") as f:
        # str(b'...') renders as "b'...'"; [2:][:-1] strips the b' prefix and
        # the trailing quote, keeping the escaped byte text.
        fields = [str(item[1].encode("utf-8"))[2:][:-1] for item in custom_object.items()]
        f.write("".join(field + "\t" for field in fields) + "\n")
def save_to_mongo(collection_name,custom_object):
    """
    Insert *custom_object* into the named collection of the twitterapitest DB.

    http://api.mongodb.com/python/current/tutorial.html

    :return: the inserted document's ObjectId (previously computed but discarded)
    """
    # NOTE(review): a new MongoClient per call is wasteful for high-volume
    # streams; consider hoisting the client to module scope.
    client = MongoClient()
    db = client.twitterapitest
    collection = db[collection_name]
    return collection.insert_one(custom_object).inserted_id
|
# -*- coding: utf-8 -*
import math
import maya.api.OpenMaya as OpenMaya
nodeName = 'mlEulerToExpmap'
# Each entry is a (long name, short name, nice/display name) triple used when
# creating the Maya attributes below.
expmapName = ('expmap', 'em', u'Expmap')
expmapElementName = (('expmapX', 'emx', 'Expmap X'),
                     ('expmapY', 'emy', 'Expmap Y'),
                     ('expmapZ', 'emz', 'Expmap Z'))
rotateName = ('rotate', 'ir', u'回転')
eulerAngleName = (('rotateX', 'rx', u'回転 X'),
                  ('rotateY', 'ry', u'回転 Y'),
                  ('rotateZ', 'rz', u'回転 Z'))
rotateOrderName = ('rotateOrder', 'ro', u'回転順序')
def maya_useNewAPI():
    """Presence of this function tells Maya to load the plug-in with API 2.0."""
    pass
class mlEulerToExpmap(OpenMaya.MPxNode):
    """Dependency node converting an Euler rotation to an exponential map."""
    # Static MObject handles for the node's attributes (filled in initialize()).
    rotate = OpenMaya.MObject()
    eulerAngles = []
    expmap = OpenMaya.MObject()
    expmapElement = []
    def __init__(self):
        OpenMaya.MPxNode.__init__(self)
    @staticmethod
    def creator():
        # Factory used by Maya when instantiating the node.
        return mlEulerToExpmap()
    @staticmethod
    def initialize():
        """Create the node's attributes: expmap (out), rotate and rotateOrder (in)."""
        # output expmap
        cAttr = OpenMaya.MFnCompoundAttribute()
        mlEulerToExpmap.expmap = cAttr.create(expmapName[0], expmapName[1])
        cAttr.setNiceNameOverride(expmapName[2])
        mlEulerToExpmap.expmapElement = []
        for i in xrange(0, 3):
            nAttr = OpenMaya.MFnNumericAttribute()
            mlEulerToExpmap.expmapElement = mlEulerToExpmap.expmapElement + \
                [nAttr.create(expmapElementName[i][0],
                              expmapElementName[i][1],
                              OpenMaya.MFnNumericData.kDouble,
                              0.0)]
            nAttr.setNiceNameOverride(expmapElementName[i][2])
            nAttr.keyable = False
            nAttr.writable = False
            cAttr.addChild(mlEulerToExpmap.expmapElement[i])
        mlEulerToExpmap.addAttribute(mlEulerToExpmap.expmap)
        # input Euler angles
        cAttr = OpenMaya.MFnCompoundAttribute()
        mlEulerToExpmap.rotate = cAttr.create(rotateName[0], rotateName[1])
        cAttr.setNiceNameOverride(rotateName[2])
        mlEulerToExpmap.eulerAngles = []
        for i in xrange(0, 3):
            uAttr = OpenMaya.MFnUnitAttribute()
            mlEulerToExpmap.eulerAngles = mlEulerToExpmap.eulerAngles + \
                [uAttr.create(eulerAngleName[i][0],
                              eulerAngleName[i][1],
                              OpenMaya.MFnUnitAttribute.kAngle,
                              0.0)]
            uAttr.setNiceNameOverride(eulerAngleName[i][2])
            uAttr.keyable = True
            uAttr.readable = False
            cAttr.addChild(mlEulerToExpmap.eulerAngles[i])
        mlEulerToExpmap.addAttribute(mlEulerToExpmap.rotate)
        mlEulerToExpmap.attributeAffects(mlEulerToExpmap.rotate, mlEulerToExpmap.expmap)
        # input rotation order
        nAttr = OpenMaya.MFnNumericAttribute()
        mlEulerToExpmap.rotateOrder = nAttr.create(rotateOrderName[0],
                                                   rotateOrderName[1],
                                                   OpenMaya.MFnNumericData.kInt,
                                                   0)
        nAttr.setNiceNameOverride(rotateOrderName[2])
        nAttr.readable = False
        mlEulerToExpmap.addAttribute(mlEulerToExpmap.rotateOrder)
        mlEulerToExpmap.attributeAffects(mlEulerToExpmap.rotateOrder, mlEulerToExpmap.expmap)
    def compute(self, plug, dataBlock):
        """Convert the input Euler rotation to its exponential-map components."""
        if plug is not self.expmap and plug not in self.expmapElement:
            return
        r = [0, 0, 0]
        for i in xrange(0, 3):
            rHandle = dataBlock.inputValue(mlEulerToExpmap.eulerAngles[i])
            r[i] = rHandle.asDouble()
        roHandle = dataBlock.inputValue(mlEulerToExpmap.rotateOrder)
        rotateOrder = roHandle.asInt()
        eulerRotation = OpenMaya.MEulerRotation(r[0], r[1], r[2], rotateOrder)
        q = eulerRotation.asQuaternion()
        # Keep the quaternion in the w >= 0 hemisphere so the log is unique.
        if q.w < 0:
            q = -q
        if math.fabs(q.w) > 1.0 - 1.0e-6:
            # Near identity: guard the a/sin(a) factor against division by zero.
            a = 0.0
            isina = 0.0
        else:
            a = math.acos(q.w)
            isina = a / math.sin(a)
        # Vector part of the quaternion logarithm.
        ln = (q.x * isina, q.y * isina, q.z * isina)
        for i in xrange(0, 3):
            outputHandle = dataBlock.outputValue(mlEulerToExpmap.expmapElement[i])
            outputHandle.setDouble(ln[i])
        dataBlock.setClean(plug)
|
#!/usr/bin/env python
#
# Prints a concise summary of a benchmark output as a TSV blob.
#
# Example usage:
#
# $ BenchmarkXXX_DEVICE > bench.out
# $ benchSummary.py bench.out
#
# Options SortByType, SortByName, or SortByMean may be passed after the
# filename to sort the output by the indicated quantity. If no sort option
# is provided, the output order matches the input. If multiple options are
# specified, the list will be sorted repeatedly in the order requested.
import re
import sys
assert(len(sys.argv) >= 2)
# Parses "*** vtkm::Float64 ***************" --> vtkm::Float64
typeParser = re.compile("\\*{3} ([^*]+) \\*{15}")
# Parses "Benchmark 'Benchmark name' results:" --> Benchmark name
# NOTE(review): [^-]+ stops at the first hyphen, so benchmark names that
# contain '-' will not match; [^']+ was probably intended -- confirm.
nameParser = re.compile("Benchmark '([^-]+)' results:")
# Parses "mean = 0.0125s" --> 0.0125
meanParser = re.compile("\\s+mean = ([0-9.Ee+-]+)s")
# Parses "std dev = 0.0125s" --> 0.0125
stdDevParser = re.compile("\\s+std dev = ([naN0-9.Ee+-]+)s")
filename = sys.argv[1]
benchFile = open(filename, 'r')
# Optional sort options (SortByType / SortByName / SortByMean), applied in order.
sortOpt = None
if len(sys.argv) > 2:
    sortOpt = sys.argv[2:]
class BenchKey:
    """Hashable (name, type) identifier for one benchmark result."""
    def __init__(self, name_, type_):
        self.name = name_
        self.type = type_
    def __eq__(self, other):
        return self.name == other.name and self.type == other.type
    def __lt__(self, other):
        # Order by name first, then by type.
        if self.name < other.name: return True
        elif self.name > other.name: return False
        else: return self.type < other.type
    def __hash__(self):
        return (self.name + self.type).__hash__()

class BenchData:
    """Mean and standard deviation (in seconds) for one benchmark."""
    def __init__(self, mean_, stdDev_):
        self.mean = mean_
        self.stdDev = stdDev_

def parseFile(f, benchmarks):
    """
    Parse a benchmark output stream into *benchmarks* ({BenchKey: BenchData}).

    The "std dev" line is always the last line of a benchmark's record, so
    encountering it flushes the accumulated (type, name, mean, stdDev) state.
    """
    type = ""
    # Fix: this was initialized as an unused variable 'bench', leaving 'name'
    # unbound if a std-dev line appeared before any benchmark-name line.
    name = ""
    mean = -1.
    stdDev = -1.
    for line in f:
        typeRes = typeParser.match(line)
        if typeRes:
            type = typeRes.group(1)
            continue
        nameRes = nameParser.match(line)
        if nameRes:
            name = nameRes.group(1)
            continue
        meanRes = meanParser.match(line)
        if meanRes:
            mean = float(meanRes.group(1))
            continue
        stdDevRes = stdDevParser.match(line)
        if stdDevRes:
            stdDev = float(stdDevRes.group(1))
            # stdDev is always the last parse for a given benchmark, add entry now
            benchmarks[BenchKey(name, type)] = BenchData(mean, stdDev)
            mean = -1.
            stdDev = -1.
            continue
benchmarks = {}
parseFile(benchFile, benchmarks)
# Sort keys by type:
keys = benchmarks.keys()
if sortOpt:
    # Each option re-sorts the full list; since Python's sort is stable, the
    # last option given becomes the primary sort key.
    for opt in sortOpt:
        if opt.lower() == "sortbytype":
            keys = sorted(keys, key=lambda k: k.type)
        elif opt.lower() == "sortbyname":
            keys = sorted(keys, key=lambda k: k.name)
        elif opt.lower() == "sortbymean":
            keys = sorted(keys, key=lambda k: benchmarks[k].mean)
print("# Summary: (%s)"%filename)
print("%-9s\t%-9s\t%-s"%("Mean", "Stdev", "Benchmark (type)"))
for key in keys:
    data = benchmarks[key]
    print("%9.6f\t%9.6f\t%s (%s)"%(data.mean, data.stdDev, key.name, key.type))
|
import csv
import io
import librosa as lr
import matplotlib.pyplot as plt
import numpy as np
plt.interactive(False)
# data_saved, Sales_precision and Customers_precision must be configured below
print('lets go')
#import datasets
# Accumulators filled in place by import_csv(); kept global because the
# script's helper functions mutate them directly.
keys = []
X = [] # feature vector array
Y_Sales = [] # feature target array
Y_Customers = [] # feature target array
target_sales_row= 0
target_customers_row = 0
feature_row = []
# Raw targets are bucketed into class labels by dividing with these steps.
Sales_precision = 1000
Customers_precision = 100
def import_csv( filename ):
    """
    Parse the Rossmann training CSV into the global X / Y_Sales / Y_Customers.

    The file is semicolon-separated within a single comma-CSV column.  The
    'Store' column is one-hot encoded over store ids 1..1115; 'Sales' and
    'Customers' are bucketed into class labels via the *_precision globals.
    """
    # import datasets
    global keys
    global X # feature vector array
    global Y_Sales # feature target array
    global Y_Customers # feature target array
    global target_sales_row
    global target_customers_row
    global feature_row
    with open(filename,'rU') as f:
        reader = csv.reader(f)
        Keyget = False
        first = True
        rownum = 1
        for row in reader:
            print(rownum)
            rownum +=1
            if Keyget:
                # Data row: split the semicolon fields and dispatch by header.
                for i, value in enumerate(row[0].split(";")):
                    if "Store Type" in keys[i]:
                        #decider[keys[i]].append(int(value))
                        feature_row.append(int(value))
                    elif "Sales" in keys[i]:
                        target_sales_row=int(round(int(value)/Sales_precision))
                    elif "Customers" in keys[i]:
                        target_customers_row = int(round(int(value)/Customers_precision))
                    elif "Store" not in keys[i]:
                        # decider[keys[i]].append(int(value))
                        feature_row.append(int(value))
                    else:
                        # One-hot encode the store id over 1..1115.
                        for x in range(1,1116):
                            if(int(value) == x):
                                feature_row.append(1)
                            else:
                                feature_row.append(0)
                #feature_row = np.transpose(feature_row)
                #target_row = np.transpose(target_row)
                # if first:
                #     X =np.concatenate([X,np.array(feature_row).T])
                #     X = X.reshape((1,X.size))
                #     first = False
                # else:
                #     X =np.concatenate([X,np.array(feature_row).reshape((1,np.array(feature_row).size))])
                #     X = np.concatenate([X, np.array(feature_row).T])
                # Y_Sales = np.append(Y_Sales,target_sales_row)
                # Y_Customers =np.append(Y_Customers,target_customers_row)
                X.append(np.array(feature_row).reshape((1,np.array(feature_row).size))[0])
                Y_Sales.append( target_sales_row)
                Y_Customers.append(target_customers_row)
                feature_row = []
            else:
                # first row
                Keyget = True
                for stri in row[0].split(";"):
                    if(stri == "Store"):
                        # Expand the Store header into 1115 one-hot columns.
                        for x in range(1,1116):
                            keys.append(stri+str(x))
                    else:
                        keys.append(stri)
    # you now have a column-major 2D array of your file.
import pickle
def save_imported_data( filename ):
    """Pickle the global feature/target arrays to *filename* as one list."""
    global X # feature vector array
    global Y_Sales # feature target array
    global Y_Customers # feature target array
    # Saving the objects:
    with open(filename, 'wb') as sink: # Python 3: open(..., 'wb')
        pickle.dump([X, Y_Sales, Y_Customers], sink)
def load_imported_data(filename):
    """Restore the global feature/target arrays pickled by save_imported_data()."""
    global X # feature vector array
    global Y_Sales # feature target array
    global Y_Customers # feature target array
    # Getting back the objects:
    with open(filename,'rb') as src: # Python 3: open(..., 'rb')
        X, Y_Sales, Y_Customers = pickle.load(src)
def save_trained_data( filename , X_Sales_train, X_Sales_test, Y_Sales_train, Y_Sales_test , nn):
    """Pickle the sales train/test split together with the fitted model to *filename*."""
    # Saving the objects:
    with open(filename, 'wb') as sink: # Python 3: open(..., 'wb')
        pickle.dump([X_Sales_train, X_Sales_test, Y_Sales_train, Y_Sales_test, nn], sink)
def load_trained_data(filename):
    """Return the [X_train, X_test, Y_train, Y_test, nn] list pickled earlier."""
    # Getting back the objects:
    with open(filename, 'rb') as src: # Python 3: open(..., 'rb')
        return pickle.load(src)
# Toggle: load previously imported data instead of re-parsing the CSV.
data_saved = False
if data_saved:
    load_imported_data('objs.pkl')
    print("Data load complete")
else:
    import_csv('traindata9.csv')
    print("Data pull complete")
    # Fix: was 'objs .pkl' (stray space), so the file written here could never
    # be found by the load branch above, which reads 'objs.pkl'.
    save_imported_data('objs.pkl')
    print("Data save complete")
# machine learning part
import Machine_learn
# NOTE(review): sklearn.cross_validation was removed in modern scikit-learn;
# this requires an old release (use sklearn.model_selection on upgrade).
from sklearn import cross_validation
nn_Sales = Machine_learn.NN_1HL(hidden_layer_size=50)
nn_Customers = Machine_learn.NN_1HL(hidden_layer_size=50)
# import sklearn.datasets as datasets
# iris = datasets.load_iris()
# x = iris.data
# y = iris.target
#file = open("X.txt", "w")
#for item in decider.X:
#    file.write("%s\n" % item)
#file.close()
#file = open("Y.txt", "w")
#file.write(decider.Y)
#file.close()
# X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(x, y, test_size=0.40)
# Stratified 80/20 splits, one per target.
X_Sales_train, X_Sales_test, Y_Sales_train, Y_Sales_test = cross_validation.train_test_split(np.array(X), np.array(Y_Sales),stratify=Y_Sales, test_size=0.20)
X_customers_train, X_customers_test, Y_customers_train, Y_customers_test = cross_validation.train_test_split(np.array(X), np.array(Y_Customers),stratify=Y_Customers, test_size=0.20)
print("Test and Train data seperated")
# Sanity check: class labels must be dense 0..max for the classifier.
num_labels = len(set(Y_Sales_train))
if(num_labels == max(Y_Sales_train)+1):
    print("Sales data ok")
else:
    print("Sales data error")
num_labels = len(set(Y_customers_train))
if(num_labels == max(Y_customers_train)+1):
    print("Customer data ok")
else:
    print("Customer data error")
nn_Sales.fit(X_Sales_train, Y_Sales_train)
from matplotlib import pyplot as plt
plt.figure(1)
print("Sales Trained")
data_saved = False
if data_saved:
    X_Sales_train, X_Sales_test, Y_Sales_train, Y_Sales_test, nn_Sales = load_trained_data('objs_sales.pkl')
    print("Data load complete")
else:
    #save_trained_data('objs_sales.pkl',X_Sales_train, X_Sales_test, Y_Sales_train, Y_Sales_test,nn_Sales)
    print("Data save complete")
from sklearn.metrics import accuracy_score
predictions_Sales = nn_Sales.predict(X_Sales_test)
score_Sales=accuracy_score(Y_Sales_test, nn_Sales.predict(X_Sales_test))
print("accuracy Sales: " , score_Sales )#
## The line / model
# Scatter the predicted vs. true sales buckets.
plt.subplot(221)
plt.scatter(Y_Sales_test, predictions_Sales)
plt.xlabel('True Values')
plt.ylabel('Predictions')
plt.title('Sales')
nn_Customers.fit(X_customers_train, Y_customers_train)
print("Customers Trained")
data_saved = False
if data_saved:
    X_customers_train, X_customers_test, Y_customers_train, Y_customers_test, nn_Customers = load_trained_data('objs_customers.pkl')
    print("Data load complete")
else:
    #save_trained_data('objs_customers.pkl', X_customers_train, X_customers_test, Y_customers_train, Y_customers_test ,nn_Customers)
    print("Data save complete")
predictions_Customers =nn_Customers.predict(X_customers_test)
score_Customers=accuracy_score(Y_customers_test,predictions_Customers )
## The line / model
# Scatter the predicted vs. true customer buckets.
plt.subplot(222)
plt.scatter(Y_customers_test, predictions_Customers)
plt.xlabel('True Values')
plt.ylabel('Predictions')
plt.title('Customers')
print("accuracy Customers: " , score_Customers )#
plt.show()
import pandas as pd
def dataframe_summary(df):
    """Summarize a dataframe column by column.

    Prints the overall shape, then returns a dataframe indexed by column
    name with:

    * ``Type``            -- dtype of the column
    * ``Distinct values`` -- number of distinct values (NaN counts as one)
    * ``Values``          -- unique values (shows a maximum of 10 unique values)
    * ``% Missing``       -- percentage of missing values, rounded to 2 places

    :param df: dataframe to summarize
    :return: summary dataframe, or ``None`` if ``df`` has no rows
    """
    print("Database has {:,} rows and {:,} columns".format(df.shape[0], df.shape[1]))
    # Nothing to summarize without rows (avoids division by zero below).
    if df.shape[0] == 0:
        return
    # dtype of each column
    data_types = pd.DataFrame(df.dtypes, columns=["Type"])
    # sample of unique values (at most 10 per column)
    applied = df.apply(lambda x: [x.unique()[:10]])
    applied = pd.DataFrame(applied.T).rename(columns={0: 'Values'})
    # percentage of missing values per column
    missing = pd.DataFrame(round(df.isnull().sum() / len(df) * 100, 2)).rename(columns={0: '% Missing'})
    # distinct value counts; dropna=False keeps NaN counted, matching the
    # previous len(x.unique()) behavior, but vectorized
    applied_counts = pd.DataFrame(df.nunique(dropna=False), columns=["Distinct values"])
    # one summary row per column of the input
    final_df = pd.concat([data_types, applied_counts, applied, missing], axis=1)
    return final_df
|
# -*- coding: UTF-8 -*-
# Copyright 2016-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
'''
Causes one or several :xfile:`help_texts.py` files to be generated
after each complete build of the doctree.
See :doc:`/dev/help_texts` for a topic overview.
Usage
=====
In your :xfile:`conf.py` file, add
:mod:`lino.sphinxcontrib.help_texts_extractor` to your ``extensions``
and define a ``help_texts_builder_targets`` setting::
extensions += ['lino.sphinxcontrib.help_texts_extractor']
help_texts_builder_targets = {
'lino_algus.': 'lino_algus.lib.algus'
}
Internals
=========
This builder traverses the doctree in order to find `object
descriptions
<http://www.sphinx-doc.org/en/stable/extdev/nodes.html>`_, i.e. text
nodes defined by Sphinx and inserted e.g. by the :rst:dir:`class` and
:rst:dir:`attribute` directives (which potentially have been inserted
by autodoc and autosummary).
Example of a class description::
<desc desctype="class" domain="py" noindex="False" objtype="class">
<desc_signature class="" first="False" fullname="Plan" ids="..." module="..." names="...">
<desc_annotation>class </desc_annotation>
<desc_addname>lino_xl.lib.invoicing.models.</desc_addname>
<desc_name>Plan</desc_name>
<desc_parameterlist>
<desc_parameter>*args</desc_parameter>
<desc_parameter>**kwargs</desc_parameter>
</desc_parameterlist>
</desc_signature>
<desc_content>
<paragraph>Bases: <reference internal="False" reftitle="(in Lino v1.7)" refuri="http://www.lino-framework.org/api/lino.modlib.users.mixins.html#lino.modlib.users.mixins.UserAuthored"><literal classes="xref py py-class">lino.modlib.users.mixins.UserAuthored</literal></reference>
</paragraph>
<paragraph>An <strong>invoicing plan</strong> is a rather temporary database object which represents the plan of a given user to have Lino generate a series of invoices.
</paragraph>
<index entries="..."/>
<desc desctype="attribute" objtype="attribute">
<desc_signature class="Plan" first="False" fullname="Plan.user" ids="..." module="..." names="...">
<desc_name>user</desc_name>
</desc_signature>
<desc_content/>
</desc>
<desc desctype="attribute" ... objtype="attribute">
<desc_signature class="Plan" first="False" fullname="Plan.journal" ids="..." module="..." names="...">
<desc_name>journal</desc_name>
</desc_signature>
<desc_content>
<paragraph>The journal where to create invoices. When this field is
empty, you can fill the plan with suggestions but cannot
execute the plan.</paragraph>
</desc_content>
</desc>
...
Example of a field description::
<desc desctype="attribute" domain="py" noindex="False" objtype="attribute">
<desc_signature class="Plan" first="False" fullname="Plan.journal"
ids="lino_xl.lib.invoicing.models.Plan.journal"
module="lino_xl.lib.invoicing.models"
names="lino_xl.lib.invoicing.models.Plan.journal">
<desc_name>journal</desc_name>
</desc_signature>
<desc_content>
<paragraph>
The journal where to create invoices. When this field is
empty, you can fill the plan with suggestions but cannot
execute the plan.
</paragraph>
</desc_content>
</desc>
'''
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
import six
from docutils import nodes
from docutils import core
from sphinx import addnodes
from importlib import import_module
from unipath import Path
from lino.core.utils import simplify_name
useless_starts = set(['lino.core'])
useless_endings = set(['.My', '.ByUser'])
# useless_endings = set(['.VentilatingTable', '.My', '.ByUser',
# '.Table', '.AbstractTable', '.VirtualTable',
# '.Actor'])
HEADER = """# -*- coding: UTF-8 -*-
# generated by lino.sphinxcontrib.help_text_builder
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
"""
def node2html(node):
    """Render *node* (a docutils doctree node) to an HTML body fragment."""
    return core.publish_from_doctree(node, writer_name="html")['body']
class HelpTextExtractor(object):
    """Collect object descriptions from Sphinx doctrees and write them to
    per-package :xfile:`help_texts.py` files when the build finishes.
    """

    def initialize(self, app):
        """`builder-inited` handler: resolve the target files from the
        ``help_texts_builder_targets`` config value."""
        self.name2dict = dict()
        self.name2file = dict()
        # we must write our files only when all documents have been
        # processed (i.e. usually after a "clean")
        self.docs_processed = 0
        targets = app.env.config.help_texts_builder_targets
        for root, modname in targets.items():
            mod = import_module(modname)
            htf = Path(mod.__file__).parent.child('help_texts.py')
            self.name2file[root] = htf
            self.name2dict[root] = OrderedDict()
        print("Collecting help texts for {}".format(
            ' '.join(self.name2file.values())))

    def extract_help_texts(self, app, doctree):
        """`doctree-read` handler: store the help text of every Python
        class or attribute description found in *doctree*."""
        for node in doctree.traverse(addnodes.desc):
            # classes and attributes are the only object types we document
            if node['domain'] == 'py' and node['objtype'] in ('class', 'attribute'):
                self.store_content(node)
        self.docs_processed += 1

    def write_help_texts_files(self, app, exception):
        """`build-finished` handler: write one help_texts.py per target.

        Does nothing after a failed or partial build, since a partial
        build would produce incomplete files.
        """
        if exception:
            return
        if self.docs_processed < len(app.env.found_docs):
            app.info(
                "Don't write help_texts.py files because "
                "only {0} of {1} docs have been processed".format(
                    self.docs_processed,
                    len(app.env.found_docs)))
            return
        # NOTE: the loop variables are named root/fn and name/text so the
        # inner loop no longer shadows the outer `k` as it used to.
        for root, fn in self.name2file.items():
            texts = self.name2dict.get(root, None)
            if not texts:
                app.info("No help texts for {}".format(root))
                continue
            print("Writing {} help texts for {} to {}".format(
                len(texts), root, fn))
            # context manager guarantees the file is closed even on error
            with open(fn, "w") as fd:
                def writeln(s):
                    if six.PY2:
                        s = s.encode('utf-8')
                    fd.write(s)
                    fd.write("\n")
                writeln(HEADER)
                writeln("help_texts = {")
                for name, text in texts.items():
                    writeln(''' '{}' : _("""{}"""),'''.format(name, text))
                writeln("}")

    def store_content(self, node):
        """Store the first real content paragraph of *node* under every
        name in its signature."""
        sig = []
        content = []
        for c in node.children:
            if isinstance(c, addnodes.desc_content):
                for cc in c.children:
                    if isinstance(cc, nodes.paragraph):
                        p = cc.astext()
                        # skip the autogenerated "Bases: ..." paragraph and
                        # keep only the first real paragraph
                        if not p.startswith("Bases:"):
                            if len(content) == 0:
                                content.append(p)
            elif isinstance(c, addnodes.desc_signature):
                sig.append(c)
        sig = sig[0]  # a desc node carries exactly one signature
        if not content:
            return
        content = '\n'.join(content)
        # the text gets embedded in a triple-quoted string in the generated
        # file, so it may not contain '"""' and may not start/end with '"'
        if '"""' in content:
            msg = '{} : First paragraph of content may not contain \'"""\'. '
            raise Exception(msg.format(sig['names'][0]))
        if content.startswith('"'):
            content = " " + content
        if content.endswith('"'):
            content += " "
        for name in sig['names']:
            self.sig2dict(name, content)

    def sig2dict(self, name, value):
        """File *value* under *name* in every target dict whose root
        prefix matches; uninteresting names are dropped."""
        for e in useless_starts:
            if name.startswith(e):
                return
        for e in useless_endings:
            if name.endswith(e):
                return
        name = simplify_name(name)
        for root, d in self.name2dict.items():
            if name.startswith(root):
                d[name] = value
def setup(app):
    """Sphinx extension entry point: wire a HelpTextExtractor into the build."""
    extractor = HelpTextExtractor()
    app.add_config_value('help_texts_builder_targets', {}, 'env')
    # collect during the build, write everything out at the end
    app.connect('builder-inited', extractor.initialize)
    app.connect('doctree-read', extractor.extract_help_texts)
    app.connect('build-finished', extractor.write_help_texts_files)
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from psm import EventRecordable
class Trigger(EventRecordable):
    """ Trigger serves as the entry point for a psm's generation and state recovery.
    Also, trigger holds all the data inputs to the psm from the outside world!
    """

    # Mapping of required field name -> converter callable, applied by
    # regularize(); subclasses override this.
    fields = {}
    # Key prefix used when merging this trigger's data into aggregated data.
    _psm_data_prefix = ""

    def __init__(self, logid=None, name=None, data=None, timestamp=None):
        """
        :param logid: identifier of the originating log event
        :param name: trigger name; defaults to the class name when omitted
        :param data: payload dict; a fresh empty dict when omitted
        :param timestamp: event timestamp
        """
        self.logid = logid
        if name:
            self.name = name
        else:
            self.name = self.__class__.__name__
        # BUG FIX: the default used to be a shared mutable `data={}`; give
        # each instance its own dict so triggers cannot leak state into
        # each other.
        self.data = {} if data is None else data
        self.timestamp = timestamp

    @classmethod
    def recoverSpecific(cls, event):
        "For all Trigger's subclasses' recovery from log"
        trigger = cls(event.event_id, event.event_name, event.data, event.timestamp)
        trigger.regularize()
        return trigger

    def log(self, logger):
        """Record this trigger through *logger*."""
        logger.logTrigger(self.__class__.__name__, self.data)

    def store(self, data):
        """Replace the payload and re-apply field conversion."""
        self.data = data
        self.regularize()

    def aggregate_data(self, aggregated_data):
        """Merge this trigger's payload into *aggregated_data* under the
        class's prefix. 'trainingloss' entries accumulate in a list; all
        other prefixes merge dicts (existing keys win)."""
        if not self.data:
            return
        prefix = self._psm_data_prefix
        if prefix not in aggregated_data:
            if prefix == 'trainingloss':
                aggregated_data[prefix] = []
            else:
                aggregated_data[prefix] = {}
        if prefix == 'trainingloss':
            aggregated_data[prefix].append(self.data)
        else:
            # existing aggregated values take precedence over self.data
            aggregated_data[prefix] = {**self.data, **aggregated_data[prefix]}

    def regularize(self):
        """Validate required fields and apply their converters.

        Works on a deep copy so self.data is untouched if a field is
        missing.

        :raises AttributeError: when a required field is absent
        """
        result = copy.deepcopy(self.data)
        for field in self.fields:
            if field not in self.data:
                raise AttributeError('Required field <{}> not found'.format(field))
            result[field] = self.fields[field](self.data[field])
        self.data = result
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Vincent Batoufflet.
#
# License: BSD, see LICENSE for more details.
#
from flask import Response, flash, redirect, render_template, request, session, url_for
from flask.ext.babel import _
from plume import ATOM_LIMIT, FILE_PREFIX, HELP_PREFIX
from plume.backend import *
from plume.exception import *
from plume.renderer import render_diff, render_document
from werkzeug.contrib.atom import AtomFeed
from werkzeug.http import http_date
def do_atom(path=None):
    """Build an Atom feed of recent changes.

    For a specific *path*, list that document's history (up to ATOM_LIMIT
    entries); for '/', merge the history of every document and keep the
    ATOM_LIMIT most recent entries overall.
    """
    feed = AtomFeed('Changes' + (' - %s' % path if path != '/' else ''), feed_url=request.url, url=request.url_root)
    history = []
    if path != '/':
        for entry in get_history(path):
            # history entries lack the path; insert it after the date
            entry.insert(1, path)
            history.append(entry)
            if len(history) == ATOM_LIMIT:
                break
    else:
        for path in get_documents_list():
            for entry in get_history(path):
                entry.insert(1, path)
                history.append(entry)
        # newest first, truncated to the feed limit
        history = sorted(history, key=lambda x: x[0], reverse=True)[:ATOM_LIMIT]
    for date, path, rev, author, desc in history:
        feed.add(path, desc if desc != '-' else 'No summary available',
                 url=url_for('index', path=path, do='compare', to=rev),
                 author=author,
                 updated=date)
    return feed.get_response()
def do_compare(path):
    """Render a diff between two revisions of the document at *path*."""
    # file attachments cannot be diffed
    if path.startswith(FILE_PREFIX):
        raise InvalidRequest()
    # default to comparing the latest revision against its predecessor
    newer = int(request.args.get('to', get_last_revision(path)))
    older = int(request.args.get('from', newer - 1))
    # Render document diff
    diff = render_diff(get_file_path(path, rev=older), get_file_path(path, rev=newer))
    return render_template('wiki/compare.html', content=diff)
def do_delete(path):
    """Delete a document (by saving an empty revision) after confirmation."""
    if request.method == 'POST' and request.form.get('validate'):
        # Save empty document
        save_document(path, '', user=session.get('login'))
        return redirect(url_for('index', path=path))
    return render_template('wiki/delete.html')
def do_edit(path):
    """Edit a document or upload a file attachment.

    On POST with 'save': stores a new revision (for FILE_PREFIX paths the
    uploaded attachment's content, otherwise the submitted form data),
    releases the edit lock and redirects to the document. Otherwise shows
    the edit form via do_show(action='edit').
    """
    if request.method == 'POST':
        if 'save' in request.form:
            # Save new document revision
            if path.startswith(FILE_PREFIX):
                attachment = request.files.get('attachment')
                if not path[len(FILE_PREFIX):]:
                    # no name given yet: derive the path from the upload
                    unnamed = True
                    path = FILE_PREFIX + attachment.filename
                else:
                    unnamed = False
                # re-uploads must keep the same mime type as the original
                if unnamed or check_mimetype(path, attachment.filename):
                    save_document(path, attachment.stream.read(), user=session.get('login'))
                else:
                    flash(_('Document type differs from the previous one.'), 'error')
                    return redirect(url_for('index', path=path, do='edit'))
            else:
                save_document(path, request.form.get('data'), user=session.get('login'))
            # Unlock document
            lock_document(path, (session.get('login'), session.get('id')), unlock=True)
            return redirect(url_for('index', path=path))
    return do_show(path, action='edit')
def do_history(path):
    """Render the revision history of the document at *path*."""
    entries = get_history(path)
    return render_template('wiki/history.html', history=entries,
                           is_file=path.startswith(FILE_PREFIX))
def do_move(path):
    """Move/rename a document after user confirmation."""
    if request.method == 'POST':
        if request.form.get('validate'):
            destination = request.form.get('path')
            try:
                move_document(path, destination)
                path = destination
            except DocumentAlreadyExists:
                flash(_('A document with the same path already exists.'), 'error')
                return redirect(url_for('index', path=path, do='move'))
        return redirect(url_for('index', path=path))
    return render_template('wiki/move.html')
def do_render(path, toc=False):
    """Render the document body alone; file attachments are not renderable."""
    if not path.startswith(FILE_PREFIX):
        return do_show(path, action='render')
    raise InvalidRequest()
def do_show(path, action='show'):
    """Display a document; also backs the 'edit' and 'render' actions.

    Handles built-in help documents, edit-lock management, revision info
    and 404 responses for missing documents.
    """
    content = None
    rev_info = None
    lock_info = None
    # Handle help document
    if path.startswith(HELP_PREFIX):
        if action not in ('render', 'show'):
            raise InvalidRequest()
        # Get document data
        try:
            mimetype, size, content = get_help(path[len(HELP_PREFIX):])
        except Exception:
            # BUG FIX: the old `mimetype, size, content = None` raised
            # TypeError (cannot unpack None); assign one value per name.
            mimetype, size, content = None, 0, None
        if action == 'render':
            return render_document(content)
        else:
            return Response(render_template('wiki/show.html', content=content, immutable=True),
                            404 if content is None else 200)
    # Check for file document
    is_file = path.startswith(FILE_PREFIX)
    # Unlock document if needed
    if request.args.get('discard'):
        lock_document(path, (session.get('login'), session.get('id')), unlock=True)
        return redirect(url_for('index', path=path))
    elif action == 'edit' and request.args.get('force'):
        lock_document(path, (session.get('login'), session.get('id')), unlock=True, force=True)
        return redirect(url_for('index', path=path, do='edit'))
    # Lock document for edition
    if action == 'edit':
        lock_info = lock_document(path, (session.get('login'), session.get('id')))
    # Get document data
    try:
        mimetype, size, meta, content = get_document(path, request.args.get('rev'),
                                                     meta=(action == 'edit'), preview=is_file)
    except DocumentNotFound:
        mimetype, size, meta, content = None, 0, None, None
        if action == 'edit':
            meta = None
    # Handle normal documents
    if action == 'edit' and 'data' in request.form:
        content = request.form.get('data')
    else:
        if content is not None:
            # Get last revision information
            rev_info = get_history(path, rev=request.args.get('rev', -1))
        elif is_file and not path[len(FILE_PREFIX):]:
            # Force edition if unnamed file
            action = 'edit'
    # Render document and exit if needed
    if action == 'render':
        return render_document(content)
    response = Response(render_template('wiki/%s.html' % action, is_file=is_file, mimetype=mimetype,
                                        meta=meta, content=content, rev_info=rev_info, lock_info=lock_info),
                        404 if content is None else 200)
    if action == 'edit':
        # the edit form must never be served from cache
        response.cache_control.no_cache = True
    return response
def do_source(path):
    """Serve the raw source of a document, honoring If-Modified-Since.

    NOTE(review): relies on `datetime` and `os` being provided by the
    wildcard import from plume.backend -- verify.
    """
    file_path = get_file_path(path, request.args.get('rev'))
    # 304 when the on-disk file has not changed since the client's copy
    if request.if_modified_since and \
            datetime.utcfromtimestamp(int(os.path.getmtime(file_path))) <= request.if_modified_since:
        return Response(status=304)
    # Show document source
    mimetype, size, meta, content = get_document(path, request.args.get('rev'), meta=True)
    response = Response(content, direct_passthrough=True)
    response.headers.add('Content-Length', size)
    # file attachments keep their own mime type; wiki pages are plain text
    response.headers.add('Content-Type', mimetype if path.startswith(FILE_PREFIX) else 'text/plain; charset=utf-8')
    if request.args.get('download'):
        response.headers.add('Content-Disposition', 'attachment; filename=' + path[len(FILE_PREFIX):])
    response.headers.add('Last-Modified', http_date(os.path.getmtime(file_path)))
    return response
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
import logging
from asyncio import StreamReader, StreamWriter
from typing import AsyncGenerator, Optional
from idb.common.types import IdbException
from idb.grpc.idb_grpc import CompanionServiceStub
from idb.grpc.idb_pb2 import DapRequest, DapResponse
from idb.grpc.stream import Stream
from idb.utils.contextlib import asynccontextmanager
from idb.utils.typing import none_throws
class RemoteDapServer:
    """
    Manage the connection to the remote dap server spawn by the companion
    """

    def __init__(
        self,
        stream: Stream[DapRequest, DapResponse],
        logger: logging.Logger,
    ) -> None:
        # bidirectional grpc stream to the companion's dap endpoint
        self._stream = stream
        self.logger = logger

    @staticmethod
    @asynccontextmanager
    async def start(
        stub: CompanionServiceStub, logger: logging.Logger, pkg_id: str
    ) -> AsyncGenerator["RemoteDapServer", None]:
        """
        Created a RemoteDapServer starting a new grpc stream and sending a start dap server request to companion
        """
        logger.info("Starting dap connection")
        async with stub.dap.open() as stream:
            await stream.send_message(
                DapRequest(start=DapRequest.Start(debugger_pkg_id=pkg_id))
            )
            response = await stream.recv_message()
            logger.debug(f"Dap response after start request: {response}")
            if response and response.started:
                logger.info("Dap stream ready to receive messages")
                dap_server = RemoteDapServer(
                    stream=stream,
                    logger=logger,
                )
                try:
                    yield dap_server
                finally:
                    # always tear the remote server down, even if the
                    # context body raised
                    await dap_server.__stop()
            else:
                logger.error(f"Starting dap server failed! {response}")
                raise IdbException("Failed to spawn dap server.")
        logger.info("Dap grpc stream is closed.")

    async def pipe(
        self,
        input_stream: StreamReader,
        output_stream: StreamWriter,
        stop: asyncio.Event,
    ) -> None:
        """
        Pipe stdin and stdout to remote dap server
        """
        # one pending future per direction; each is recreated after it
        # completes so both directions stay armed concurrently
        read_future: Optional[asyncio.Future[StreamReader]] = None
        write_future: Optional[asyncio.Future[StreamWriter]] = None
        stop_future = asyncio.ensure_future(stop.wait())
        while True:
            if read_future is None:
                read_future = asyncio.ensure_future(self._stream.recv_message())
            if write_future is None:
                write_future = asyncio.ensure_future(
                    read_next_dap_protocol_message(input_stream)
                )
            # wake up when either direction has data or a stop is requested
            done, pending = await asyncio.wait(
                [read_future, write_future, stop_future],
                return_when=asyncio.FIRST_COMPLETED,
            )
            if stop_future in done:
                self.logger.debug("Received stop command! Closing stream...")
                read_future.cancel()
                self.logger.debug("Read future cancelled!")
                write_future.cancel()
                self.logger.debug("Write future cancelled!")
                break
            if write_future in done:
                # forward one complete DAP message from local stdin
                data = none_throws(write_future.result())
                write_future = None
                await self._stream.send_message(
                    DapRequest(pipe=DapRequest.Pipe(data=data))
                )
            if read_future in done:
                self.logger.debug("Received a message from companion.")
                # NOTE(review): none_throws should make the following None
                # check unreachable -- verify intended end-of-stream handling
                result = none_throws(read_future.result())
                read_future = None
                if result is None:
                    # Reached the end of the stream
                    break
                output_stream.write(result.stdout.data)

    async def __stop(self) -> None:
        """
        Stop remote dap server and end grpc stream
        """
        self.logger.debug("Sending stop dap request to close the stream.")
        await self._stream.send_message(DapRequest(stop=DapRequest.Stop()))
        await self._stream.end()
        response = await self._stream.recv_message()
        if response and not response.stopped:
            self.logger.error(f"Dap server failed to stop: {response}")
        else:
            self.logger.info(f"Dap server successfully stopped: {response}")
async def read_next_dap_protocol_message(stream: StreamReader) -> bytes:
    """Read one DAP protocol message (header + body) from *stream*.

    Messages are framed as ``Content-Length: <n>\\r\\n\\r\\n<body>``.
    Returns the raw bytes of the complete message, header included.
    """
    content_length = await stream.readuntil(b"\r\n\r\n")
    # header is e.g. b"Content-Length: 119\r\n\r\n"; int() tolerates the
    # trailing CRLFs left after the split
    length = content_length.decode("utf-8").split(" ")[1]
    # BUG FIX: StreamReader.read(n) may return fewer than n bytes;
    # readexactly() guarantees the whole body is consumed.
    body = await stream.readexactly(int(length))
    return content_length + body
|
"""Update-related models.
.. moduleauthor:: Glen Larsen, glenl.glx at gmail.com
"""
from django.db import models
from mutopia.models import Piece, Instrument
class InstrumentMap(models.Model):
    """Normalize instruments by mapping names to specific instruments.

    We want users to specify known instruments in the
    :class:`mutopia.models.Instrument` table but this is not easily
    regulated with user input. The ``RawInstrumentMap`` maps these
    un-regulated names to rows in the
    :class:`mutopia.models.Instrument` table. This table can be used
    for nicknames (*uke* ==> *ukulele*) as well as common
    misspellings, plurals, or foreign names.
    """

    #:An instrument name that may be a nickname (*uke*) or a common
    #:non-English name (*guitarre*) that can be mapped to a name in
    #:the :class:`mutopia.models.Instrument` table.
    raw_instrument = models.CharField(primary_key=True, max_length=64)
    #:Reference to the :class:`mutopia.models.Instrument` table
    instrument = models.ForeignKey(Instrument, models.CASCADE)

    @classmethod
    def translate(cls, candidate):
        """Match a name to an internally known instrument.

        Given an instrument name of dubious quality, attempt to
        translate it to a pre-defined set of instrument names. The
        goal is smooth and accurate searches: we want users to be able
        to find music for a ukulele whether they search using the
        string *uke* or *ukulele*.

        If the given instrument matches one in the
        :class:`mutopia.Instrument` table, just return that name.
        Otherwise, look for match in
        :class:`mutopia.RawInstrumentMap`.

        :param str candidate: The candidate instrument name
        :return: The matched (normalized) instrument, or ``None``
        :rtype: Instrument
        """
        # First try the Instrument table directly (candidate is
        # capitalized to match the stored naming convention)
        try:
            instr = Instrument.objects.get(pk=candidate.capitalize())
            return instr
        except Instrument.DoesNotExist:
            # Handle expected exception
            pass
        # Fall back to the nickname/misspelling map, as-is.
        try:
            imap = InstrumentMap.objects.get(raw_instrument=candidate)
            return imap.instrument
        except InstrumentMap.DoesNotExist:
            # Handle expected exception
            pass
        # No match at all.
        return None

    def __str__(self):
        return self.raw_instrument
|
import codecs
class Node(object):
    """A node identified by raw id bytes.

    Attributes:
        id: the node id as a bytes object (20 bytes / 160 bits expected)
        remote: optional remote endpoint information
        hash: the id interpreted as a big-endian integer
    """

    def __init__(self, id, remote=None):
        """Create a node from its raw *id* bytes and optional *remote* info."""
        self.id = id
        self.remote = remote
        self.hash = int.from_bytes(id, "big")

    def distance(self, nodeHash):
        """Return the XOR distance between this node and *nodeHash*."""
        return self.hash ^ nodeHash

    def get_hash_string(self):
        """Return the node id as a lowercase hex string."""
        return self.id.hex()
|
import logging
import psycopg2
from psycopg2 import Warning as DBWarning, Error as DBError
from gevent.lock import RLock
from time import time
from turbo_config import db_host, db_name, db_user, db_pass, db_max_age
# Logger
logger = logging.getLogger('sticks.db')
class DBSession(object):
    """Singleton wrapper around a psycopg2 connection.

    The connection is opened lazily via the ``connection`` property and
    refreshed once it is older than ``db_max_age`` seconds.
    """
    # class-level state shared by the singleton
    _instance = None
    _conn = None
    _initialized = None
    _refresh_lock = RLock()

    def __new__(cls):
        # classic singleton: always hand back the same instance
        if DBSession._instance is None:
            DBSession._instance = object.__new__(cls)
        return DBSession._instance

    def is_alive(self):
        # the connection's .closed attribute is 0 while it is open
        return DBSession._conn is not None and DBSession._conn.closed == 0

    def needs_refresh(self):
        # refresh when never initialized or older than db_max_age seconds
        return (DBSession._initialized is None or
                DBSession._initialized + db_max_age < time())

    def close(self):
        if self.is_alive():
            logger.info('Closing DB connection.')
            DBSession._conn.close()

    @property
    def connection(self):
        """Return a live connection, reconnecting under a lock if needed."""
        with DBSession._refresh_lock:
            if not self.is_alive() or self.needs_refresh():
                if not self.needs_refresh():
                    # alive-check failed although the age limit was not hit
                    logger.warning('DB connection died before a refresh.')
                self.close()
                logger.info('Opening DB connection.')
                kwargs = {
                    'host': db_host,
                    'database': db_name,
                    'user': db_user,
                    'password': db_pass
                }
                DBSession._conn = psycopg2.connect(**kwargs)
                DBSession._initialized = time()
            return DBSession._conn


# No-op references: keep the DBWarning/DBError imports re-exported for
# callers of this module (and not flagged as unused by linters).
DBWarning
DBError
|
# -*- coding: utf-8 -*-
import logging
from puzzle.models.sql import Suspect
logger = logging.getLogger(__name__)
class SuspectActions(object):
    """Mixin with CRUD helpers for Suspect records."""

    def add_suspect(self, case_obj, variant_obj):
        """Link a suspect to a case."""
        suspect = Suspect(
            case=case_obj,
            variant_id=variant_obj.variant_id,
            name=variant_obj.display_name,
        )
        self.session.add(suspect)
        self.save()
        return suspect

    def suspect(self, suspect_id):
        """Fetch a suspect."""
        return self.query(Suspect).get(suspect_id)

    def delete_suspect(self, suspect_id):
        """De-link a suspect from a case."""
        obj = self.suspect(suspect_id)
        logger.debug("Deleting suspect {0}".format(obj.name))
        self.session.delete(obj)
        self.save()
|
"""Module with utilities functions and constants for expression parsing."""
import re
# Dictionary with all supported operations and their priorities.
SUPPORTED_OPERATIONS = {
    "+": 1, "-": 1,
    "*": 2, "/": 2, "//": 2, "%": 2,
    "^": 3,
    "<": 4, "<=": 4, "==": 4, "!=": 4, ">=": 4, ">": 4
}

# Regular expression to match operations.
# BUG FIX: the old class `[\\+-/%^*<>=!]` contained a stray backslash and
# the character range `+-/`, which also matched ',' '.' and '\'. List
# exactly the supported operator characters instead ('-' escaped).
__OPERATION_REGEXP = r'^[+\-*/%^<>=!]+$'

# Regular expression to match constants and functions names.
__TEXT_REGEXP = r'^[A-Za-z0-9_]+$'


def is_number(number):
    """ Determines whether argument is a number or not.

    Only unsigned decimal literals with at most one dot qualify.
    """
    return number.replace(".", "", 1).isdigit()


def is_operation(text):
    """ Determines whether argument is a operation or not.

    Returns a match object (truthy) when *text* consists solely of
    operator characters, otherwise None.
    """
    return re.match(__OPERATION_REGEXP, text)


def is_text(text):
    """ Determines whether argument is a text (constant/function name) or not. """
    return re.match(__TEXT_REGEXP, text)
#!/usr/bin/env python3
import nltk
import re
import sys
# Output files: one line per citation, parallel across the two files.
bo_file = open('citations.da-bo', 'w')
da_file = open('citations.da', 'w')

# citation markup of the form <first part{second part}>
citationmatcher = r'\<([^\{]+)\{([^\}]+)\}\>'
# removes a parenthesized span including its content
parenth_stripper = r'\(.+\)'
# removes only the parenthesis characters, keeping their content
parenth_only_stripper = r'[\(\)]'
# phonetic/markup metacharacters to drop
metachars_stripper = r'[#ʁɔː]'
# bracketed editorial notes (non-greedy)
brackets_stripper = r'\[.+?\]'

errors = 0
for line in open(sys.argv[1], 'r'):
    line = line.replace("\n", " ").strip()
    if not line:
        continue
    matches = re.match(citationmatcher, line)
    try:
        # group 1 -> bo side, group 2 -> da side
        # NOTE(review): assumes every valid line starts with the citation
        # markup; anything else is counted as an error and skipped
        bo, da = matches[1], matches[2]
    except:
        errors += 1
        continue
    # tokenize both sides, then strip metacharacters and bracketed notes
    da = ' '.join(nltk.word_tokenize(da))
    bo = ' '.join(nltk.word_tokenize(bo))
    da = re.sub(metachars_stripper, '', da)
    bo = re.sub(metachars_stripper, '', bo)
    da = re.sub(brackets_stripper, '', da)
    bo = re.sub(brackets_stripper, '', bo)
    # "flat" variant: parentheses removed but their content kept
    da_flat, bo_flat = re.sub(parenth_only_stripper, '', da), re.sub(parenth_only_stripper, '', bo)
    if da_flat != da or bo_flat != bo:
        # parentheses were present: also emit a "simple" variant with the
        # whole parenthesized span removed
        da_simple, bo_simple = re.sub(parenth_stripper, '', da), re.sub(parenth_stripper, '', bo)
        if bo_flat.strip() and da_flat.strip():
            bo_file.write(bo_flat + "\n")
            da_file.write(da_flat + "\n")
        if bo_simple.strip() and da_simple.strip():
            bo_file.write(bo_simple + "\n")
            da_file.write(da_simple + "\n")
    else:
        if bo.strip() and da.strip():
            bo_file.write(bo + "\n")
            da_file.write(da + "\n")

print('-- skipped', errors, 'expressions')
|
from http import HTTPStatus
from flask_restplus import Resource, reqparse, abort
from werkzeug.datastructures import FileStorage
from app.api import file_storage
from app.api.namespaces import aspects
from app.authorization.permissions import EditAspectPermission
from database import db
from database.models import Aspect
@aspects.route('/<int:{}>/image'.format('aspect_id'), endpoint='aspect_image')
@aspects.param('aspect_id', description='Aspect identifier')
class AspectImageResource(Resource):
    """REST resource exposing an aspect's image (GET to fetch, PUT to replace)."""

    @aspects.produces(['image/png'])
    def get(self, aspect_id):
        """
        Get aspect image
        """
        aspect = Aspect.query.get(aspect_id)
        if aspect is None:
            return abort(HTTPStatus.NOT_FOUND, message='Aspect is not found')
        if aspect.image_path is None:
            # CONSISTENCY FIX: pass the text via the `message` keyword,
            # matching every other abort() call in this resource
            return abort(HTTPStatus.NOT_FOUND, message='Aspect image is not found')
        return file_storage.download(file_storage.FileCategory.AspectImage, aspect.image_path)

    # Request parser for the PUT payload: a single uploaded image file.
    image_payload = reqparse.RequestParser()
    image_payload.add_argument('image', required=True, type=FileStorage, location='files', help="New aspect image")

    @aspects.expect(image_payload)
    @aspects.response(HTTPStatus.FORBIDDEN, description="User is not authorized to edit the aspect")
    @aspects.response(HTTPStatus.OK, description="Aspect image successfully changed")
    def put(self, aspect_id):
        """
        Replace aspect image

        * User can set the aspect image **if it does not exists**.
        This is done to set the image after creating the aspect
        * User with permission to **"edit aspects"** can replace the aspect image
        """
        aspect = Aspect.query.get(aspect_id)
        if aspect is None:
            return abort(HTTPStatus.NOT_FOUND, message='Aspect is not found')
        if aspect.image_path is not None:
            # replacing an existing image requires the edit permission;
            # first-time uploads are open to any user
            if not EditAspectPermission.can():
                return abort(HTTPStatus.FORBIDDEN, message="User is not authorized to edit the aspect")
        args = self.image_payload.parse_args()
        token = file_storage.upload(file_storage.FileCategory.AspectImage, args['image'])
        aspect.image_path = token.path
        db.session.commit()
        return "Aspect image successfully changed"
|
# !/usr/bin/env python
# coding=utf-8
# @Time : 2020/4/25 18:07
# @Author : yingyuankai@aliyun.com
# @File : __init__.py
from .bert_for_qa import BertForQA |
# app.py
import os
import sys
from flask import Flask
# Make the application directory importable regardless of the working
# directory the WSGI server starts from.
sys.path.insert(0, os.path.dirname(__file__))
app = Flask(__name__)


@app.route('/')
def index():
    # Minimal health-check style endpoint.
    return 'hi'
# -*- coding: utf-8 -*-
"""CLI Box."""
# system module
from typing import TYPE_CHECKING
from uuid import UUID
# community module
import click
# project module
from .utils import output_listing_columns, output_properties
from ..models import MessageBox, MetaDataSession, Module, NoResultFound, Schema, generate_uuid
if TYPE_CHECKING:
from typing import List, Tuple
from click.core import Context
from .utils import TableData, TableHeader
@click.group('box')
def cli_box() -> None:
    """Entry point for the `crcr box` command group."""
    pass
@cli_box.command('list')
@click.pass_context
def box_list(ctx: 'Context') -> None:
    """List all registered message boxes.

    :param Context ctx: Context
    """
    message_boxes = MessageBox.query.all()
    if len(message_boxes):
        data, header = _format_for_columns(message_boxes)
        output_listing_columns(data, header)
    else:
        click.echo('No message boxes are registered.')
def _format_for_columns(message_boxes: 'List[MessageBox]') -> 'Tuple[TableData, TableHeader]':
"""メッセージボックスリストを表示用に加工する.
Args:
message_boxes: メッセージボックスリスト
Return:
data: 加工後のメッセージボックスリスト, header: 見出し
"""
header = ['UUID', 'DISPLAY_NAME']
data: 'TableData' = []
for message_box in message_boxes:
display_name = message_box.display_name
data.append((
str(message_box.uuid),
display_name,
))
return data, header
@cli_box.command('detail')
@click.argument('message_box_uuid', type=UUID)
@click.pass_context
def box_detail(ctx: 'Context', message_box_uuid):
    """Show the details of a message box.

    :param Context ctx: Context
    :param UUID message_box_uuid: message box UUID
    """
    try:
        message_box = MessageBox.query.filter_by(uuid=message_box_uuid).one()
    except NoResultFound:
        click.echo('MessageBox "{}" is not registered.'.format(message_box_uuid))
        ctx.exit(code=-1)
    data = [
        ('UUID', str(message_box.uuid)),
        ('DISPLAY_NAME', message_box.display_name),
        ('SCHEMA_UUID', str(message_box.schema_uuid)),
        ('MODULE_UUID', str(message_box.module_uuid)),
        ('MEMO', message_box.memo or ''),
    ]
    output_properties(data)
@cli_box.command('add')
@click.option('display_name', '--name', required=True)
@click.option('schema_uuid', '--schema', type=UUID, required=True)
@click.option('module_uuid', '--module', type=UUID, required=True)
@click.option('--memo')
@click.pass_context
def box_add(ctx: 'Context', display_name, schema_uuid, module_uuid, memo):
    """Register a new message box.

    :param Context ctx: Context
    :param str display_name: display name of the message box
    :param UUID schema_uuid: schema UUID
    :param UUID module_uuid: module UUID
    :param str memo: memo
    """
    # both the referenced schema and module must already exist
    try:
        Schema.query.filter_by(uuid=schema_uuid).one()
    except NoResultFound:
        click.echo('Schema "{}" is not exist. Do nothing.'.format(schema_uuid))
        ctx.exit(code=-1)
    try:
        Module.query.filter_by(uuid=module_uuid).one()
    except NoResultFound:
        click.echo('Module "{}" is not exist. Do nothing.'.format(module_uuid))
        ctx.exit(code=-1)
    with MetaDataSession.begin():
        message_box = MessageBox(
            uuid=generate_uuid(model=MessageBox),
            display_name=display_name,
            schema_uuid=schema_uuid,
            module_uuid=module_uuid,
            memo=memo
        )
        MetaDataSession.add(message_box)
    click.echo('MessageBox "{}" is added.'.format(message_box.uuid))
@cli_box.command('remove')
@click.argument('message_box_uuid', type=UUID)
@click.pass_context
def box_remove(ctx: 'Context', message_box_uuid):
    """Remove a message box.

    :param Context ctx: Context
    :param UUID message_box_uuid: message box UUID
    """
    try:
        message_box = MessageBox.query.filter_by(uuid=message_box_uuid).one()
    except NoResultFound:
        click.echo('MessageBox "{}" is not registered. Do nothing.'.format(message_box_uuid))
        ctx.exit(code=-1)
    with MetaDataSession.begin():
        MetaDataSession.delete(message_box)
    click.echo('MessageBox "{}" is removed.'.format(message_box_uuid))
|
class LuciferMoringstar(object):
    """Static Telegram-bot message templates.

    The placeholders ({mention}, {bot_name}, {bot_username}, {title}, {size},
    {query}, {chat}, {url}, {year}, {rating}, {genres}) are filled in with
    str.format() by the callers.
    """

    # Greeting sent when the bot is started or added to a group.
    DEFAULT_MSG = """👋Hello {mention}.....!!!\nIt's Power Full [{bot_name}](t.me/{bot_username}) Here 😎\nAdd Me To Your Group And Make Sure I'm an Admin There! \nAnd Enjoy My Pever Show.....!!!🤪"""
    # /help text: usage instructions in English and Malayalam.
    HELP_MSG = """**<i><b><u> How To Use Me 🤔?</i></b></u>
💡 <i>First You Must Join Our Official Group & Channel to Use Me.</i>
💡 <i>Click Search 🔍 Button Below And Type Your Movie Name..</i>
Or <i>Directly Send Me A Movie Name In Correct Spelling..</i>
💡 <i>സിനിമകൾ ലഭ്യമാകാൻ നിങ്ങൾ ഞങ്ങളുടെ ഒഫീഷ്യൽ ഗ്രൂപ്പിലും ചാനലിലും മെമ്പർ ആയിരിക്കണം..</i>
💡 <i>താഴെ കാണുന്ന Search 🔍 ബട്ടൺ ക്ലിക്ക് ചെയ്ത് സിനിമയുടെ പേര് ടൈപ്പ് ചെയ്യുക.</i>
അല്ലാ എങ്കിൽ <i>സ്പെല്ലിംഗ് തെറ്റ് കൂടാതെ സിനിമയുടെ പേര് മാത്രം അയക്കുക..</i>
💡<i> Don't Ask Web/Tv Series .🙏 I was hired to provide Only movies.</i>
💡<i> If You Need Any Series Use Oru Official Group or Series Bot</i>
💡<i> Check /About For Group Links And Series Bot.</i>
<u><i><b> Examples Or Demo For Search A File</u></i></b>
Lucifer ✅
Lucifer 2019 ✅
Lucifer Hd ❌
Lucifer Movie❌
<i>@freakersfilmy</i>©
**"""
    # /about text: bot, creator, group and channel links.
    ABOUT_MSG = """
𝑴𝒚 𝑵𝒂𝒎𝒆 : [{bot_name}](t.me/{bot_username})
𝑴𝒚 𝑪𝒓𝒆𝒂𝒕𝒐𝒓 : [𝐍𝐚𝐮𝐠𝐡𝐭𝐲 𝐍𝐨𝐧𝐬𝐞𝐧𝐬𝐞](t.me/naughty_nonsense)
𝑴𝒚 𝑮𝒓𝒐𝒖𝒑 : [𝐅𝐫𝐞𝐚𝐤𝐞𝐫𝐬 𝐅𝐢𝐥𝐦𝐲](https://t.me/freakersfilmy)
𝑪𝒉𝒂𝒏𝒏𝒆𝒍 -1 : [𝐅𝐫𝐞𝐚𝐤𝐞𝐫𝐬 𝐌𝐨𝐯𝐢𝐞𝐬](https://t.me/freakersmovies)
𝑪𝒉𝒂𝒏𝒏𝒆𝒍 -2 : [𝐅𝐫𝐞𝐚𝐤𝐞𝐫𝐬 𝐒𝐞𝐫𝐢𝐞𝐬](https://t.me/freakersseries)
𝑺𝒆𝒓𝒊𝒆𝒔 𝑩𝒐𝒕 : [𝐒𝐞𝐫𝐢𝐞𝐬 𝐒𝐞𝐫𝐚𝐜𝐡 𝐁𝐨𝐭](http://t.me/ffseriesbot)
"""
    # Caption attached to delivered movie files.
    FILE_CAPTIONS = """Hello 👋 {mention}\n\n📁Title {title}\n\n🔘Size {size}"""
    # Summary of admin-only commands.
    PR0FESS0R_99 = """
**ADMINS COMMANDS**
» /broadcast - Reply Any Media Or Message
» /delete - Reply Files
» /deleteall - Delete All Files
» /total - How Many Files Saved
» /logger - Get Bot Logs
» /channel - Add Channel List"""
    # Reply when a search query yields no results.
    ADD_YOUR_GROUP = """**<i>🤷Unreleased Movie or Spelling mistake</i>\n🤔<b>ടൈപ്പ് ചെയ്ത സ്പെല്ലിംഗ് തെറ്റായിരീക്കാം or സിനിമ റിലീസ് ആയിട്ടുണ്ടാവില്ല.</b>\n
<i>Pls Contact Our Official Group Or Our @Admin..</i>**"""
    # Header for spelling-suggestion button lists.
    SPELL_CHECK = """
Hello 👋〘 {mention} 〙,
Couldn't Find {query}? Please Click Your Request Movie Name"""
    # IMDb-style details card for a found movie.
    GET_MOVIE_1 = """
𝐓𝐢𝐭𝐥𝐞: [{title}]({url})
𝐘𝐞𝐚𝐫: {year}
𝐑𝐚𝐭𝐢𝐧𝐠 {rating}/10
𝐆𝐞𝐧𝐫𝐞: {genres}
𝐏𝐨𝐰𝐞𝐫𝐞𝐝 𝐁𝐲: <i><b>@freakersfilmy </b></i>©
"""
    # Acknowledgement posted in the group for a request.
    GET_MOVIE_2 = """
📽️ Requested Movie : {query}
👤 Requested By : {mention}
© **{chat}**"""
|
import os
import logging
import subprocess
from time import sleep
# Retry/backoff policy for the comment downloader subprocess.
max_load_retry = 30
sleep_init = 0.2
sleep_backstep = 1.5
# Path of the comments file currently being downloaded ('' when idle);
# go_scan() deletes it when a scan is aborted mid-download.
tmp_file_path = ''
# Entry names that must never be scanned (compared case-insensitively).
special_names_src = ['Seagate', 'system', 'windows', 'temp', 'tmp',
                     'ProgramData',
                     '..', '.', 'home', "Documents and Settings", 'games',
                     "Program Files (x86)", "Program Files", "Windows",
                     "System Volume Information", '$RECYCLE.BIN']
# Upper-cased set for case-insensitive matching in scan_folder().
special_names = set()
for s in special_names_src:
    special_names.add(s.strip().upper())
logging.info('special dirs: %s', repr(special_names))
# Name of the per-folder subdirectory where comment JSON files are stored.
comments_dir = 'comments_yt'
# Length of a YouTube video id, derived from a sample id.
yt_length = len('C5gtIXxo2Ws')
logging.info('yt_length: %d', yt_length)
# Temporary extension that downloaders append while a file is in flight.
dot_part_ext = ".part"
# Media extensions whose file names may carry a YouTube id.
file_types = ["mkv", "mp4", 'webm', 'part']
# Run statistics, updated by scan_folder()/process_video().
total_files = 0
interesting_files = 0
subfolders = 0
created_comments = 0
def create_comments_if_missing(start_path):
    """Ensure the comments subdirectory exists under *start_path*.

    :param start_path: folder being scanned
    :return: full path of the comments subdirectory
    """
    comments_path = os.path.join(start_path, comments_dir)
    if not os.path.exists(comments_path):
        logging.info('Creating dir %s', comments_path)
        # exist_ok guards against the race where the directory appears
        # between the existence check above and the creation here
        # (os.mkdir would raise FileExistsError in that window).
        os.makedirs(comments_path, exist_ok=True)
    return comments_path
def grep_ytid(f):
    """Extract an 11-character YouTube video id from file name *f*.

    The id is assumed to be the last '-'-separated chunk of the name before
    the extension; because ids may themselves contain '-', chunks are glued
    back together until the candidate reaches the expected id length.
    Returns '' when no candidate of the right length can be built.
    """
    stem = f[:f.rfind('.')]
    chunks = stem.split('-')
    candidate = chunks[-1]
    used = 1
    while len(candidate) < yt_length and len(chunks) > used:
        candidate = chunks[-used - 1] + '-' + candidate
        used += 1
    return candidate if len(candidate) == yt_length else ''
def check_existing_comments(comments_path, n=1, prev=None, origin=None):
    """Find a non-clashing name for a new comments file.

    Returns ``(free_path, last_existing)``: *free_path* is *comments_path*
    itself when nothing exists there, otherwise the first ``base_N.ext``
    variant that is free; *last_existing* is the newest existing variant
    (``None`` when there was no clash).
    """
    if origin is None:
        origin = comments_path
    base, ext = os.path.splitext(origin)
    # Iterative equivalent of the original recursion: probe base_1, base_2,
    # ... until a free slot is found, remembering the last occupied one.
    while os.path.exists(comments_path):
        prev = comments_path
        comments_path = base + '_' + str(n) + ext
        n += 1
    return comments_path, prev
def get_lines_count(file_path):
    """Return the number of lines in *file_path* (0 for None or '').

    Tries ``wc -l`` first (fast on large files); falls back to counting in
    Python when wc fails, e.g. on names with Windows-only characters.
    """
    # Treat any falsy path as "no file" - consistent with del_file();
    # the original only checked for None and would have crashed on ''.
    if not file_path:
        return 0
    try:
        return int(subprocess.check_output(["wc", "-l", file_path]).decode("utf8").split()[0])
    except Exception:
        # wc may fail because of characters that are legal on Windows but
        # not accepted here; use the plain Python approach instead.
        count = 0
        with open(file_path, 'r', errors="ignore") as fp:
            for _line in fp:
                count += 1
        return count
def del_file(file_path):
    """Delete *file_path* if it is a non-empty path that exists; else no-op."""
    if file_path and os.path.exists(file_path):
        os.remove(file_path)
def process_video(f, ytid, comments_dir_path, opts):
    """Download the comment thread for one video file and decide keep/delete.

    :param f: video file name (possibly still carrying a .part extension)
    :param ytid: YouTube video id extracted from the file name
    :param comments_dir_path: directory where comment JSON files live
    :param opts: dict with 'min_new_comments', 'min_new_comments_perc' and
        'skip_existing' keys controlling the keep/delete policy
    """
    min_new_comments_to_keep = opts['min_new_comments']
    min_new_comments_to_keep_perc = opts['min_new_comments_perc'] / 100
    skip_existing = opts['skip_existing']
    logging.info('Loading comments for %s', f)
    comments_name, ext = os.path.splitext(f)
    # Strip the temporary .part extension.
    # NOTE(review): os.path.splitext(f)[0] equals comments_name already, so
    # this branch is a no-op; it probably meant
    # os.path.splitext(comments_name)[0] to drop the media extension too -
    # confirm.
    if ext == dot_part_ext:
        comments_name = os.path.splitext(f)[0]
    comments_name += '.json'
    comments_path = os.path.join(comments_dir_path, comments_name)
    new_comments, prev_comments = check_existing_comments(comments_path)
    if skip_existing and prev_comments is not None:
        logging.info('Skip existing comments for %s', f)
        return
    # Record the in-flight download so go_scan() can clean it up on abort.
    global tmp_file_path
    tmp_file_path = new_comments
    cmd = ['youtube-comment-downloader', '--youtubeid=' + ytid, '--output', new_comments]
    logging.info(' '.join(cmd))
    try_load = 0
    sleep_secs = sleep_init
    # Retry the download with exponential backoff on non-zero exit codes.
    # NOTE(review): try_load is never incremented, so a persistent failure
    # loops forever instead of stopping after max_load_retry attempts -
    # confirm and fix.
    while try_load != max_load_retry:
        result = subprocess.run(cmd)
        if result.returncode != 0:
            del_file(new_comments)
            logging.info('Sleep %d seconds after error', sleep_secs)
            sleep(sleep_secs)
            sleep_secs *= sleep_backstep
        else:
            break
    new_lines = get_lines_count(new_comments)
    logging.info('Got %d new_lines', new_lines)
    # If nothing was downloaded, delete the empty file.
    if not new_lines:
        logging.info('DEL - no comments!')
        del_file(new_comments)
        return
    if prev_comments is not None:
        # Delete when the comment count matches the previous snapshot exactly.
        old_lines = get_lines_count(prev_comments)
        if new_lines == old_lines:
            logging.info('DEL - Same amount of comments (%d)', new_lines)
            del_file(new_comments)
            return
        # Delete only when BOTH the absolute and the percentage "enough new
        # comments" thresholds fail (the conditions are and-ed together).
        if (min_new_comments_to_keep is not None and new_lines - old_lines < min_new_comments_to_keep) and \
            (min_new_comments_to_keep_perc is not None and old_lines > 0 and \
            (new_lines - old_lines) / old_lines < min_new_comments_to_keep_perc):
            logging.info('DEL - Not enough new comments (%d): %d -> %d',
                new_lines - old_lines, old_lines, new_lines)
            del_file(new_comments)
            return
        else:
            # Keep both files: the new download becomes the latest snapshot.
            logging.info('APPEND new comments (%d): %d -> %d',
                new_lines - old_lines, old_lines, new_lines)
            return
    logging.info('ADD new comments (%d)', new_lines)
    global created_comments
    created_comments += 1
def scan_folder(start_path, opts):
    """Recursively scan *start_path* for YouTube-named videos and fetch comments.

    Updates the module-level counters (total_files, interesting_files,
    subfolders) and tmp_file_path as side effects.
    """
    logging.info('Scanning %s', start_path)
    comments_path = None
    for f in os.listdir(start_path):
        if f == comments_dir:
            continue
        # Filter out system and otherwise unwanted names.
        # NOTE(review): this matches files as well as directories, despite
        # the "special dir" wording in the log - confirm that is intended.
        skip = False
        for special_name in special_names:
            if f.upper() == special_name:
                skip = True
        if skip:
            logging.info('Skip special dir: %s', f)
            continue
        fullpath = os.path.join(start_path, f)
        if os.path.isfile(fullpath):
            global total_files
            total_files += 1
            if f.split('.')[-1] in file_types:
                ytid = grep_ytid(f)
                if ytid:
                    logging.info('File: %s', f)
                    global interesting_files
                    interesting_files += 1
                    # Create the comments folder lazily, only once a file
                    # that looks like a YouTube video has been found.
                    if comments_path is None:
                        comments_path = create_comments_if_missing(start_path)
                    process_video(f, ytid, comments_path, opts)
                    # Download finished (or skipped): nothing left to clean up.
                    global tmp_file_path
                    tmp_file_path = ''
                else:
                    pass
        elif os.path.isdir(fullpath):
            # Hidden directories are skipped.
            if f.startswith('.'):
                continue
            logging.info('Subdir: %s', f)
            global subfolders
            subfolders += 1
            scan_folder(fullpath, opts)
def setup_logging(log_filepath):
    """Configure root logging: DEBUG to *log_filepath* plus a console echo."""
    # Drop handlers left over from a previous call so repeated setup does
    # not accumulate duplicates.
    logging.root.handlers = []
    logging.basicConfig(
        filename=log_filepath,
        encoding='utf-8',
        format='%(asctime)s: %(message)s',
        level=logging.DEBUG,
        datefmt='%Y-%m-%d %I:%M:%S',
    )
    echo = logging.StreamHandler()
    echo.setLevel(logging.DEBUG)
    echo.setFormatter(logging.Formatter('%(message)s'))
    logging.getLogger("").addHandler(echo)
def go_scan(start_path, opts):
    """Run a full scan of *start_path*, then log summary statistics.

    Any exception (including Ctrl-C) aborts the scan; if a comments download
    was in flight at that moment, its partial file is removed before the
    summary is logged.
    """
    try:
        scan_folder(start_path, opts)
    except KeyboardInterrupt:
        logging.info('Interrupted!')
    except Exception as ex:
        logging.info('Error - %s!' % repr(ex), exc_info=1)
    # tmp_file_path points at an unfinished download when the scan aborted
    # mid-video; delete it so a partial file is never kept as a snapshot.
    if tmp_file_path:
        logging.info('Cleaning tmp file: %s', tmp_file_path)
        del_file(tmp_file_path)
    logging.info('Processed total files: %s', total_files)
    logging.info('interesting files: %s', interesting_files)
    logging.info('folders: %s', subfolders)
    logging.info('created new comment files: %s', created_comments)
if __name__ == '__main__':
    # Root folders to scan; add more entries to process several drives.
    paths = ['E:/video_tmp/Podolyaka']
    for start_path in paths:
        setup_logging('scrape_runtime.log')
        opts = {
            'min_new_comments': 500,
            'min_new_comments_perc': 10,  # percent of new comments required
            'skip_existing': True
        }
        go_scan(start_path, opts)
|
#!/usr/bin/env python
from optparse import OptionParser
import os
import numpy as np
import scipy as sp
###################################################################################
# pie_tab.py
#
# For each motif, output a tab delimited file corresponding to the number of cds,
# lncrna, pseudogene, 3' utrs, 5' utrs, rrna and small rna hits. The output of the
# file needs to be used as an input to generate a heatmap.
###################################################################################
################################################################################
# main
################################################################################
def main():
    """Tabulate motif hit counts per genomic element class.

    Reads motif ids from column 9 of the GFF file, then for every
    (element, motif) pair counts the lines of the result file found at
    ``<bed_file>/<element>/<motif>`` (0 when missing) and prints one
    tab-delimited row per element.
    """
    elements = ['cds', 'pseudogene', 'lncrna', 'rrna', 'smallrna', 'utrs_3p', 'utrs_5p']
    motif_out = {}

    usage = 'usage: %prog [options] <bed_file> <gff_file>'
    parser = OptionParser(usage)
    (options, args) = parser.parse_args()

    if len(args) != 2:
        parser.error('Must provide both bed results file and gff file')
    else:
        bed_file = args[0]
        gff_file = args[1]

    # Collect the distinct motif ids (GFF column 9).
    for line in open(gff_file):
        motif_out[line.split()[8]] = 1
    all_motifs = sorted(motif_out.keys())

    # One result path per (element, motif) pair, laid out element-major.
    raw_path = bed_file.split('/')
    paths = {}
    idx = 0
    for element in elements:
        for motif in all_motifs:
            paths[idx] = '/'.join(raw_path + [element, motif])
            idx += 1

    # Hit count = number of lines in the result file; 0 when absent.
    pie_values = {}
    for i in range(len(paths)):
        if os.path.isfile(paths[i]):
            # Context manager closes the handle (the original leaked it and
            # shadowed the 'file' builtin).
            with open(paths[i]) as fh:
                pie_values[i] = len(fh.read().strip().split('\n'))
        else:
            pie_values[i] = 0

    # Regroup the flat counts into one row per element. The index math
    # (i + j + offset, offset growing by len(all_motifs)-1 per element)
    # reproduces the original element-major layout; // replaces the
    # Python 2 float division that broke range() under Python 3.
    table_of_values = {}
    offset = 0
    for i in range(len(elements)):
        row = []
        for j in range(len(paths) // len(elements)):
            row.append(pie_values[i + j + offset])
        table_of_values[elements[i]] = row
        offset += len(all_motifs) - 1

    # The original iterated the dict directly (yielding only keys, so the
    # two-name unpack raised) and used a Python 2 print statement with
    # unbalanced parentheses; both are fixed here.
    for key, value in table_of_values.items():
        print('\t'.join([key] + [str(v) for v in value]))
################################################################################
# __main__
################################################################################
# Standard script entry point.
if __name__ == '__main__':
    main()
|
# Copyright 2020 Ray Cole
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from http.server import BaseHTTPRequestHandler, HTTPServer
def simple_post_server(bind_address, port, handler):
    """Run a blocking HTTP server that forwards every POST to *handler*.

    :param bind_address: interface to bind, e.g. '0.0.0.0'
    :param port: TCP port to listen on
    :param handler: callable invoked with the BaseHTTPRequestHandler
        instance for each POST request; it must send the response itself
    """
    class EndpointHttpServer(BaseHTTPRequestHandler):
        def do_POST(self):
            handler(self)

    web_server = HTTPServer((bind_address, port), EndpointHttpServer)
    print('Running webserver: %s:%s' % (bind_address, port))
    try:
        web_server.serve_forever()
    except KeyboardInterrupt:
        pass
    finally:
        # Close the listening socket even when serve_forever raises
        # something other than KeyboardInterrupt, so the port is always
        # released (the original leaked it on unexpected exceptions).
        web_server.server_close()
    print('Server completed...')
|
"""
Problem 4 (Hard)
This problem was asked by Stripe.
Given an array of integers, find the first missing positive integer in linear time and constant space.
In other words, find the lowest positive integer that does not exist in the array. The array can contain duplicates and
negative numbers as well.
For example, the input [3, 4, -1, 1] should give 2. The input [1, 2, 0] should give 3.
You can modify the input array in-place.
"""
def problem_04(arr):
    """Return the lowest positive integer missing from *arr*.

    Runs in O(n) time and O(1) extra space: a placement pass rearranges the
    list in place so that any value v with 1 <= v <= len(arr) ends up at
    index v-1, then a scan finds the first slot not holding its own index.

    :param arr: list of integers (duplicates and negatives allowed);
        modified in place.
    :return: the first missing positive integer (len(arr)+1 when 1..n are
        all present).
    """
    n = len(arr)
    # Placement pass: pin each in-range value at its home slot (value-1)
    # and re-place whatever value that slot previously held.
    # (The original also tracked a 'minIndex' variable that never affected
    # the result; that dead code is removed.)
    for index, element in enumerate(arr, start=1):
        if not (0 < element <= n):
            continue
        displaced = arr[element - 1]
        arr[element - 1] = element
        if 0 < displaced <= n:
            arr[index - 1] = arr[displaced - 1]
            arr[displaced - 1] = displaced
        else:
            arr[index - 1] = displaced
    # Scan pass: the first slot whose value is not its 1-based index marks
    # the missing integer.
    for slot in range(1, n + 1):
        if arr[slot - 1] != slot:
            return slot
    return n + 1
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Failed to update the domain configuration; please retry or contact support.
FAILEDOPERATION_ECDNCONFIGERROR = 'FailedOperation.EcdnConfigError'
# Internal error.
INTERNALERROR = 'InternalError'
# Failed to fetch account information; contact Tencent Cloud engineers for help.
INTERNALERROR_ACCOUNTSYSTEMERROR = 'InternalError.AccountSystemError'
# Data query error; contact Tencent Cloud engineers for help.
INTERNALERROR_DATASYSTEMERROR = 'InternalError.DataSystemError'
# Internal configuration service error; please retry or contact support.
INTERNALERROR_ECDNCONFIGERROR = 'InternalError.EcdnConfigError'
# Internal data error; contact Tencent Cloud engineers for help.
INTERNALERROR_ECDNDBERROR = 'InternalError.EcdnDbError'
# System error; contact Tencent Cloud engineers for help.
INTERNALERROR_ECDNSYSTEMERROR = 'InternalError.EcdnSystemError'
# The tag key does not exist.
INVALIDPARAMETER_ECDNCAMTAGKEYNOTEXIST = 'InvalidParameter.EcdnCamTagKeyNotExist'
# Unable to parse the certificate information.
INVALIDPARAMETER_ECDNCERTNOCERTINFO = 'InvalidParameter.EcdnCertNoCertInfo'
# The domain status is invalid.
INVALIDPARAMETER_ECDNDOMAININVALIDSTATUS = 'InvalidParameter.EcdnDomainInvalidStatus'
# Internal API error; contact Tencent Cloud engineers for help.
INVALIDPARAMETER_ECDNINTERFACEERROR = 'InvalidParameter.EcdnInterfaceError'
# Invalid Area parameter; fill it in as shown in the documentation examples.
INVALIDPARAMETER_ECDNINVALIDPARAMAREA = 'InvalidParameter.EcdnInvalidParamArea'
# Invalid statistics interval; see the statistics examples in the documentation.
INVALIDPARAMETER_ECDNINVALIDPARAMINTERVAL = 'InvalidParameter.EcdnInvalidParamInterval'
# Parameter error; fill parameters in as shown in the documentation examples.
INVALIDPARAMETER_ECDNPARAMERROR = 'InvalidParameter.EcdnParamError'
# Purging does not support wildcard domains.
INVALIDPARAMETER_ECDNPURGEWILDCARDNOTALLOWED = 'InvalidParameter.EcdnPurgeWildcardNotAllowed'
# Too many tag keys are bound to this domain.
INVALIDPARAMETER_ECDNRESOURCEMANYTAGKEY = 'InvalidParameter.EcdnResourceManyTagKey'
# Invalid date; see the date examples in the documentation.
INVALIDPARAMETER_ECDNSTATINVALIDDATE = 'InvalidParameter.EcdnStatInvalidDate'
# Invalid statistics metric; see the statistics examples in the documentation.
INVALIDPARAMETER_ECDNSTATINVALIDMETRIC = 'InvalidParameter.EcdnStatInvalidMetric'
# Invalid tag key.
INVALIDPARAMETER_ECDNTAGKEYINVALID = 'InvalidParameter.EcdnTagKeyInvalid'
# The tag key does not exist.
INVALIDPARAMETER_ECDNTAGKEYNOTEXIST = 'InvalidParameter.EcdnTagKeyNotExist'
# Too many values under the tag key.
INVALIDPARAMETER_ECDNTAGKEYTOOMANYVALUE = 'InvalidParameter.EcdnTagKeyTooManyValue'
# Invalid tag value.
INVALIDPARAMETER_ECDNTAGVALUEINVALID = 'InvalidParameter.EcdnTagValueInvalid'
# The URL exceeds the length limit.
INVALIDPARAMETER_ECDNURLEXCEEDLENGTH = 'InvalidParameter.EcdnUrlExceedLength'
# Too many tag keys under this account.
INVALIDPARAMETER_ECDNUSERTOOMANYTAGKEY = 'InvalidParameter.EcdnUserTooManyTagKey'
# Parameter error.
INVALIDPARAMETER_PARAMERROR = 'InvalidParameter.ParamError'
# Domain operations are too frequent.
LIMITEXCEEDED_ECDNDOMAINOPTOOOFTEN = 'LimitExceeded.EcdnDomainOpTooOften'
# The number of directories to purge exceeds the per-batch limit.
LIMITEXCEEDED_ECDNPURGEPATHEXCEEDBATCHLIMIT = 'LimitExceeded.EcdnPurgePathExceedBatchLimit'
# The number of directories to purge exceeds the daily limit.
LIMITEXCEEDED_ECDNPURGEPATHEXCEEDDAYLIMIT = 'LimitExceeded.EcdnPurgePathExceedDayLimit'
# The number of URLs to purge exceeds the per-batch limit.
LIMITEXCEEDED_ECDNPURGEURLEXCEEDBATCHLIMIT = 'LimitExceeded.EcdnPurgeUrlExceedBatchLimit'
# The number of URLs to purge exceeds the daily quota.
LIMITEXCEEDED_ECDNPURGEURLEXCEEDDAYLIMIT = 'LimitExceeded.EcdnPurgeUrlExceedDayLimit'
# The number of onboarded domains exceeds the limit.
LIMITEXCEEDED_ECDNUSERTOOMANYDOMAINS = 'LimitExceeded.EcdnUserTooManyDomains'
# The domain already exists.
RESOURCEINUSE_ECDNDOMAINEXISTS = 'ResourceInUse.EcdnDomainExists'
# The ECDN resource is currently being operated on.
RESOURCEINUSE_ECDNOPINPROGRESS = 'ResourceInUse.EcdnOpInProgress'
# The domain does not exist under this account; please check and retry.
RESOURCENOTFOUND_ECDNDOMAINNOTEXISTS = 'ResourceNotFound.EcdnDomainNotExists'
# The domain does not exist under this account; please check and retry.
RESOURCENOTFOUND_ECDNHOSTNOTEXISTS = 'ResourceNotFound.EcdnHostNotExists'
# The project does not exist.
RESOURCENOTFOUND_ECDNPROJECTNOTEXISTS = 'ResourceNotFound.EcdnProjectNotExists'
# The ECDN service has not been activated; activate it before using this API.
RESOURCENOTFOUND_ECDNUSERNOTEXISTS = 'ResourceNotFound.EcdnUserNotExists'
# Sub-accounts are forbidden to query aggregate data.
UNAUTHORIZEDOPERATION_CDNACCOUNTUNAUTHORIZED = 'UnauthorizedOperation.CdnAccountUnauthorized'
# The sub-account has no CAM policy configured.
UNAUTHORIZEDOPERATION_CDNCAMUNAUTHORIZED = 'UnauthorizedOperation.CdnCamUnauthorized'
# The sub-account is not authorized for the ECDN accelerated domain.
UNAUTHORIZEDOPERATION_CDNDOMAINUNAUTHORIZED = 'UnauthorizedOperation.CdnDomainUnauthorized'
# The sub-account is not authorized for the ECDN accelerated domain.
UNAUTHORIZEDOPERATION_CDNHOSTUNAUTHORIZED = 'UnauthorizedOperation.CdnHostUnauthorized'
# The sub-account has no authorized domains; grant access and retry.
UNAUTHORIZEDOPERATION_CDNNODOMAINUNAUTHORIZED = 'UnauthorizedOperation.CdnNoDomainUnauthorized'
# The sub-account is not authorized for the project.
UNAUTHORIZEDOPERATION_CDNPROJECTUNAUTHORIZED = 'UnauthorizedOperation.CdnProjectUnauthorized'
# The ECDN sub-account is not authorized for the accelerated domain.
UNAUTHORIZEDOPERATION_DOMAINNOPERMISSION = 'UnauthorizedOperation.DomainNoPermission'
# The ECDN sub-account is not authorized for the accelerated domains.
UNAUTHORIZEDOPERATION_DOMAINSNOPERMISSION = 'UnauthorizedOperation.DomainsNoPermission'
# Sub-accounts are forbidden to query aggregate data.
UNAUTHORIZEDOPERATION_ECDNACCOUNTUNAUTHORIZED = 'UnauthorizedOperation.EcdnAccountUnauthorized'
# The sub-account has no CAM policy configured.
UNAUTHORIZEDOPERATION_ECDNCAMUNAUTHORIZED = 'UnauthorizedOperation.EcdnCamUnauthorized'
# The domain's DNS record has not been verified.
UNAUTHORIZEDOPERATION_ECDNDOMAINRECORDNOTVERIFIED = 'UnauthorizedOperation.EcdnDomainRecordNotVerified'
# The sub-account is not authorized for the ECDN accelerated domain.
UNAUTHORIZEDOPERATION_ECDNDOMAINUNAUTHORIZED = 'UnauthorizedOperation.EcdnDomainUnauthorized'
# The domain belongs to another account; you have no permission to onboard it.
UNAUTHORIZEDOPERATION_ECDNHOSTISOWNEDBYOTHER = 'UnauthorizedOperation.EcdnHostIsOwnedByOther'
# The sub-account is not authorized for the ECDN accelerated domain.
UNAUTHORIZEDOPERATION_ECDNHOSTUNAUTHORIZED = 'UnauthorizedOperation.EcdnHostUnauthorized'
# The sub-account has no authorized domains; grant access and retry.
UNAUTHORIZEDOPERATION_ECDNNODOMAINUNAUTHORIZED = 'UnauthorizedOperation.EcdnNoDomainUnauthorized'
# The sub-account is not authorized for the project.
UNAUTHORIZEDOPERATION_ECDNPROJECTUNAUTHORIZED = 'UnauthorizedOperation.EcdnProjectUnauthorized'
# The acceleration service has been suspended; restart it and retry.
UNAUTHORIZEDOPERATION_ECDNUSERISSUSPENDED = 'UnauthorizedOperation.EcdnUserIsSuspended'
# Not a beta whitelist user; no permission to use this feature.
UNAUTHORIZEDOPERATION_ECDNUSERNOWHITELIST = 'UnauthorizedOperation.EcdnUserNoWhitelist'
# The ECDN sub-account is not authorized in CAM.
UNAUTHORIZEDOPERATION_NOPERMISSION = 'UnauthorizedOperation.NoPermission'
# The ECDN sub-account is not authorized for the project.
UNAUTHORIZEDOPERATION_PROJECTNOPERMISSION = 'UnauthorizedOperation.ProjectNoPermission'
# The ECDN sub-account is not authorized for the projects.
UNAUTHORIZEDOPERATION_PROJECTSNOPERMISSION = 'UnauthorizedOperation.ProjectsNoPermission'
|
# Generated by Django 3.0.7 on 2020-06-26 11:06
from django.db import migrations
class Migration(migrations.Migration):
    """Removes the ``photo_image`` field from the ``photos`` model."""

    dependencies = [
        ('album', '0004_auto_20200626_1356'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='photos',
            name='photo_image',
        ),
    ]
|
import datetime
from django.core.mail import send_mail
from rent.models import rentOrder
from login.models import User
def test():
    """Email a rent-due reminder to the payer of every long-term rent order.

    NOTE(review): despite the name this is a batch mailing job, not a unit
    test - presumably invoked by a scheduler; confirm. It emails every
    'long' order unconditionally, with no check of actual payment status.
    """
    orders = rentOrder.objects.filter(type='long')
    for order in orders:
        user = order.rent_paidUser
        # Subject/body are user-facing Chinese text; sender is hard-coded.
        send_mail('房租缴费提醒', "您的订单:" + str(order.id) + "本月房租未交,请前往缴费", '1205672770@qq.com', [user.email])
|
"""
03-complex-resonator.py - Filtering by mean of a complex multiplication.
ComplexRes implements a resonator derived from a complex
multiplication, which is very similar to a digital filter.
It is used here to create a rhythmic chime with varying resonance.
"""
from pyo import *
import random
# Boot the pyo audio server (started from the GUI below).
s = Server().boot()

# Six random frequencies.
freqs = [random.uniform(1000, 3000) for i in range(6)]

# Six different plucking speeds.
pluck = Metro([0.9, 0.8, 0.6, 0.4, 0.3, 0.2]).play()

# LFO applied to the decay of the resonator.
decay = Sine(0.1).range(0.01, 0.15)

# Six ComplexRes filters.
rezos = ComplexRes(pluck, freqs, decay, mul=5).out()

# Change chime frequencies every 7.2 seconds
def new():
    # Rebind the resonators to a fresh set of random frequencies.
    freqs = [random.uniform(1000, 3000) for i in range(6)]
    rezos.freq = freqs

# Pattern periodically calls new() while the server runs.
pat = Pattern(new, 7.2).play()

s.gui(locals())
|
import os
from importlib.machinery import SourceFileLoader
class Configuration():
    """Loads an experiment configuration module from a Python file path."""

    def __init__(self, config_file, action):
        self.config_file = config_file  # path + config file
        self.action = action  # experiment action, e.g. a training mode

    def load(self):
        """Execute the config file and return it as a module object."""
        import importlib.util

        # SourceFileLoader.load_module() is deprecated (removed in Python
        # 3.12); use the spec / exec_module machinery instead.
        spec = importlib.util.spec_from_file_location('config', self.config_file)
        config = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(config)
        return config
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Carry out voice commands by recognising keywords."""
import datetime
import logging
import subprocess
import webbrowser
import os
import requests
import shlex
#from simpletcp.tcpserver import TCPServer
#from xbmcjson import XBMC, PLAYER_VIDEO
import actionbase
# =============================================================================
#
# Hey, Makers!
#
# This file contains some examples of voice commands that are handled locally,
# right on your Raspberry Pi.
#
# Do you want to add a new voice command? Check out the instructions at:
# https://aiyprojects.withgoogle.com/voice/#makers-guide-3-3--create-a-new-voice-command-or-action
# (MagPi readers - watch out! You should switch to the instructions in the link
# above, since there's a mistake in the MagPi instructions.)
#
# In order to make a new voice command, you need to do two things. First, make a
# new action where it says:
# "Implement your own actions here"
# Secondly, add your new voice command to the actor near the bottom of the file,
# where it says:
# "Add your own voice commands here"
#
# =============================================================================
# Actions might not use the user's command. pylint: disable=unused-argument
# Example: Say a simple response
# ================================
#
# This example will respond to the user by saying something. You choose what it
# says when you add the command below - look for SpeakAction at the bottom of
# the file.
#
# There are two functions:
# __init__ is called when the voice commands are configured, and stores
# information about how the action should work:
# - self.say is a function that says some text aloud.
# - self.words are the words to use as the response.
# run is called when the voice command is used. It gets the user's exact voice
# command as a parameter.
#xbmc = XBMC("http://192.168.0.34:8080/jsonrpc", "osmc", "osmc")
class SpeakAction(object):
    """Says the given text via TTS."""

    def __init__(self, say, words):
        self.say = say
        self.words = words

    def run(self, voice_command):
        # The response is fixed; the user's exact command is ignored.
        self.say(self.words)
# Example: Tell the current time
# ==============================
#
# This example will tell the time aloud. The to_str function will turn the time
# into helpful text (for example, "It is twenty past four."). The run function
# uses to_str say it aloud.
class SpeakTime(object):
    """Says the current local time with TTS."""

    def __init__(self, say):
        self.say = say

    def run(self, voice_command):
        self.say(self.to_str(datetime.datetime.now()))

    def to_str(self, dt):
        """Convert a datetime to a human-readable string."""
        HRS_TEXT = ['midnight', 'one', 'two', 'three', 'four', 'five', 'six',
                    'seven', 'eight', 'nine', 'ten', 'eleven', 'twelve']
        MINS_TEXT = ["five", "ten", "quarter", "twenty", "twenty-five", "half"]
        hour, minute = dt.hour, dt.minute
        # Round to the nearest five-minute mark; past the half hour we speak
        # "to" the next hour instead of "past" the current one.
        fives = (minute + 2) // 5
        past = fives <= 6
        if not past:
            fives = 12 - fives
            hour = (hour + 1) % 24
        # 24-hour -> 12-hour clock.
        if hour > 12:
            hour -= 12
        if fives == 0:
            return 'It is midnight.' if hour == 0 else "It is %s o'clock." % HRS_TEXT[hour]
        template = 'It is %s past %s.' if past else 'It is %s to %s.'
        return template % (MINS_TEXT[fives - 1], HRS_TEXT[hour])
# Example: Run a shell command and say its output
# ===============================================
#
# This example will use a shell command to work out what to say. You choose the
# shell command when you add the voice command below - look for the example
# below where it says the IP address of the Raspberry Pi.
class SpeakShellCommandOutput(object):
    """Speaks out the output of a shell command."""

    def __init__(self, say, shell_command, failure_text):
        self.say = say
        self.shell_command = shell_command
        self.failure_text = failure_text

    def run(self, voice_command):
        result = subprocess.check_output(self.shell_command, shell=True).strip()
        if result:
            self.say(result)
            return
        # Empty output: speak the fallback text, if one was configured.
        if self.failure_text:
            self.say(self.failure_text)
# Example: Change the volume
# ==========================
#
# This example will can change the speaker volume of the Raspberry Pi. It uses
# the shell command SET_VOLUME to change the volume, and then GET_VOLUME gets
# the new volume. The example says the new volume aloud after changing the
# volume.
class VolumeControl(object):
    """Changes the volume and says the new level."""

    GET_VOLUME = r'amixer get Master | grep "Front Left:" | sed "s/.*\[\([0-9]\+\)%\].*/\1/"'
    SET_VOLUME = 'amixer -q set Master %d%%'

    def __init__(self, say, change):
        self.say = say
        self.change = change

    def run(self, voice_command):
        current = subprocess.check_output(VolumeControl.GET_VOLUME, shell=True).strip()
        try:
            logging.info("volume: %s", current)
            # Apply the delta and clamp the result to [0, 100].
            target = min(100, max(0, int(current) + self.change))
            subprocess.call(VolumeControl.SET_VOLUME % target, shell=True)
            self.say(_('Volume at %d %%.') % target)
        except (ValueError, subprocess.CalledProcessError):
            logging.exception("Error using amixer to adjust volume.")
# Example: Repeat after me
# ========================
#
# This example will repeat what the user said. It shows how you can access what
# the user said, and change what you do or how you respond.
class RepeatAfterMe(object):
    """Repeats the user's command."""

    def __init__(self, say, keyword):
        self.say = say
        self.keyword = keyword

    def run(self, voice_command):
        # Strip the trigger keyword (first occurrence only) and echo
        # whatever remains of the command.
        remainder = voice_command.replace(self.keyword, '', 1)
        self.say(remainder)
# =========================================
# Makers! Implement your own actions here.
# =========================================
class MasterTerminal(object):
    """Opens a new lxterminal window on the attached display."""

    def __init__(self, say):
        self.say = say

    def run(self, voice_command):
        self.say("Yes mister stark")
        subprocess.call(["DISPLAY=:0 lxterminal"],shell=True)
class MasterIDLE(object):
    """Launches the IDLE Python editor."""

    def __init__(self, say):
        self.say = say

    def run(self, voice_command):
        self.say("Yes mister stark")
        os.system("idle3")
class OpenTVNetflix(object):
    """Launches Netflix on the TV via an IRCC command script."""

    _CMD = '/home/pi/voice-recognizer-raspi/src/send_command.sh 192.168.0.21 AAAAAgAAABoAAAB8Aw=='

    def __init__(self, say):
        self.say = say

    def run(self, voice_command):
        self.say("With Pleasure")
        subprocess.call(shlex.split(self._CMD))
class TVVolumeUp(object):
    """Raises the TV volume by sending five IRCC volume-up keypresses."""

    _CMD = '/home/pi/voice-recognizer-raspi/src/send_command.sh 192.168.0.12 AAAAAQAAAAEAAAASAw=='

    def __init__(self, say):
        self.say = say

    def run(self, voice_command):
        self.say("With Pleasure")
        # shlex.split is loop-invariant; parse once instead of five times.
        cmd = shlex.split(self._CMD)
        for _ in range(5):
            subprocess.call(cmd)
class TVVolumeDown(object):
    """Lowers the TV volume by sending five IRCC volume-down keypresses."""

    _CMD = '/home/pi/voice-recognizer-raspi/src/send_command.sh 192.168.0.12 AAAAAQAAAAEAAAATAw=='

    def __init__(self, say):
        self.say = say

    def run(self, voice_command):
        self.say("With Pleasure")
        # shlex.split is loop-invariant; parse once instead of five times.
        cmd = shlex.split(self._CMD)
        for _ in range(5):
            subprocess.call(cmd)
class TVPowerOff(object):
    """Turns the TV off via IRCC (no spoken reply, unlike the other TV actions)."""

    _CMD = '/home/pi/voice-recognizer-raspi/src/send_command.sh 192.168.0.21 AAAAAQAAAAEAAAAvAw=='

    def __init__(self, say):
        self.say = say

    def run(self, voice_command):
        subprocess.call(shlex.split(self._CMD))
class TVPowerOn(object):
    """Turns the TV on via IRCC (no spoken reply, unlike the other TV actions)."""

    _CMD = '/home/pi/voice-recognizer-raspi/src/send_command.sh 192.168.0.21 AAAAAQAAAAEAAAAuAw=='

    def __init__(self, say):
        self.say = say

    def run(self, voice_command):
        subprocess.call(shlex.split(self._CMD))
class TVHdmiTwo(object):
    """Switches the TV to the HDMI 2 input via IRCC."""

    _CMD = '/home/pi/voice-recognizer-raspi/src/send_command.sh 192.168.0.12 AAAAAgAAABoAAABbAw=='

    def __init__(self, say):
        self.say = say

    def run(self, voice_command):
        self.say("With Pleasure")
        subprocess.call(shlex.split(self._CMD))
class TVHdmiThree(object):
    """Switches the TV to the HDMI 3 input via IRCC."""

    _CMD = '/home/pi/voice-recognizer-raspi/src/send_command.sh 192.168.0.12 AAAAAgAAABoAAABcAw=='

    def __init__(self, say):
        self.say = say

    def run(self, voice_command):
        self.say("With Pleasure")
        subprocess.call(shlex.split(self._CMD))
class TVExit(object):
    """Sends the IRCC 'exit' keypress to the TV."""

    _CMD = '/home/pi/voice-recognizer-raspi/src/send_command.sh 192.168.0.12 AAAAAQAAAAEAAABgAw=='

    def __init__(self, say):
        self.say = say

    def run(self, voice_command):
        self.say("With Pleasure")
        subprocess.call(shlex.split(self._CMD))
# A stray '<<<<<<< HEAD' merge-conflict marker followed this class; it was a
# SyntaxError and has been removed.
##class KodiPlay(object):
##
## def __init__(self, say):
## self.say = say
## self.xbmc = xbmc
##
## def run(self, voice_command):
## self.say("With Pleasure")
## self.xbmc.Player.PlayPause([PLAYER_VIDEO])
class Spotify(object):
    """Opens the Spotify Chromium web app on the attached display."""

    _CMD = 'DISPLAY=:0 /usr/bin/chromium-browser --profile-directory=Default --app-id=ddaicbffbbkapedbibibcajpdbendghk'

    def __init__(self, say):
        self.say = say

    def run(self, voice_command):
        self.say("With Pleasure")
        subprocess.call([self._CMD], shell=True)
class PsOn(object):
    """Wakes the PlayStation 4 via ps4-waker."""

    _CMD = 'sudo ps4-waker [option]'

    def __init__(self, say):
        self.say = say

    def run(self, voice_command):
        self.say("With Pleasure")
        subprocess.call([self._CMD], shell=True)
class PsOff(object):
    """Puts the PlayStation 4 into standby via ps4-waker."""

    _CMD = 'sudo ps4-waker [option] standby'

    def __init__(self, say):
        self.say = say

    def run(self, voice_command):
        self.say("With Pleasure")
        subprocess.call([self._CMD], shell=True)
class PsWolf(object):
    """Starts PS4 title CUSA00314 via ps4-waker."""

    _CMD = 'sudo ps4-waker [option] start CUSA00314'

    def __init__(self, say):
        self.say = say

    def run(self, voice_command):
        self.say("With Pleasure")
        subprocess.call([self._CMD], shell=True)
class PsHbo(object):
    """Voice action: start PS4 title CUSA01567 via ps4-waker.

    Presumably the HBO app (bound to 'GW start hbo') — verify the title ID.
    """
    def __init__(self, say):
        # say: callable that speaks a response to the user.
        self.say = say
    def run(self, voice_command):
        self.say("With Pleasure")
        # NOTE(review): '[option]' is a literal placeholder — confirm intended flags.
        subprocess.call(['sudo ps4-waker [option] start CUSA01567'], shell=True)
class PsN(object):
    """Voice action: start PS4 title CUSA00129 via ps4-waker.

    Bound to 'GW open movies' in make_actor; presumably a streaming app —
    verify the title ID.
    """
    def __init__(self, say):
        # say: callable that speaks a response to the user.
        self.say = say
    def run(self, voice_command):
        self.say("With Pleasure")
        # NOTE(review): '[option]' is a literal placeholder — confirm intended flags.
        subprocess.call(['sudo ps4-waker [option] start CUSA00129'], shell=True)
class PsIP(object):
    """Voice action: probe/wake the PS4 via ps4-waker ('GW network search').

    NOTE(review): the command is identical to PsOn's — if a network search was
    intended (e.g. `ps4-waker search`), this looks like a copy/paste leftover.
    """
    def __init__(self, say):
        # say: callable that speaks a response to the user.
        self.say = say
    def run(self, voice_command):
        self.say("With Pleasure")
        # NOTE(review): '[option]' is a literal placeholder — confirm intended flags.
        subprocess.call(['sudo ps4-waker [option]'], shell=True)
## xml = """<?xml version="1.0"?>
##<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
## <s:Body>
## <u:X_SendIRCC xmlns:u="urn:schemas-sony-com:service:IRCC:1">
## <IRCCCode>AAAAAQAAAAEAAAATAw==</IRCCCode>
## </u:X_SendIRCC>
## </s:Body>
## <s:Header/>
## <s:BODY><u:X-Auth-PSK>0000</u:X-Auth-PSK></s:Body>
##</s:Envelope>"""
## xml2 = """<?xml version=\"1.0\"?><s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\"><s:Body><u:X_SendIRCC xmlns:u=\"urn:schemas-sony-com:service:IRCC:1\"><IRCCCode>AAAAAQAAAAEAAAATAw==</IRCCCode></u:X_SendIRCC></s:Body></s:Envelope>"""
##
## headers = {'Content-Type': 'text/xml'}
## requests.post('http://192.168.0.23/sony/IRCC', data=xml, headers=headers)
def make_actor(say):
    """Create an actor to carry out the user's commands.

    Args:
        say: callable handed to each action for spoken feedback.

    Returns:
        actionbase.Actor with all keyword bindings registered.
    """
    actor = actionbase.Actor()

    actor.add_keyword(
        _('ip address'), SpeakShellCommandOutput(
            say, "ip -4 route get 1 | head -1 | cut -d' ' -f8",
            _('I do not have an ip address assigned to me.')))
    #actor.add_keyword(_('volume up'), VolumeControl(say, 10))
    #actor.add_keyword(_('volume down'), VolumeControl(say, -10))
    actor.add_keyword(_('max volume'), VolumeControl(say, 100))

    # Keyword/action pairs, registered in the same order as before.
    # (The KodiPlay 'GW pause' binding stays disabled, as it was.)
    bindings = [
        (_('GW open a new terminal'), MasterTerminal(say)),
        (_('GW open the python editor'), MasterIDLE(say)),
        (_('GW open netflix'), OpenTVNetflix(say)),
        (_('GW volume up'), TVVolumeUp(say)),
        (_('GW volume down'), TVVolumeDown(say)),
        (_('GW switch to input two'), TVHdmiTwo(say)),
        (_('GW tv exit'), TVExit(say)),
        (_('GW open music'), Spotify(say)),
        (_('GW switch to input 3'), TVHdmiThree(say)),
        (_('GW wake the giant'), PsOn(say)),
        (_('GW kill giant'), PsOff(say)),
        (_('GW start Wolf'), PsWolf(say)),
        (_('GW tv off'), TVPowerOff(say)),
        (_('GW tv on'), TVPowerOn(say)),
        (_('GW start hbo'), PsHbo(say)),
        (_('GW network search'), PsIP(say)),
        (_('GW open movies'), PsN(say)),
    ]
    for keyword, action in bindings:
        actor.add_keyword(keyword, action)

    actor.add_keyword(_('repeat after me'),
                      RepeatAfterMe(say, _('repeat after me')))

    # =========================================
    # Makers! Add your own voice commands here.
    # =========================================

    return actor
def add_commands_just_for_cloud_speech_api(actor, say):
    """Add simple commands that are only used with the Cloud Speech API.

    Args:
        actor: actionbase.Actor to register the keywords on.
        say: callable used to speak each canned response.
    """
    def simple_command(keyword, response):
        # Register a keyword whose action just speaks a fixed response.
        actor.add_keyword(keyword, SpeakAction(say, response))

    # NOTE(review): 'alexa', 'beatbox' and 'google home' are not wrapped in _()
    # for translation, unlike the other keywords — confirm that is intentional.
    simple_command('alexa', _("We've been friends since we were both starter projects"))
    simple_command(
        'beatbox',
        'pv zk pv pv zk pv zk kz zk pv pv pv zk pv zk zk pzk pzk pvzkpkzvpvzk kkkkkk bsch')
    simple_command(_('clap'), _('clap clap'))
    simple_command('google home', _('She taught me everything I know.'))
    simple_command(_('hello'), _('hello to you too'))
    simple_command(_('tell me a joke'),
                   _('What do you call an alligator in a vest? An investigator.'))
    simple_command(_('three laws of robotics'),
                   _("""The laws of robotics are
0: A robot may not injure a human being or, through inaction, allow a human
being to come to harm.
1: A robot must obey orders given it by human beings except where such orders
would conflict with the First Law.
2: A robot must protect its own existence as long as such protection does not
conflict with the First or Second Law."""))
    simple_command(_('where are you from'), _("A galaxy far, far, just kidding. I'm from Seattle."))
    simple_command(_('your name'), _('A machine has no name'))

    # 'time' uses a dynamic action rather than a canned response.
    actor.add_keyword(_('time'), SpeakTime(say))
|
# flake8: noqa
from .environment import EnvironmentWrapper
from .gym import GymEnvWrapper
from .atari import AtariEnvWrapper
|
# Demo: pandas rolling-window operations on a Series and a DataFrame.
# Each print statement's observed output is shown in the comments below it.
import pandas as pd
s = pd.Series(range(10))
print(s)
# 0    0
# 1    1
# 2    2
# 3    3
# 4    4
# 5    5
# 6    6
# 7    7
# 8    8
# 9    9
# dtype: int64
# rolling() alone returns a lazy Rolling object; no computation happens yet.
print(s.rolling(3))
# Rolling [window=3,center=False,axis=0]
print(type(s.rolling(3)))
# <class 'pandas.core.window.rolling.Rolling'>
# Windows with fewer than `window` observations yield NaN by default.
print(s.rolling(3).sum())
# 0     NaN
# 1     NaN
# 2     3.0
# 3     6.0
# 4     9.0
# 5    12.0
# 6    15.0
# 7    18.0
# 8    21.0
# 9    24.0
# dtype: float64
print(s.rolling(2).sum())
# 0     NaN
# 1     1.0
# 2     3.0
# 3     5.0
# 4     7.0
# 5     9.0
# 6    11.0
# 7    13.0
# 8    15.0
# 9    17.0
# dtype: float64
print(s.rolling(4).sum())
# 0     NaN
# 1     NaN
# 2     NaN
# 3     6.0
# 4    10.0
# 5    14.0
# 6    18.0
# 7    22.0
# 8    26.0
# 9    30.0
# dtype: float64
# center=True labels each window at its midpoint instead of its right edge.
print(s.rolling(3, center=True).sum())
# 0     NaN
# 1     3.0
# 2     6.0
# 3     9.0
# 4    12.0
# 5    15.0
# 6    18.0
# 7    21.0
# 8    24.0
# 9     NaN
# dtype: float64
print(s.rolling(4, center=True).sum())
# 0     NaN
# 1     NaN
# 2     6.0
# 3    10.0
# 4    14.0
# 5    18.0
# 6    22.0
# 7    26.0
# 8    30.0
# 9     NaN
# dtype: float64
# min_periods lowers the number of observations required before a value
# (instead of NaN) is produced at the start of the series.
print(s.rolling(3, min_periods=2).sum())
# 0     NaN
# 1     1.0
# 2     3.0
# 3     6.0
# 4     9.0
# 5    12.0
# 6    15.0
# 7    18.0
# 8    21.0
# 9    24.0
# dtype: float64
print(s.rolling(3, min_periods=1).sum())
# 0     0.0
# 1     1.0
# 2     3.0
# 3     6.0
# 4     9.0
# 5    12.0
# 6    15.0
# 7    18.0
# 8    21.0
# 9    24.0
# dtype: float64
# DataFrame rolling applies the window to each column independently.
df = pd.DataFrame({'a': range(10), 'b': range(10, 0, -1),
                   'c': range(10, 20), 'd': range(20, 10, -1)})
print(df.rolling(2).sum())
#       a     b     c     d
# 0   NaN   NaN   NaN   NaN
# 1   1.0  19.0  21.0  39.0
# 2   3.0  17.0  23.0  37.0
# 3   5.0  15.0  25.0  35.0
# 4   7.0  13.0  27.0  33.0
# 5   9.0  11.0  29.0  31.0
# 6  11.0   9.0  31.0  29.0
# 7  13.0   7.0  33.0  27.0
# 8  15.0   5.0  35.0  25.0
# 9  17.0   3.0  37.0  23.0
# axis=1 rolls across columns (within each row).
# NOTE(review): the axis argument to rolling() is deprecated in newer pandas
# (2.x) — a transpose-based equivalent may be needed going forward; confirm
# against the pandas version in use.
print(df.rolling(2, axis=1).sum())
#     a     b     c     d
# 0 NaN  10.0  20.0  30.0
# 1 NaN  10.0  20.0  30.0
# 2 NaN  10.0  20.0  30.0
# 3 NaN  10.0  20.0  30.0
# 4 NaN  10.0  20.0  30.0
# 5 NaN  10.0  20.0  30.0
# 6 NaN  10.0  20.0  30.0
# 7 NaN  10.0  20.0  30.0
# 8 NaN  10.0  20.0  30.0
# 9 NaN  10.0  20.0  30.0
print(s.rolling(3).mean())
# 0    NaN
# 1    NaN
# 2    1.0
# 3    2.0
# 4    3.0
# 5    4.0
# 6    5.0
# 7    6.0
# 8    7.0
# 9    8.0
# dtype: float64
# agg() evaluates several aggregations per window at once; callables and
# lambdas are allowed alongside method names.
print(s.rolling(3).agg(['sum', 'mean', 'skew', 'cov',
                        max, min,
                        lambda x: max(x) - min(x)]))
#     sum  mean          skew  cov  max  min  <lambda>
# 0   NaN   NaN           NaN  NaN  NaN  NaN       NaN
# 1   NaN   NaN           NaN  NaN  NaN  NaN       NaN
# 2   3.0   1.0  0.000000e+00  1.0  2.0  0.0       2.0
# 3   6.0   2.0 -7.993606e-15  1.0  3.0  1.0       2.0
# 4   9.0   3.0  2.398082e-14  1.0  4.0  2.0       2.0
# 5  12.0   4.0 -6.394885e-14  1.0  5.0  3.0       2.0
# 6  15.0   5.0 -7.993606e-14  1.0  6.0  4.0       2.0
# 7  18.0   6.0  1.918465e-13  1.0  7.0  5.0       2.0
# 8  21.0   7.0  2.238210e-13  1.0  8.0  6.0       2.0
# 9  24.0   8.0 -5.115908e-13  1.0  9.0  7.0       2.0
|
#
# PySNMP MIB module Application-Monitoring-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Application-Monitoring-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:33:17 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, Gauge32, Counter32, NotificationType, MibIdentifier, IpAddress, Unsigned32, NotificationType, ObjectIdentity, Bits, TimeTicks, enterprises, iso, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "Gauge32", "Counter32", "NotificationType", "MibIdentifier", "IpAddress", "Unsigned32", "NotificationType", "ObjectIdentity", "Bits", "TimeTicks", "enterprises", "iso", "Integer32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
sni = MibIdentifier((1, 3, 6, 1, 4, 1, 231))
sniProductMibs = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 2))
sniAppMon = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 2, 23))
sniAppMonSubSystems = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 2, 23, 1))
sniAppMonBcamAppl = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 2, 23, 2))
sniAppMonUserAppl = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 2, 23, 3))
sniAppMonGlobalData = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 2, 23, 5))
sniAppMonDcamAppl = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 2, 23, 6))
appMonLogfiles = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 2, 23, 7))
sniAppMonJVs = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 2, 23, 8))
sniAppMonObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 2, 23, 9))
appMonTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 2, 23, 20))
appMonSubsysTabNum = MibScalar((1, 3, 6, 1, 4, 1, 231, 2, 23, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonSubsysTabNum.setStatus('mandatory')
if mibBuilder.loadTexts: appMonSubsysTabNum.setDescription('The number of entries in the table appMonSubsysTable')
appMonSubsysTable = MibTable((1, 3, 6, 1, 4, 1, 231, 2, 23, 1, 2), )
if mibBuilder.loadTexts: appMonSubsysTable.setStatus('mandatory')
if mibBuilder.loadTexts: appMonSubsysTable.setDescription('The Subsystem information table')
appMonSubsysEntry = MibTableRow((1, 3, 6, 1, 4, 1, 231, 2, 23, 1, 2, 1), ).setIndexNames((0, "Application-Monitoring-MIB", "appMonSubsysIndex"))
if mibBuilder.loadTexts: appMonSubsysEntry.setStatus('mandatory')
if mibBuilder.loadTexts: appMonSubsysEntry.setDescription('An entry in the table')
appMonSubsysIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 1, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonSubsysIndex.setStatus('mandatory')
if mibBuilder.loadTexts: appMonSubsysIndex.setDescription('A unique value for each entry, its value ranges between 1 and the value of appMonSubsysTabNum')
appMonSubsysName = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 1, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonSubsysName.setStatus('mandatory')
if mibBuilder.loadTexts: appMonSubsysName.setDescription('The name of the subsystem')
appMonSubsysVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 1, 2, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonSubsysVersion.setStatus('mandatory')
if mibBuilder.loadTexts: appMonSubsysVersion.setDescription('The current version of the subsystem')
appMonSubsysState = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 255))).clone(namedValues=NamedValues(("created", 1), ("not-created", 2), ("in-delete", 3), ("in-create", 4), ("in-resume", 5), ("in-hold", 6), ("not-resumed", 7), ("locked", 8), ("unknown", 255)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonSubsysState.setStatus('mandatory')
if mibBuilder.loadTexts: appMonSubsysState.setDescription('The current state of the subsystem')
appMonSubsysTasks = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 1, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonSubsysTasks.setStatus('mandatory')
if mibBuilder.loadTexts: appMonSubsysTasks.setDescription('Number of tasks connected to the subsystem')
appMonBcamApplTabNum = MibScalar((1, 3, 6, 1, 4, 1, 231, 2, 23, 2, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonBcamApplTabNum.setStatus('mandatory')
if mibBuilder.loadTexts: appMonBcamApplTabNum.setDescription('The number of entries in the table appMonTable')
appMonBcamApplTable = MibTable((1, 3, 6, 1, 4, 1, 231, 2, 23, 2, 2), )
if mibBuilder.loadTexts: appMonBcamApplTable.setStatus('mandatory')
if mibBuilder.loadTexts: appMonBcamApplTable.setDescription('The BCAM Application information table')
appMonBcamApplEntry = MibTableRow((1, 3, 6, 1, 4, 1, 231, 2, 23, 2, 2, 1), ).setIndexNames((0, "Application-Monitoring-MIB", "appMonBcamApplIndex"))
if mibBuilder.loadTexts: appMonBcamApplEntry.setStatus('mandatory')
if mibBuilder.loadTexts: appMonBcamApplEntry.setDescription('An entry in the table')
appMonBcamApplIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 2, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonBcamApplIndex.setStatus('mandatory')
if mibBuilder.loadTexts: appMonBcamApplIndex.setDescription('A unique value for each entry, its value ranges between 1 and the value of appMonBcamApplTabNum')
appMonBcamApplName = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 2, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonBcamApplName.setStatus('mandatory')
if mibBuilder.loadTexts: appMonBcamApplName.setDescription('The name of the BCAM application')
appMonBcamApplVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 2, 2, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonBcamApplVersion.setStatus('mandatory')
if mibBuilder.loadTexts: appMonBcamApplVersion.setDescription('The current version of the BCAM application')
appMonBcamApplState = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 2, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 255))).clone(namedValues=NamedValues(("running", 1), ("terminated", 2), ("aborted", 3), ("loaded", 4), ("in-hold", 5), ("scheduled", 6), ("unknown", 255)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonBcamApplState.setStatus('mandatory')
if mibBuilder.loadTexts: appMonBcamApplState.setDescription('The current state of the BCAM application')
appMonBcamApplMonJV = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 2, 2, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonBcamApplMonJV.setStatus('mandatory')
if mibBuilder.loadTexts: appMonBcamApplMonJV.setDescription('Name of the MONJV monitoring the application')
appMonUserApplTabNum = MibScalar((1, 3, 6, 1, 4, 1, 231, 2, 23, 3, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonUserApplTabNum.setStatus('mandatory')
if mibBuilder.loadTexts: appMonUserApplTabNum.setDescription('The number of entries in the table appMonTable')
appMonUserApplTable = MibTable((1, 3, 6, 1, 4, 1, 231, 2, 23, 3, 2), )
if mibBuilder.loadTexts: appMonUserApplTable.setStatus('mandatory')
if mibBuilder.loadTexts: appMonUserApplTable.setDescription('The User Application information table')
appMonUserApplEntry = MibTableRow((1, 3, 6, 1, 4, 1, 231, 2, 23, 3, 2, 1), ).setIndexNames((0, "Application-Monitoring-MIB", "appMonUserApplIndex"))
if mibBuilder.loadTexts: appMonUserApplEntry.setStatus('mandatory')
if mibBuilder.loadTexts: appMonUserApplEntry.setDescription('An entry in the table')
appMonUserApplIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 3, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonUserApplIndex.setStatus('mandatory')
if mibBuilder.loadTexts: appMonUserApplIndex.setDescription('A unique value for each entry, its value ranges between 1 and the value of appMonUserApplTabNum')
appMonUserApplName = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 3, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonUserApplName.setStatus('mandatory')
if mibBuilder.loadTexts: appMonUserApplName.setDescription('The name of the User application')
appMonUserApplVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 3, 2, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonUserApplVersion.setStatus('mandatory')
if mibBuilder.loadTexts: appMonUserApplVersion.setDescription('The current version of the User application')
appMonUserApplState = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 3, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 255))).clone(namedValues=NamedValues(("running", 1), ("terminated", 2), ("aborted", 3), ("loaded", 4), ("in-hold", 5), ("scheduled", 6), ("unknown", 255)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonUserApplState.setStatus('mandatory')
if mibBuilder.loadTexts: appMonUserApplState.setDescription('The current state of the User application')
appMonUserApplMonJV = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 3, 2, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonUserApplMonJV.setStatus('mandatory')
if mibBuilder.loadTexts: appMonUserApplMonJV.setDescription('Name of the MONJV monitoring the application')
appMonVersion = MibScalar((1, 3, 6, 1, 4, 1, 231, 2, 23, 5, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonVersion.setStatus('mandatory')
if mibBuilder.loadTexts: appMonVersion.setDescription('Version of application monitor')
appMonConfFile = MibScalar((1, 3, 6, 1, 4, 1, 231, 2, 23, 5, 2), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: appMonConfFile.setStatus('mandatory')
if mibBuilder.loadTexts: appMonConfFile.setDescription('Pathname of the configuration file')
appMonTrapFormat = MibScalar((1, 3, 6, 1, 4, 1, 231, 2, 23, 5, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("generic", 1), ("tv-cc", 2), ("all", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: appMonTrapFormat.setStatus('mandatory')
if mibBuilder.loadTexts: appMonTrapFormat.setDescription('Format of trap')
appMonDcamApplTabNum = MibScalar((1, 3, 6, 1, 4, 1, 231, 2, 23, 6, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonDcamApplTabNum.setStatus('mandatory')
if mibBuilder.loadTexts: appMonDcamApplTabNum.setDescription('The number of entries in the table appMonDcamApplTable')
appMonDcamApplTable = MibTable((1, 3, 6, 1, 4, 1, 231, 2, 23, 6, 2), )
if mibBuilder.loadTexts: appMonDcamApplTable.setStatus('mandatory')
if mibBuilder.loadTexts: appMonDcamApplTable.setDescription('The DCAM Application information table')
appMonDcamApplEntry = MibTableRow((1, 3, 6, 1, 4, 1, 231, 2, 23, 6, 2, 1), ).setIndexNames((0, "Application-Monitoring-MIB", "appMonDcamApplIndex"))
if mibBuilder.loadTexts: appMonDcamApplEntry.setStatus('mandatory')
if mibBuilder.loadTexts: appMonDcamApplEntry.setDescription('An entry in the table')
appMonDcamApplIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 6, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonDcamApplIndex.setStatus('mandatory')
if mibBuilder.loadTexts: appMonDcamApplIndex.setDescription('A unique value for each entry, its value ranges between 1 and the value of appMonDcamApplTabNum')
appMonDcamApplName = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 6, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonDcamApplName.setStatus('mandatory')
if mibBuilder.loadTexts: appMonDcamApplName.setDescription('The name of the DCAM application')
appMonDcamApplHost = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 6, 2, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonDcamApplHost.setStatus('mandatory')
if mibBuilder.loadTexts: appMonDcamApplHost.setDescription('The host on which the DCAM application is running')
appMonDcamApplState = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 6, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 255))).clone(namedValues=NamedValues(("running", 1), ("terminated", 2), ("unknown", 255)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonDcamApplState.setStatus('mandatory')
if mibBuilder.loadTexts: appMonDcamApplState.setDescription('The current state of the DCAM application')
appMonLogfTabNum = MibScalar((1, 3, 6, 1, 4, 1, 231, 2, 23, 7, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonLogfTabNum.setStatus('mandatory')
if mibBuilder.loadTexts: appMonLogfTabNum.setDescription('The number of entries in the table appMonLogfTable')
appMonLogfTable = MibTable((1, 3, 6, 1, 4, 1, 231, 2, 23, 7, 2), )
if mibBuilder.loadTexts: appMonLogfTable.setStatus('mandatory')
if mibBuilder.loadTexts: appMonLogfTable.setDescription('The Logfile table')
appMonLogfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 231, 2, 23, 7, 2, 1), ).setIndexNames((0, "Application-Monitoring-MIB", "appMonLogfName"))
if mibBuilder.loadTexts: appMonLogfEntry.setStatus('mandatory')
if mibBuilder.loadTexts: appMonLogfEntry.setDescription('An entry in the table')
appMonLogfName = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 7, 2, 1, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonLogfName.setStatus('mandatory')
if mibBuilder.loadTexts: appMonLogfName.setDescription('Pathname of the logfile')
appMonLogfAppl = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 7, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonLogfAppl.setStatus('mandatory')
if mibBuilder.loadTexts: appMonLogfAppl.setDescription('The application name')
appMonLogfState = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 7, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("deactive", 1), ("active", 2), ("start-begin", 3), ("start-new", 4), ("start-end", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: appMonLogfState.setStatus('mandatory')
if mibBuilder.loadTexts: appMonLogfState.setDescription('The current monitoring state of the logfile is either active or deactive. For write operation either start-begin, start-new, start-end or deactive has to be specified')
appMonLogfPattern = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 7, 2, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonLogfPattern.setStatus('mandatory')
if mibBuilder.loadTexts: appMonLogfPattern.setDescription('Pattern for which a trap is generated ')
appMonJVTabNum = MibScalar((1, 3, 6, 1, 4, 1, 231, 2, 23, 8, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonJVTabNum.setStatus('mandatory')
if mibBuilder.loadTexts: appMonJVTabNum.setDescription('The number of entries in the table appMonJVTable')
appMonJVTable = MibTable((1, 3, 6, 1, 4, 1, 231, 2, 23, 8, 2), )
if mibBuilder.loadTexts: appMonJVTable.setStatus('mandatory')
if mibBuilder.loadTexts: appMonJVTable.setDescription('The BCAM Application information table')
appMonJVEntry = MibTableRow((1, 3, 6, 1, 4, 1, 231, 2, 23, 8, 2, 1), ).setIndexNames((0, "Application-Monitoring-MIB", "appMonJVName"))
if mibBuilder.loadTexts: appMonJVEntry.setStatus('mandatory')
if mibBuilder.loadTexts: appMonJVEntry.setDescription('An entry in the table')
appMonJVName = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 8, 2, 1, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonJVName.setStatus('mandatory')
if mibBuilder.loadTexts: appMonJVName.setDescription('Name of the JV')
appMonJVAppl = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 8, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonJVAppl.setStatus('mandatory')
if mibBuilder.loadTexts: appMonJVAppl.setDescription('The application name')
appMonJVValue = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 8, 2, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonJVValue.setStatus('mandatory')
if mibBuilder.loadTexts: appMonJVValue.setDescription('The current value of the JV')
appMonJVPattern = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 8, 2, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonJVPattern.setStatus('mandatory')
if mibBuilder.loadTexts: appMonJVPattern.setDescription('Value pattern for which a trap will be sent')
appMonObjectsTabNum = MibScalar((1, 3, 6, 1, 4, 1, 231, 2, 23, 9, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonObjectsTabNum.setStatus('mandatory')
if mibBuilder.loadTexts: appMonObjectsTabNum.setDescription('The number of entries in the table appMonObjectTable')
appMonObjectTable = MibTable((1, 3, 6, 1, 4, 1, 231, 2, 23, 9, 2), )
if mibBuilder.loadTexts: appMonObjectTable.setStatus('mandatory')
if mibBuilder.loadTexts: appMonObjectTable.setDescription('The Object table')
appMonObjectEntry = MibTableRow((1, 3, 6, 1, 4, 1, 231, 2, 23, 9, 2, 1), ).setIndexNames((0, "Application-Monitoring-MIB", "appMonObjectIndex"))
if mibBuilder.loadTexts: appMonObjectEntry.setStatus('mandatory')
if mibBuilder.loadTexts: appMonObjectEntry.setDescription('An entry in the table')
appMonObjectIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 9, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonObjectIndex.setStatus('mandatory')
if mibBuilder.loadTexts: appMonObjectIndex.setDescription('A unique value for each entry, its value ranges between 1 and the value of appMonObjectTabNum')
appMonObjectName = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 9, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonObjectName.setStatus('mandatory')
if mibBuilder.loadTexts: appMonObjectName.setDescription('The name of the object')
appMonObjectBcamAppl = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 9, 2, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonObjectBcamAppl.setStatus('mandatory')
if mibBuilder.loadTexts: appMonObjectBcamAppl.setDescription('Name of the BCAM applications belonging to the object')
appMonObjectUserAppl = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 9, 2, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonObjectUserAppl.setStatus('mandatory')
if mibBuilder.loadTexts: appMonObjectUserAppl.setDescription('Name of the user applications belonging to the object')
appMonObjectDcamAppl = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 9, 2, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonObjectDcamAppl.setStatus('mandatory')
if mibBuilder.loadTexts: appMonObjectDcamAppl.setDescription('Name of the DCAM applications belonging to the object')
appMonObjectSub = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 9, 2, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonObjectSub.setStatus('mandatory')
if mibBuilder.loadTexts: appMonObjectSub.setDescription('Name of the subsystems belonging to the object')
appMonObjectLogfile = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 9, 2, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonObjectLogfile.setStatus('mandatory')
if mibBuilder.loadTexts: appMonObjectLogfile.setDescription('Name of the logfiles belonging to the object')
appMonObjectJV = MibTableColumn((1, 3, 6, 1, 4, 1, 231, 2, 23, 9, 2, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appMonObjectJV.setStatus('mandatory')
if mibBuilder.loadTexts: appMonObjectJV.setDescription('Name of the JVs belonging to the object')
appMonTrapData = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 2, 23, 20, 1))
appMonSource = MibScalar((1, 3, 6, 1, 4, 1, 231, 2, 23, 20, 1, 1), DisplayString())
if mibBuilder.loadTexts: appMonSource.setStatus('mandatory')
if mibBuilder.loadTexts: appMonSource.setDescription(' ')
appMonDevice = MibScalar((1, 3, 6, 1, 4, 1, 231, 2, 23, 20, 1, 2), DisplayString())
if mibBuilder.loadTexts: appMonDevice.setStatus('mandatory')
if mibBuilder.loadTexts: appMonDevice.setDescription(' ')
appMonMsg = MibScalar((1, 3, 6, 1, 4, 1, 231, 2, 23, 20, 1, 3), DisplayString())
if mibBuilder.loadTexts: appMonMsg.setStatus('mandatory')
if mibBuilder.loadTexts: appMonMsg.setDescription(' ')
appMonWeight = MibScalar((1, 3, 6, 1, 4, 1, 231, 2, 23, 20, 1, 4), Integer32())
if mibBuilder.loadTexts: appMonWeight.setStatus('mandatory')
if mibBuilder.loadTexts: appMonWeight.setDescription(' ')
appMonAckOID = MibScalar((1, 3, 6, 1, 4, 1, 231, 2, 23, 20, 1, 6), ObjectIdentifier())
if mibBuilder.loadTexts: appMonAckOID.setStatus('mandatory')
if mibBuilder.loadTexts: appMonAckOID.setDescription(' ')
appMonGeneric = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 2, 23, 20, 2))
appMonConfirm = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 2, 23, 20, 3))
appMonGenTrap = NotificationType((1, 3, 6, 1, 4, 1, 231, 2, 23, 20, 2) + (0,999)).setObjects(("Application-Monitoring-MIB", "appMonSource"), ("Application-Monitoring-MIB", "appMonDevice"), ("Application-Monitoring-MIB", "appMonMsg"), ("Application-Monitoring-MIB", "appMonWeight"))
if mibBuilder.loadTexts: appMonGenTrap.setDescription('General application trap')
appMonConfirmTrap = NotificationType((1, 3, 6, 1, 4, 1, 231, 2, 23, 20, 3) + (0,999)).setObjects(("Application-Monitoring-MIB", "appMonSource"), ("Application-Monitoring-MIB", "appMonDevice"), ("Application-Monitoring-MIB", "appMonMsg"), ("Application-Monitoring-MIB", "appMonWeight"))
if mibBuilder.loadTexts: appMonConfirmTrap.setDescription('General application trap, the trap must be confirmed')
mibBuilder.exportSymbols("Application-Monitoring-MIB", appMonBcamApplTabNum=appMonBcamApplTabNum, appMonSubsysTabNum=appMonSubsysTabNum, appMonBcamApplMonJV=appMonBcamApplMonJV, appMonConfFile=appMonConfFile, appMonVersion=appMonVersion, appMonSource=appMonSource, appMonJVValue=appMonJVValue, appMonBcamApplState=appMonBcamApplState, appMonLogfPattern=appMonLogfPattern, appMonDevice=appMonDevice, appMonDcamApplHost=appMonDcamApplHost, appMonTraps=appMonTraps, appMonSubsysVersion=appMonSubsysVersion, appMonLogfTabNum=appMonLogfTabNum, appMonGeneric=appMonGeneric, sniAppMonObjects=sniAppMonObjects, appMonObjectUserAppl=appMonObjectUserAppl, appMonLogfTable=appMonLogfTable, appMonSubsysIndex=appMonSubsysIndex, appMonJVEntry=appMonJVEntry, appMonAckOID=appMonAckOID, appMonSubsysTable=appMonSubsysTable, appMonObjectDcamAppl=appMonObjectDcamAppl, appMonTrapData=appMonTrapData, sniAppMon=sniAppMon, appMonBcamApplTable=appMonBcamApplTable, appMonDcamApplIndex=appMonDcamApplIndex, appMonObjectIndex=appMonObjectIndex, appMonGenTrap=appMonGenTrap, appMonDcamApplEntry=appMonDcamApplEntry, appMonObjectsTabNum=appMonObjectsTabNum, appMonDcamApplState=appMonDcamApplState, appMonSubsysEntry=appMonSubsysEntry, appMonJVTabNum=appMonJVTabNum, appMonWeight=appMonWeight, appMonUserApplEntry=appMonUserApplEntry, sniAppMonSubSystems=sniAppMonSubSystems, appMonObjectLogfile=appMonObjectLogfile, appMonLogfState=appMonLogfState, sniAppMonDcamAppl=sniAppMonDcamAppl, appMonMsg=appMonMsg, appMonLogfEntry=appMonLogfEntry, appMonUserApplState=appMonUserApplState, appMonSubsysTasks=appMonSubsysTasks, appMonLogfName=appMonLogfName, sniProductMibs=sniProductMibs, sniAppMonJVs=sniAppMonJVs, appMonSubsysName=appMonSubsysName, appMonConfirm=appMonConfirm, appMonDcamApplTabNum=appMonDcamApplTabNum, appMonConfirmTrap=appMonConfirmTrap, appMonUserApplTabNum=appMonUserApplTabNum, appMonObjectName=appMonObjectName, appMonDcamApplName=appMonDcamApplName, appMonBcamApplIndex=appMonBcamApplIndex, 
appMonJVTable=appMonJVTable, appMonLogfiles=appMonLogfiles, appMonLogfAppl=appMonLogfAppl, appMonSubsysState=appMonSubsysState, appMonDcamApplTable=appMonDcamApplTable, appMonJVPattern=appMonJVPattern, sniAppMonGlobalData=sniAppMonGlobalData, appMonBcamApplEntry=appMonBcamApplEntry, appMonBcamApplVersion=appMonBcamApplVersion, sniAppMonUserAppl=sniAppMonUserAppl, appMonJVAppl=appMonJVAppl, appMonObjectJV=appMonObjectJV, appMonUserApplIndex=appMonUserApplIndex, appMonBcamApplName=appMonBcamApplName, appMonObjectEntry=appMonObjectEntry, appMonUserApplTable=appMonUserApplTable, appMonObjectSub=appMonObjectSub, appMonUserApplMonJV=appMonUserApplMonJV, appMonJVName=appMonJVName, appMonObjectBcamAppl=appMonObjectBcamAppl, appMonTrapFormat=appMonTrapFormat, appMonObjectTable=appMonObjectTable, sniAppMonBcamAppl=sniAppMonBcamAppl, appMonUserApplName=appMonUserApplName, sni=sni, appMonUserApplVersion=appMonUserApplVersion)
|
# Python imports.
import random
from os import path
import sys
# Other imports.
from HierarchyStateClass import HierarchyState
from simple_rl.utils import make_mdp
from simple_rl.planning.ValueIterationClass import ValueIteration
parent_dir = path.dirname(path.dirname(path.abspath(__file__)))
sys.path.append(parent_dir)
from state_abs.StateAbstractionClass import StateAbstraction
from state_abs import sa_helpers
from StateAbstractionStackClass import StateAbstractionStack
import make_abstr_mdp
# ----------------------------------
# -- Make State Abstraction Stack --
# ----------------------------------
def make_random_sa_stack(mdp_distr, cluster_size_ratio=0.5, max_num_levels=2):
    '''
    Build a StateAbstractionStack whose abstraction levels group states randomly.

    Args:
        mdp_distr (MDPDistribution)
        cluster_size_ratio (float): A float in (0,1) that determines the size of the abstract state space.
        max_num_levels (int): Determines the _total_ number of levels in the hierarchy (includes ground).

    Returns:
        (StateAbstractionStack)
    '''
    # Get ground state space by enumerating states of the first MDP in the distribution.
    vi = ValueIteration(mdp_distr.get_all_mdps()[0], delta=0.0001, max_iterations=5000)
    ground_state_space = vi.get_states()
    sa_stack = StateAbstractionStack(list_of_phi=[])

    # Each loop adds one abstraction level (the ground level already exists,
    # hence max_num_levels - 1 iterations).
    # FIX: was `xrange`, which does not exist in Python 3; `range` behaves the
    # same for this loop on both Python 2 and Python 3.
    for i in range(max_num_levels - 1):
        # Grab current state space (at level i).
        cur_state_space = _get_level_i_state_space(ground_state_space, sa_stack, i)
        cur_state_space_size = len(cur_state_space)

        # NOTE(review): dividing by a ratio in (0,1) *inflates* the count, so
        # this guard only fires for an empty state space — confirm whether
        # multiplication (the projected abstract size) was intended here.
        if int(cur_state_space_size / cluster_size_ratio) <= 1:
            # The abstraction is as small as it can get.
            break

        # Randomly assign each level-i state to one of the (smaller) set of
        # level-(i+1) clusters.
        new_phi = {}
        for s in cur_state_space:
            new_phi[s] = HierarchyState(data=random.randint(1, max(int(cur_state_space_size * cluster_size_ratio), 1)), level=i + 1)

        if len(set(new_phi.values())) <= 1:
            # The abstraction is as small as it can get.
            break

        # Add the new mapping to the stack.
        sa_stack.add_phi(new_phi)

    return sa_stack
def _get_level_i_state_space(ground_state_space, state_abstr_stack, level):
'''
Args:
mdp_distr (MDPDistribution)
state_abstr_stack (StateAbstractionStack)
level (int)
Returns:
(list)
'''
level_i_state_space = set([])
for s in ground_state_space:
level_i_state_space.add(state_abstr_stack.phi(s, level))
return list(level_i_state_space)
def main():
    # Make MDP Distribution.
    mdp_class = "four_room"
    environment = make_mdp.make_mdp_distr(mdp_class=mdp_class, grid_dim=10)
    # Build a random abstraction hierarchy over the distribution and report
    # the state-space size at each level.
    sa_stack = make_random_sa_stack(environment, max_num_levels=5)
    sa_stack.print_state_space_sizes()
# Script entry point.
if __name__ == "__main__":
    main()
"""
(C) Copyright 2021 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on June 30, 2021
"""
from typing import Dict, Optional, Sequence, Tuple, Union
import pandas as pd
import numpy as np
from sklearn import metrics
import sklearn
import matplotlib.pyplot as plt
class MetricsLibClass:
    """
    Static helpers for classification metrics: ROC/PR AUCs, ROC curves,
    accuracy, confusion-matrix derived scores, Brier scores and
    probability-to-class conversion.
    """

    @staticmethod
    def auc_roc(pred: Sequence[Union[np.ndarray, float]],
                target: Sequence[Union[np.ndarray, int]],
                sample_weight: Optional[Sequence[Union[np.ndarray, float]]] = None,
                pos_class_index: int = -1,
                max_fpr: Optional[float] = None) -> float:
        """
        Compute auc roc (Receiver operating characteristic) score using sklearn (one vs rest)
        :param pred: prediction array per sample. Each element shape [num_classes]
        :param target: target per sample. Each element is an integer in range [0 - num_classes)
        :param sample_weight: Optional - weight per sample for a weighted auc. Each element is float in range [0-1]
        :param pos_class_index: the class to compute the metrics in one vs rest manner - set to 1 in binary classification.
                                A negative value selects the last class.
        :param max_fpr: float > 0 and <= 1, default=None
               If not ``None``, the standardized partial AUC over the range [0, max_fpr] is returned.
        :return: auc Receiver operating characteristic score
        """
        if not isinstance(pred[0], np.ndarray):
            # Binary case supplied as plain floats: each score is P(positive).
            pred = [np.array(p) for p in pred]
            pos_class_index = 1
            y_score = np.asarray(pred)
        else:
            if pos_class_index < 0:
                # Default "positive" class is the last one.
                pos_class_index = pred[0].shape[0] - 1
            y_score = np.asarray(pred)[:, pos_class_index]
        # One-vs-rest: binarize targets against the positive class.
        return metrics.roc_auc_score(y_score=y_score,
                                     y_true=np.asarray(target) == pos_class_index,
                                     sample_weight=sample_weight,
                                     max_fpr=max_fpr)

    @staticmethod
    def roc_curve(
            pred: Sequence[Union[np.ndarray, float]],
            target: Sequence[Union[np.ndarray, int]],
            class_names: Sequence[str],
            sample_weight: Optional[Sequence[Union[np.ndarray, float]]] = None,
            output_filename: Optional[str] = None) -> Dict:
        """
        Multi class version for roc curve
        :param pred: List of arrays of shape [NUM_CLASSES]
        :param target: List of arrays specifying the target class per sample
        :param class_names: display name per class; entries that are None are skipped.
                            If None, binary classification with classes [None, "Positive"] is assumed.
        :param sample_weight: Optional - weight per sample for a weighted curve
        :param output_filename: Optional - when given, the curves are also plotted and saved to this file
        :return: dict mapping class name to {"fpr": <array>, "tpr": <array>, "auc": <float>}
        """
        # if class_names not specified assume binary classification
        if class_names is None:
            class_names = [None, "Positive"]

        # extract info for the plot - one-vs-rest curve per named class
        results = {}
        for cls, cls_name in enumerate(class_names):
            if cls_name is None:
                continue
            fpr, tpr, _ = sklearn.metrics.roc_curve(target, np.array(pred)[:, cls], sample_weight=sample_weight, pos_label=cls)
            auc = sklearn.metrics.auc(fpr, tpr)
            results[cls_name] = {"fpr": fpr, "tpr": tpr, "auc": auc}

        # display - plot all curves on one figure and save it
        if output_filename is not None:
            for cls_name, cls_res in results.items():
                plt.plot(cls_res["fpr"], cls_res["tpr"], label=f'{cls_name}(auc={cls_res["auc"]:0.2f})')
            plt.title("ROC curve")
            plt.xlabel('FPR')
            plt.ylabel('TPR')
            plt.legend()
            plt.savefig(output_filename)
            plt.close()
        return results

    @staticmethod
    def auc_pr(pred: Sequence[Union[np.ndarray, float]],
               target: Sequence[Union[np.ndarray, int]],
               sample_weight: Optional[Sequence[Union[np.ndarray, float]]] = None,
               pos_class_index: int = -1) -> float:
        """
        Compute auc pr (precision-recall) score using sklearn (one vs rest)
        :param pred: prediction array per sample. Each element shape [num_classes]
        :param target: target per sample. Each element is an integer in range [0 - num_classes)
        :param sample_weight: Optional - weight per sample for a weighted auc. Each element is float in range [0-1]
        :param pos_class_index: the class to compute the metrics in one vs rest manner - set to 1 in binary classification.
                                A negative value selects the last class.
        :return: auc precision recall score
        """
        if not isinstance(pred[0], np.ndarray):
            # Binary case supplied as plain floats: each score is P(positive).
            pred = [np.array(p) for p in pred]
            pos_class_index = 1
            y_score = np.asarray(pred)
        else:
            if pos_class_index < 0:
                # Default "positive" class is the last one.
                pos_class_index = pred[0].shape[0] - 1
            y_score = np.asarray(pred)[:, pos_class_index]
        precision, recall, _ = metrics.precision_recall_curve(probas_pred=y_score,
                                                              y_true=np.asarray(target) == pos_class_index,
                                                              sample_weight=sample_weight)
        return metrics.auc(recall, precision)

    @staticmethod
    def accuracy(pred: Sequence[Union[np.ndarray, int]],
                 target: Sequence[Union[np.ndarray, int]],
                 sample_weight: Optional[Sequence[Union[np.ndarray, float]]] = None):
        """
        Compute accuracy score
        :param pred: class prediction. Each element is an integer in range [0 - num_classes)
        :param target: the target class. Each element is an integer in range [0 - num_classes)
        :param sample_weight: Optional - weight per sample for a weighted score. Each element is float in range [0-1]
        :return: accuracy score
        """
        pred = np.array(pred)
        target = np.array(target)
        return metrics.accuracy_score(target, pred, sample_weight=sample_weight)

    @staticmethod
    def confusion_metrics(pred: Sequence[Union[np.ndarray, int]],
                          target: Sequence[Union[np.ndarray, int]],
                          pos_class_index: int = 1,
                          metrics: Sequence[str] = tuple(),
                          sample_weight: Optional[Sequence[Union[np.ndarray, float]]] = None) -> Dict[str, float]:
        """
        Compute metrics derived from one-vs-rest confusion matrix such as 'sensitivity', 'recall', 'tpr', 'specificity', 'selectivity', 'tnr', 'precision', 'ppv', 'f1'
        Assuming that there are positive cases and negative cases in targets
        :param pred: class prediction. Each element is an integer in range [0 - num_classes)
        :param target: the target class. Each element is an integer in range [0 - num_classes)
        :param pos_class_index: the class to compute the metrics in one vs rest manner - set to 1 in binary classification
        :param metrics: required metrics names, options: 'sensitivity', 'recall', 'tpr', 'specificity', 'selectivity', 'tnr', 'precision', 'ppv', 'f1', 'matrix'
                        (NOTE: this parameter shadows the sklearn `metrics` module inside this function - the module is not used here.)
        :param sample_weight: Optional - weight per sample for a weighted score. Each element is float in range [0-1]
        :return: dictionary, including the computed values for the required metrics.
                 format: {"tp": <>, "tn": <>, "fp": <>, "fn": <>, <required metric name>: <>}
        """
        pred = np.array(pred)
        target = np.array(target)

        # Binarize one-vs-rest against the positive class.
        class_target_t = np.where(target == pos_class_index, 1, 0)
        class_pred_t = np.where(pred == pos_class_index, 1, 0)

        if sample_weight is None:
            sample_weight = np.ones_like(class_target_t)

        res = {}
        # Weighted confusion-matrix cells.
        tp = (np.logical_and(class_target_t, class_pred_t) * sample_weight).sum()
        fn = (np.logical_and(class_target_t, np.logical_not(class_pred_t)) * sample_weight).sum()
        fp = (np.logical_and(np.logical_not(class_target_t), class_pred_t) * sample_weight).sum()
        tn = (np.logical_and(np.logical_not(class_target_t), np.logical_not(class_pred_t)) * sample_weight).sum()

        for metric in metrics:
            if metric in ['sensitivity', 'recall', 'tpr']:
                res[metric] = tp / (tp + fn)
            elif metric in ['specificity', 'selectivity', 'tnr']:
                # FIX: true-negative rate is tn / (tn + fp); the previous code used tp.
                res[metric] = tn / (tn + fp)
            elif metric in ['precision', 'ppv']:
                # Guard against no positive predictions.
                if tp + fp != 0:
                    res[metric] = tp / (tp + fp)
                else:
                    res[metric] = 0
            elif metric in ['f1']:
                res[metric] = 2 * tp / (2 * tp + fp + fn)
            elif metric in ["matrix"]:
                res["tp"] = tp
                res["fn"] = fn
                res["fp"] = fp
                res["tn"] = tn
            else:
                raise Exception(f'unknown metric {metric}')
        return res

    @staticmethod
    def confusion_matrix(cls_pred: Sequence[int], target: Sequence[int], class_names: Sequence[str], sample_weight: Optional[Sequence[float]] = None) -> Dict[str, pd.DataFrame]:
        """
        Calculates Confusion Matrix (multi class version)
        :param cls_pred: sequence of class prediction
        :param target: sequence of labels
        :param class_names: string name per class
        :param sample_weight: optional, weight per sample.
        :return: {"count": <confusion matrix>, "percent" : <confusion matrix - percent>)
                 Confusion matrix whose i-th row and j-th column entry indicates the number of samples with true label being i-th class and predicted label being j-th class.
        """
        conf_matrix = sklearn.metrics.confusion_matrix(y_true=target, y_pred=cls_pred, sample_weight=sample_weight)
        conf_matrix_count = pd.DataFrame(conf_matrix, columns=class_names, index=class_names)
        # Row totals (samples per true class); "percent" normalizes each row by its total.
        conf_matrix_total = conf_matrix.sum(axis=1)
        conf_matrix_count["total"] = conf_matrix_total
        conf_matrix_percent = pd.DataFrame(conf_matrix / conf_matrix_total[:, None], columns=class_names, index=class_names)
        return {"count": conf_matrix_count, "percent": conf_matrix_percent}

    @staticmethod
    def multi_class_bs(pred: np.ndarray, target: np.ndarray) -> float:
        """
        Brier Score:
        bs = 1/N * SUM_n SUM_c (pred_{n,c} - target_{n,c})^2
        :param pred: probability score. Expected Shape [N, C]
        :param target: target class (int) per sample. Expected Shape [N]
        :return: the Brier score (lower is better)
        """
        # create one hot vector of the targets
        target_one_hot = np.zeros_like(pred)
        target_one_hot[np.arange(target_one_hot.shape[0]), target] = 1
        return float(np.mean(np.sum((pred - target_one_hot) ** 2, axis=1)))

    @staticmethod
    def multi_class_bss(pred: Sequence[np.ndarray], target: Sequence[np.ndarray]) -> float:
        """
        Brier Skill Score:
        bss = 1 - bs / bs_{ref}
        bs_{ref} will be computed for a model that makes a predictions according to the prevalance of each class in dataset
        :param pred: probability score. Expected Shape [N, C] (or [N] binary probabilities)
        :param target: target class (int) per sample. Expected Shape [N]
        :return: the Brier skill score (1.0 is perfect, <= 0 is no better than prevalence)
        """
        if isinstance(pred[0], np.ndarray) and pred[0].shape[0] > 1:
            pred = np.array(pred)
        else:
            # binary case: expand P(positive) into a two-column [P(neg), P(pos)] array
            pred = np.array(pred)
            pred = np.stack((1 - pred, pred), axis=-1)
        target = np.array(target)

        # BS of the actual model
        bs = MetricsLibClass.multi_class_bs(pred, target)

        # no skill BS: constant prediction equal to each class' prevalence
        no_skill_prediction = [(target == target_cls).sum() / target.shape[0] for target_cls in
                               range(pred.shape[-1])]
        no_skill_predictions = np.tile(np.array(no_skill_prediction), (pred.shape[0], 1))
        bs_ref = MetricsLibClass.multi_class_bs(no_skill_predictions, target)

        return 1.0 - bs / bs_ref

    @staticmethod
    def convert_probabilities_to_class(pred: Sequence[Union[np.ndarray, float]], operation_point: Union[float, Sequence[Tuple[int, float]]]) -> np.ndarray:
        """
        convert probabilities to class prediction
        :param pred: sequence of numpy arrays / floats of shape [NUM_CLASSES]
        :param operation_point: one of:
               - None or empty sequence: plain argmax
               - float: binary threshold on the positive class
               - list of tuples (class_idx, threshold), applied in order to the
                 not-yet-decided samples; the special class_idx "argmax" assigns
                 the argmax class to every remaining undecided sample.
        :return: array of class predictions
        """
        if isinstance(pred[0], np.ndarray) and pred[0].shape[0] > 1:
            pred = np.array(pred)
        else:
            # binary case: expand P(positive) into a two-column [P(neg), P(pos)] array
            pred = np.array(pred)
            pred = np.stack((1 - pred, pred), axis=-1)

        # if no threshold specified, simply apply argmax
        if operation_point is None or (isinstance(operation_point, Sequence) and len(operation_point) == 0):
            return np.argmax(pred, -1)

        # binary operation point
        if isinstance(operation_point, float):
            if pred[0].shape[0] == 2:
                return np.where(pred[:, 1] > operation_point, 1, 0)
            elif pred[0].shape[0] == 1:
                return np.where(pred > operation_point, 1, 0)
            else:
                raise Exception(f"Error - got single float as an operation point for multiclass prediction")

        # convert according to thresholds; -1 marks a not-yet-decided sample
        output_class = np.array([-1 for x in range(len(pred))])
        for thr in operation_point:
            class_idx = thr[0]
            class_thr = thr[1]
            # argmax entry: fill every undecided sample with its argmax class
            if class_idx == "argmax":
                output_class[output_class == -1] = np.argmax(pred, -1)[output_class == -1]
                # FIX: "argmax" is not a valid column index - without this
                # `continue` the threshold code below crashed on pred[:, "argmax"].
                continue
            # among all the samples which not already predicted, set the ones that cross the threshold with this class
            target_idx = np.argwhere(np.logical_and(pred[:, class_idx] > class_thr, output_class == -1))
            output_class[target_idx] = class_idx
        return output_class
|
import logging
from enum import unique, Enum
logger = logging.getLogger(__name__)
@unique
class WirenControlType(Enum):
    """
    Wirenboard control types.

    Based on https://github.com/wirenboard/homeui/blob/master/conventions.md
    Each member's value is the type string published by the Wirenboard MQTT
    conventions; members are mapped to Home Assistant components via
    _WIREN_TO_HASS_MAPPER / wiren_to_hass_type().
    """
    # generic types
    switch = "switch"
    alarm = "alarm"
    pushbutton = "pushbutton"
    range = "range"
    rgb = "rgb"
    text = "text"
    value = "value"

    # special types (numeric sensors; display units in WIREN_UNITS_DICT)
    temperature = "temperature"
    rel_humidity = "rel_humidity"
    atmospheric_pressure = "atmospheric_pressure"
    rainfall = "rainfall"
    wind_speed = "wind_speed"
    power = "power"
    power_consumption = "power_consumption"
    voltage = "voltage"
    water_flow = "water_flow"
    water_consumption = "water_consumption"
    resistance = "resistance"
    concentration = "concentration"
    heat_power = "heat_power"
    heat_energy = "heat_energy"

    # custom types (not part of the upstream conventions document)
    current = "current"
# Display unit for each numeric Wirenboard control type (used as Home
# Assistant's `unit_of_measurement` when the control has no explicit units).
WIREN_UNITS_DICT = {
    WirenControlType.temperature: '°C',
    WirenControlType.rel_humidity: '%',
    WirenControlType.atmospheric_pressure: 'millibar',
    WirenControlType.rainfall: 'mm per hour',
    WirenControlType.wind_speed: 'm/s',
    WirenControlType.power: 'watt',
    WirenControlType.power_consumption: 'kWh',
    WirenControlType.voltage: 'V',
    WirenControlType.water_flow: 'm³/hour',
    WirenControlType.water_consumption: 'm³',
    WirenControlType.resistance: 'Ohm',
    WirenControlType.concentration: 'ppm',
    WirenControlType.heat_power: 'Gcal/hour',
    WirenControlType.heat_energy: 'Gcal',
    WirenControlType.current: 'A',
}

# Static mapping from Wirenboard control type to Home Assistant component.
# A value of None means the component depends on the control itself (e.g.
# writability) and is resolved in wiren_to_hass_type().
_WIREN_TO_HASS_MAPPER = {
    WirenControlType.switch: None,  # see wirenboard_to_hass_type()
    WirenControlType.alarm: 'binary_sensor',
    WirenControlType.pushbutton: 'binary_sensor',
    WirenControlType.range: None,  # see wirenboard_to_hass_type()
    # WirenControlType.rgb: 'light', #TODO: add
    WirenControlType.text: 'sensor',
    WirenControlType.value: 'sensor',
    WirenControlType.temperature: 'sensor',
    WirenControlType.rel_humidity: 'sensor',
    WirenControlType.atmospheric_pressure: 'sensor',
    WirenControlType.rainfall: 'sensor',
    WirenControlType.wind_speed: 'sensor',
    WirenControlType.power: 'sensor',
    WirenControlType.power_consumption: 'sensor',
    WirenControlType.voltage: 'sensor',
    WirenControlType.water_flow: 'sensor',
    WirenControlType.water_consumption: 'sensor',
    WirenControlType.resistance: 'sensor',
    WirenControlType.concentration: 'sensor',
    WirenControlType.heat_power: 'sensor',
    WirenControlType.heat_energy: 'sensor',
    WirenControlType.current: 'sensor',
}
def wiren_to_hass_type(control):
    """Resolve the Home Assistant component name for a Wirenboard control.

    Switches and ranges depend on whether the control is writable; every
    other type comes from the static mapping table. Returns None when the
    control has no supported mapping.
    """
    control_type = control.type
    if control_type == WirenControlType.switch:
        return 'binary_sensor' if control.read_only else 'switch'
    if control_type == WirenControlType.range:
        # Writable ranges are intentionally unmapped for now
        # (candidates considered previously: 'light', 'cover').
        return 'sensor' if control.read_only else None
    # Falls back to None for types absent from the table (e.g. rgb).
    return _WIREN_TO_HASS_MAPPER.get(control_type)
# Hass types already reported as unsupported, so each is warned about once.
_unknown_types = []


def apply_payload_for_component(payload, device, control, control_topic, inverse: bool):
    """Fill the MQTT-discovery payload for the given Wirenboard control.

    Mutates `payload` in place according to the Home Assistant component
    resolved for `control` and returns that component name, or None when the
    control type has no supported mapping (logged once per hass type).
    """
    hass_entity_type = wiren_to_hass_type(control)

    # Wirenboard publishes '1'/'0'; `inverse` flips which one means "on".
    _payload_on, _payload_off = ('0', '1') if inverse else ('1', '0')

    if hass_entity_type == 'switch':
        payload.update({
            'payload_on': _payload_on,
            'payload_off': _payload_off,
            'state_on': _payload_on,
            'state_off': _payload_off,
            'state_topic': f"{control_topic}",
            'command_topic': f"{control_topic}/on",
        })
    elif hass_entity_type == 'binary_sensor':
        payload.update({
            'payload_on': _payload_on,
            'payload_off': _payload_off,
            'state_topic': f"{control_topic}",
        })
    elif hass_entity_type == 'sensor':
        payload.update({
            'state_topic': f"{control_topic}",
        })
        if control.type == WirenControlType.temperature:
            payload['device_class'] = 'temperature'
        if control.units:
            payload['unit_of_measurement'] = control.units
    # TODO: 'cover' (tilt_* topics) and 'light' (brightness_* topics) payloads
    # were drafted here previously; both need control.max to be present.
    else:
        # Unsupported mapping: warn once per hass type and signal failure.
        if hass_entity_type not in _unknown_types:
            logger.warning(f"No algorithm for hass type '{control.type.name}', hass: '{hass_entity_type}'")
            _unknown_types.append(hass_entity_type)
        return None
    return hass_entity_type
|
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class BiologicallyDerivedProductStorageScaleCode(GenericTypeCode):
    """
    BiologicallyDerivedProductStorageScale
    From: http://hl7.org/fhir/product-storage-scale in valuesets.xml
        BiologicallyDerived Product Storage Scale.
    """
    def __init__(self, value: AutoMapperTextInputType):
        # Wrap the raw code value; validation/mapping is handled by GenericTypeCode.
        super().__init__(value=value)

    """
    http://hl7.org/fhir/product-storage-scale
    """
    # Canonical URI of the FHIR code system this code belongs to.
    codeset: FhirUri = "http://hl7.org/fhir/product-storage-scale"
class BiologicallyDerivedProductStorageScaleCodeValues:
    """
    Fahrenheit temperature scale.
    From: http://hl7.org/fhir/product-storage-scale in valuesets.xml
    """
    # NOTE: "farenheit" is the official (misspelled) FHIR code - do not correct it.
    Fahrenheit = BiologicallyDerivedProductStorageScaleCode("farenheit")
    """
    Celsius or centigrade temperature scale.
    From: http://hl7.org/fhir/product-storage-scale in valuesets.xml
    """
    Celsius = BiologicallyDerivedProductStorageScaleCode("celsius")
    """
    Kelvin absolute thermodynamic temperature scale.
    From: http://hl7.org/fhir/product-storage-scale in valuesets.xml
    """
    Kelvin = BiologicallyDerivedProductStorageScaleCode("kelvin")
|
from hashlib import sha256
from typing import Optional
class BitCoinMinner:
    """
    Toy proof-of-work "miner".

    Repeatedly hashes block_number + transaction + previous_hash + nonce with
    SHA-256 until the hex digest starts with `prefix_zero` zero characters.
    """

    block_number: int
    transaction: str
    previous_hash: str
    prefix_zero: int
    max_nonce: Optional[int]

    def __init__(self, block_number: int, transaction: str, previous_hash: str, prefix_zero: int, max_nonce: Optional[int] = None):
        self.block_number = block_number
        self.transaction = transaction
        self.previous_hash = previous_hash
        self.prefix_zero = prefix_zero
        # None (or 0) means "mine forever"; see run().
        self.max_nonce = max_nonce
        # Target prefix, e.g. '0000' for difficulty 4.
        self.prefix_string = '0' * self.prefix_zero
        self.encoded_data = None
        self.new_hash = None

    def praise_summary(self, nonce: int):
        '''Print a success summary.

        :params nonce (int) Number of attempt
        '''
        print("------------------------------------------------------------------------")
        print("Congratulations! You successfully mined 6.25 bitcoins with {} nonces".format(nonce + 1))
        print("-------------------------------- Detail --------------------------------")
        print("transaction:    ", self.transaction)
        print("deciphered hash:", self.previous_hash)
        print("new hash:       ", self.new_hash)
        print("nonces:         ", nonce, "times")

    def _hash_sha256(self) -> str:
        '''Return the SHA-256 hex digest of the currently encoded block data.'''
        return sha256(self.encoded_data).hexdigest()

    def mining(self, nonce):
        '''Hash the block with the given nonce; on success print and stop.'''
        self.data = str(self.block_number) + self.transaction + self.previous_hash + str(nonce)
        self.encoded_data = self.data.encode('ascii')
        self.new_hash = self._hash_sha256()
        if self.new_hash.startswith(self.prefix_string):
            self.praise_summary(nonce)
            # FIX: was exit(); the `exit` builtin is injected by the site module
            # and is not guaranteed to exist - raising SystemExit is the
            # reliable equivalent.
            raise SystemExit

    def start_miner_with_nonces(self):
        '''Start miner, giving up after max_nonce attempts.'''
        print('Start mining with {} maximum nonces ...'.format(self.max_nonce))
        for nonce in range(self.max_nonce):
            self.mining(nonce)
        # FIX: was `raise BaseException`, which is reserved for interpreter-level
        # exits and not caught by `except Exception`; also report the attempt
        # count (max_nonce) rather than the last loop index.
        raise RuntimeError(f'Could not find bitcoin after trying {self.max_nonce} nonces')

    def start_miner(self):
        '''Start miner with no attempt limit.'''
        nonce = 0
        print('Start mining forever ...')
        while True:
            self.mining(nonce)
            nonce += 1

    def run(self):
        # A falsy max_nonce (None or 0) means mine without an attempt limit.
        if self.max_nonce:
            self.start_miner_with_nonces()
        else:
            self.start_miner()
if __name__ == '__main__':
    # Demo configuration for the toy miner.
    block_number = 5  # NOTE(review): unused below - the literal 5 is passed instead; confirm intent
    transaction = '''Toni -> Mike = 1 BTC, Rose -> David = 2 BTC'''
    previous_hash = "0000000xa036944e29568d0cff17edbe038f81208fecf9a66be9a2b8321c6ec7"
    difficulty = 4  # required number of leading zeros in the new hash
    max_nonce = 100000000
    # NOTE(review): the next line discards the limit above, so the miner runs
    # without an attempt cap - confirm which behavior is intended.
    max_nonce = None
    # TODO add Logging
    # TODO execution time
    # TODO unittest
    btc_miner = BitCoinMinner(
        block_number=5,
        transaction=transaction,
        previous_hash=previous_hash,
        prefix_zero=difficulty,
        max_nonce=max_nonce
    )
    btc_miner.run()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.