content stringlengths 5 1.05M |
|---|
from Model.Direction.CardinalDirection.XDirection.xdirection import XDirection
class EAST(XDirection):
    """The east cardinal direction along the X axis.

    Its string representation is the single character ``'l'``.
    """

    def __str__(self):
        return 'l'
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/2/20 下午1:38
# @Author : sws
# @Site : 使用 ForkingMixin
# @File : forking_mixin_socket_server.py
# @Software: PyCharm
import os
import socket
import threading
import SocketServer
SERVER_HOST = 'localhost'
SERVER_PORT = 0 # 动态是设置端口
BUF_SIZE = 10
ECHO_MSG = 'hello echo server!'
class ForkingClient(object):
    """Simple TCP echo client used to exercise the forking server.

    Connects to ``(ip, port)`` immediately on construction and records
    the client process id for log output.
    """
    def __init__(self, ip, port):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((ip, port))
        self.cuId = os.getpid()  # client process id, used in log messages

    def run(self):
        """Send ECHO_MSG and print the echo reply."""
        cuId = self.cuId
        print 'Pid...{0} '.format(cuId)
        send_data = self.sock.send(ECHO_MSG)
        print 'Sent: {0} characters, so far ..'.format(send_data)
        # NOTE(review): recv(BUF_SIZE) reads at most 10 bytes, so the
        # reply is shorter than the message sent -- TODO confirm intended.
        respon = self.sock.recv(BUF_SIZE)
        print 'Pid..: {0} received {1}'.format(cuId, respon)

    def shutdown(self):
        """Close the client socket."""
        print 'Pid: {0} is shutdowning.'.format(self.cuId)
        self.sock.close()
class ForkingServerRequestHandler(SocketServer.BaseRequestHandler):
    """Echo handler: reply with the received data prefixed by the pid
    of the forked child process serving the request."""
    def handle(self):
        data = self.request.recv(BUF_SIZE)
        cuId = os.getpid()  # pid of the forked child handling this request
        res = '{0}:{1}'.format(cuId, data)
        print '{0}...Sending'.format(cuId)
        self.request.send(res)
        return
class ForkingServer(SocketServer.ForkingMixIn, SocketServer.TCPServer):
    """TCP server that forks a child process for every request."""
    pass
def main():
    """Start a forking echo server on an ephemeral port, run two
    clients against it, then shut everything down."""
    # SERVER_PORT == 0 lets the OS pick a free port; read it back below.
    server = ForkingServer((SERVER_HOST, SERVER_PORT), ForkingServerRequestHandler)
    ip, port = server.server_address
    # Serve in a daemon thread so interpreter exit is never blocked.
    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.setDaemon(True)
    server_thread.start()
    client1 = ForkingClient(ip, port)
    client1.run()
    cl2 = ForkingClient(ip, port)
    cl2.run()
    server.shutdown()
    client1.shutdown()
    cl2.shutdown()
    server.socket.close()
if __name__ == "__main__":
main() |
from scipy.signal import find_peaks , find_peaks_cwt
import scipy.stats as stats
import matplotlib.pyplot as plt
import numpy as np
def plot_peaks(time_series):
    """Plot *time_series* with its detected peaks marked.

    Peaks are found with :func:`scipy.signal.find_peaks` and drawn as
    ``x`` markers; a gray dashed zero line is added for reference.

    Args:
        time_series: 1-D numeric array of samples.
    """
    # BUG FIX: the original ignored the parameter and referenced an
    # undefined global ``record["time_series"]``.
    peak_indexes, _ = find_peaks(time_series)
    plt.plot(time_series)
    plt.plot(peak_indexes, time_series[peak_indexes], "x")
    plt.plot(np.zeros_like(time_series), "--", color="gray")
def build_mm_record(dist_ts="gamma", valleys=False):
    """Build a record describing the peaks of the global ``time_series``
    and fit a gamma distribution to the peak heights.

    NOTE(review): reconstructed from garbled source; it relies on a
    module-level ``time_series`` array -- TODO confirm.

    Args:
        dist_ts: name of the peak-height distribution; only "gamma" is
            implemented (the parameter is currently unused).
        valleys: if True, analyse valleys by mirroring the series.

    Returns:
        dict with keys "time_series", "peaks_loc" and "peaks".
    """
    # BUG FIX: the original immediately overwrote both parameters with
    # their default values, making them dead; those lines were removed.
    from scipy.stats import gamma
    from statsmodels.base.model import GenericLikelihoodModel

    record = {}
    record["time_series"] = time_series
    if valleys:
        # Mirror the series so valleys become peaks (shifted to be >= 0).
        record["time_series"] = -time_series - min(-time_series)
    # Peaks
    record["peaks_loc"], _ = find_peaks(record["time_series"])
    record["peaks"] = record["time_series"][record["peaks_loc"]]
    # scipy point estimate of the gamma parameters and its log-likelihood.
    shape, loc, scale = params = stats.gamma.fit(record["peaks"])
    loglh = stats.gamma.logpdf(record["peaks"], shape, loc, scale).sum()

    class Gamma(GenericLikelihoodModel):
        # Free parameters: shape, loc, scale.
        nparams = 3

        def loglike(self, params):
            return gamma.logpdf(self.endog, *params).sum()

    # Refit by maximum likelihood, starting from the scipy estimate.
    res = Gamma(record["peaks"]).fit(start_params=params)
    res.df_model = len(params)
    # BUG FIX: the original referenced an undefined name ``data`` here.
    res.df_resid = len(record["peaks"]) - len(params)
    print(res.summary())
    return record
import math
import heapq
def allocate(desire, reputation, output=None):
    """Distribute resources from agents with negative desire to agents
    with positive desire, serving low-reputation agents first.

    Args:
        desire: per-agent amount; negative entries give resources,
            positive entries request them.
        reputation: per-agent reputation; lower reputation is served
            earlier (a min-heap orders the recipients).
        output: optional list to fill in place; a zero list is created
            when omitted.

    Returns:
        The ``output`` list: givers keep their (negative) desire, and
        the freed resources are spread over the requesters.
    """
    if output is None:
        output = [0] * len(reputation)
    desire = desire[:]  # work on a copy; entries are decremented below
    desire_sum = sum(desire)
    if desire_sum < 0:
        # Mirror the problem so total desire is non-negative; the
        # result is mirrored back before returning.
        desire = [-d for d in desire]
        reputation = [-r for r in reputation]
    elif desire_sum == 0:
        output[:] = desire
        return output
    # Min-heap of (reputation, index) for agents that want resources.
    # The (+inf, -1) sentinel guarantees a "next" entry always exists.
    reputation_heap = [(r, i) for (i, r) in enumerate(reputation)
                       if desire[i] > 0]
    reputation_heap.append((float('inf'), -1))
    heapq.heapify(reputation_heap)
    # Agents with negative desire give up resources.
    available_resources = 0
    for i, d in enumerate(desire):
        if d < 0:
            output[i] = d
            available_resources += -d
    while available_resources > 0:
        if len(reputation_heap) < 2:
            # Only one entry left: dump the remaining resources on it.
            _, idx = reputation_heap[0]
            output[idx] += available_resources
            available_resources = 0
            break
        reputation_value, idx = heapq.heappop(reputation_heap)
        ref_reputation, _ = reputation_heap[0]
        # BUG FIX: when the reference entry is the +inf sentinel,
        # math.ceil(inf) raises OverflowError on Python 3; treat the
        # reputation gap as unbounded instead.
        if math.isinf(ref_reputation):
            distance = float('inf')
        else:
            distance = math.ceil(ref_reputation - reputation_value)
            if distance == 0:
                distance = 1
        max_allocatable = min(available_resources, desire[idx])
        allocation = int(min(max_allocatable, distance))
        output[idx] += allocation
        available_resources -= allocation
        if max_allocatable > distance:
            # Still hungry: re-insert with an increased reputation.
            reputation_value += allocation
            desire[idx] -= allocation
            heapq.heappush(reputation_heap, (reputation_value, idx))
    if desire_sum < 0:
        output[:] = [-o for o in output]
    return output
def check_allocation(desire, output):
    """Validate an allocation: the total must be zero and no agent may
    be pushed in a direction it did not ask for."""
    def _acceptable(want, got):
        # A giver (want < 0) must not give more than it offered; a
        # requester (want >= 0) must not end up giving at all.
        if got < want < 0:
            return False
        if got < 0 <= want:
            return False
        return True

    balanced = sum(output) == 0
    return balanced and all(_acceptable(w, g) for w, g in zip(desire, output))
|
#!python3
"""
Demonstration of the PO+PROP1 algorithm.
Programmer: Tom Latinn
Since: 2021-02
"""
import networkx as nx
from fairpy.agents import AdditiveAgent
from fairpy.items.allocations_fractional import FractionalAllocation
from fairpy.items.po_and_prop1_allocation import find_po_and_prop1_allocation
# Two agents with additive valuations; negative values mark items the
# agent considers a chore (agent1 dislikes 'd', agent2 dislikes 'c').
agent1 = AdditiveAgent({"a": 10, "b": 100, "c": 80, "d": -100}, name="agent1")
agent2 = AdditiveAgent({"a": 20, "b": 100, "c": -40, "d": 10}, name="agent2")
print("Agent 1: {}\nAgent 2: {}".format(agent1, agent2))
all_items = {'a', 'b', 'c', 'd'}
all_agents = [agent1, agent2]
# Fractional starting point: item 'b' is split 0.3/0.7 between the agents.
initial_allocation = FractionalAllocation(all_agents,
    [{'a':0.0,'b': 0.3,'c':1.0,'d':0.0},{'a':1.0,'b':0.7,'c':0.0,'d':1.0}
])
print("Initial allocation", initial_allocation)
# Bipartite consumption graph: an edge (agent, item) means the agent
# holds a positive fraction of that item in the initial allocation.
G = nx.Graph()
G.add_node(agent1)
G.add_node(agent2)
G.add_node('a')
G.add_node('b')
G.add_node('c')
G.add_node('d')
G.add_edge(agent1, 'b')
G.add_edge(agent1, 'c')
G.add_edge(agent2, 'a')
G.add_edge(agent2, 'b')
G.add_edge(agent2, 'd')
print("Nodes of graph: {}\nEdges of graph: {}".format(G.nodes(), G.edges()))
# Run the PO+PROP1 algorithm and show the resulting allocation.
new_allocation = find_po_and_prop1_allocation(G, initial_allocation, all_items)
print(new_allocation)
# Purpose: entity space
# Created: 13.03.2011
# Copyright (C) 2011, Manfred Moitzi
# License: MIT License
from __future__ import unicode_literals
__author__ = "mozman <mozman@gmx.at>"
from .lldxf.const import DXFStructureError
class EntitySpace(list):
    """A collection of drawing entities, stored as handles only.

    Both the ENTITIES section and block definitions are entity spaces;
    the actual tag data lives in the drawing entity database and is
    looked up by handle.
    """
    def __init__(self, entitydb):
        self._entitydb = entitydb

    def get_tags_by_handle(self, handle):
        """Resolve *handle* to its tag structure via the entity database."""
        return self._entitydb[handle]

    def store_tags(self, tags):
        """Register *tags* in this space and in the entity database.

        Returns the handle under which the tags were stored; a fresh
        handle is generated when the tags do not carry one.
        """
        try:
            key = tags.get_handle()
        except ValueError:
            # tags carry no handle tag -- generate a new one
            key = self._entitydb.handles.next()
        self.append(key)
        self._entitydb[key] = tags
        return key

    def write(self, stream):
        """Write every entity, following each linked-entity chain."""
        for key in self:
            current = key
            while current is not None:
                tags = self._entitydb[current]
                tags.write(stream)
                current = tags.link  # next entity in the linked chain

    def delete_entity(self, entity):
        """Forget *entity*'s handle; the database object is kept."""
        self.remove(entity.dxf.handle)

    def delete_all_entities(self):
        """Forget all handles; the database objects are kept."""
        del self[:]

    def add_handle(self, handle):
        """Append an existing *handle* to this entity space."""
        self.append(handle)
class LayoutSpaces(object):
    """Maps a layout key to its :class:`EntitySpace`.

    Entities are routed by a key derived from their tags: for DXF
    versions <= AC1009 the paper-space flag (group code 67), for newer
    versions the owner handle (group code 330).
    """
    def __init__(self, entitydb, dxfversion):
        self._layout_spaces = {}  # layout key -> EntitySpace
        self._entitydb = entitydb
        self._dxfversion = dxfversion
        if dxfversion <= 'AC1009':
            self._get_key = lambda t: t.noclass.find_first(67, default=0)  # paper space value
        else:
            self._get_key = lambda t: t.noclass.find_first(330, default=0)  # if no owner tag, set 0 and repair later

    def __iter__(self):
        """ Iterate over all layout entity spaces.
        """
        return iter(self._layout_spaces.values())

    def __getitem__(self, key):
        """ Get layout entity space by *key*.
        """
        return self._layout_spaces[key]

    def __len__(self):
        # Total number of handles over all entity spaces.
        return sum(len(entity_space) for entity_space in self._layout_spaces.values())

    def handles(self):
        """ Iterate over all handles in all entity spaces.
        """
        for entity_space in self:
            for handle in entity_space:
                yield handle

    def repair_model_space(self, new_model_space_key):
        """ Move entities collected in the temporary model space (key 0)
        into the real model space and fix their tags.

        Entities without an owner tag (330) are stored under key 0 by
        store_tags(); once the real model space key is known this
        resolves them.
        """
        def update_entity_tags(entity_space):
            for handle in entity_space:
                tags = entity_space.get_tags_by_handle(handle)
                # set the real owner handle
                tags.noclass.set_first(330, new_model_space_key)
                # if paper space is set to 1 -> set 0 for model space
                try:
                    entity_tags = tags.get_subclass("AcDbEntity")
                except KeyError:
                    raise DXFStructureError("Entity has no subclass 'AcDbEntity'.")
                if entity_tags.find_first(67, default=0) != 0:
                    entity_tags.set_first(67, 0)

        if self._dxfversion <= 'AC1009':
            return  # old versions have no owner tags to repair
        if 0 not in self._layout_spaces:  # no temporary model space exists
            return
        temp_model_space = self._layout_spaces[0]
        model_space = self.get_entity_space(new_model_space_key)
        model_space.extend(temp_model_space)
        update_entity_tags(temp_model_space)  # just for entities in the temporary model space
        del self._layout_spaces[0]  # just delete the temporary model space, not the entities itself

    def get_entity_space(self, key):
        """ Get entity space by *key* or create new entity space.
        """
        try:
            entity_space = self._layout_spaces[key]
        except KeyError:  # create new entity space
            entity_space = EntitySpace(self._entitydb)
            self.set_entity_space(key, entity_space)
        return entity_space

    def set_entity_space(self, key, entity_space):
        # Replace or register the entity space stored under *key*.
        self._layout_spaces[key] = entity_space

    def store_tags(self, tags):
        """ Store *tags* in associated layout entity space.
        """
        # AC1018: if entities have no owner tag (330) (thanks to ProE), store this entities in a temporary model space
        # with layout_key = 0;
        # this will be resolved later in LayoutSpaces.repair_model_space()
        entity_space = self.get_entity_space(self._get_key(tags))
        entity_space.store_tags(tags)

    def write(self, stream, keys=None):
        """ Write all entity spaces to *stream*.
        If *keys* is not *None*, write only entity spaces defined in *keys*.
        """
        layout_spaces = self._layout_spaces
        if keys is None:
            keys = set(layout_spaces.keys())
        for key in keys:
            layout_spaces[key].write(stream)

    def delete_entity(self, entity):
        """ Delete *entity* from associated layout entity space.
        Type of *entity* has to be DXFEntity() or inherited.
        """
        key = self._get_key(entity.tags)
        try:
            entity_space = self._layout_spaces[key]
        except KeyError:  # ignore
            pass
        else:
            entity_space.delete_entity(entity)

    def delete_entity_space(self, key):
        """ Delete layout entity space *key*.
        """
        entity_space = self._layout_spaces[key]
        entity_space.delete_all_entities()
        del self._layout_spaces[key]

    def delete_all_entities(self):
        """ Delete all entities from all layout entity spaces.
        """
        # Do not delete the entity space objects itself, just remove all entities from all entity spaces.
        for entity_space in self._layout_spaces.values():
            entity_space.delete_all_entities()
|
from configparser import NoSectionError
from pyrogram import CallbackQuery, InlineKeyboardMarkup, InlineKeyboardButton
from ..TG_AutoConfigurator import AutoConfigurator
from ..utils import tools, messages
@AutoConfigurator.on_callback_query()
def callback(bot: AutoConfigurator, callback_query: CallbackQuery):
    """Dispatch inline-keyboard callbacks for the configurator bot.

    ``callback_query.data`` is a space-separated command:
      ``delete <section>``            -- remove a source section
      ``switch <section> <option>``   -- toggle a boolean option
      ``show <section> send_reposts`` -- show the repost-mode menu
      ``reposts <section> <value>``   -- set/reset the repost mode
    """
    # Only admins may change the configuration.
    if tools.admin_check(bot, callback_query):
        data = callback_query.data.split()
        if data[0] == "delete":
            bot.reload_config()
            try:
                section = bot.remove_config_section(data[1])
            except NoSectionError:
                # Section already gone (message text intentionally Russian).
                info = "Источник {} не был найден. Возможно он был уже удален.".format(data[1])
            else:
                info = messages.SECTION_DELETED.format(section)
            callback_query.edit_message_text(info)
            return
        elif data[0] == "switch":
            bot.reload_config()
            # Current value: section option falling back to [global];
            # these two options default to True, everything else to False.
            option = bot.config.getboolean(
                data[1],
                data[2],
                fallback=bot.config.getboolean(
                    "global", data[2], fallback=True if data[2] in ("disable_web_page_preview", "sign_posts") else False
                ),
            )
            # If the per-section override differs from the global value,
            # toggling means "go back to global": drop the override
            # instead of storing the toggled value.
            if (
                data[1] != "global"
                and bot.config.has_option(data[1], data[2])
                and option
                is not bot.config.getboolean(
                    "global", data[2], fallback=True if data[2] in ("disable_web_page_preview", "sign_posts") else False
                )
            ):
                bot.config.remove_option(data[1], data[2])
            else:
                bot.config.set(data[1], data[2], str(not option))
            bot.save_config()
            info, reply_markup = tools.generate_setting_info(bot, data[1])
            callback_query.edit_message_text(info, reply_markup=reply_markup, disable_web_page_preview=True)
            return
        elif data[0] == "show":
            bot.reload_config()
            if data[2] == "send_reposts":
                # Build the repost-mode menu (captions are Russian:
                # disable / enable / posts only / use global value).
                info = "**Настройка отправки репостов:**\n\n"
                button_list = [
                    InlineKeyboardButton("Отключить", callback_data="reposts {} no".format(data[1])),
                    InlineKeyboardButton("Включить", callback_data="reposts {} yes".format(data[1])),
                ]
                footer_buttons = [
                    InlineKeyboardButton("Только посты", callback_data="reposts {} post_only".format(data[1]))
                ]
                button_list = tools.build_menu(button_list, n_cols=2, footer_buttons=footer_buttons)
                if data[1] != "global":
                    button_list.append(
                        [
                            InlineKeyboardButton(
                                "Использование глобальное значение", callback_data="reposts {} reset".format(data[1])
                            )
                        ]
                    )
                if bot.config.has_option(data[1], data[2]):
                    option = bot.config.get(data[1], "send_reposts")
                else:
                    option = bot.config.get("global", "send_reposts")
                    info = messages.SOURCE_USE_GLOBAL_SETTINGS
                # NOTE(review): config.get returns a string, so the int
                # members of these tuples can never match -- TODO confirm.
                if option in ("no", "False", 0):
                    info += "Отправка репостов отключена"
                elif option in ("post_only", 1):
                    info += "Отправка только постов" + messages.PARTIAL_REPOSTS
                elif option in ("yes", "all", "True", 2):
                    info += "Отправка репостов включена"
                reply_markup = InlineKeyboardMarkup(button_list)
                callback_query.edit_message_text(info, reply_markup=reply_markup)
                return
        elif data[0] == "reposts":
            # "reset" removes the per-section override; any other value
            # is written verbatim (no/yes/post_only).
            if data[2] == "reset" and bot.config.has_option(data[1], "send_reposts"):
                bot.config.remove_option(data[1], "send_reposts")
            else:
                bot.config.set(data[1], "send_reposts", data[2])
            bot.save_config()
            info, reply_markup = tools.generate_setting_info(bot, data[1])
            callback_query.edit_message_text(info, reply_markup=reply_markup, disable_web_page_preview=True)
|
try:
import pybullet_envs # pytype: disable=import-error
except ImportError:
pybullet_envs = None
try:
import highway_env # pytype: disable=import-error
except ImportError:
highway_env = None
try:
import mocca_envs # pytype: disable=import-error
except ImportError:
mocca_envs = None
|
from tool import *
from xgb_classifier import xgb_classifier
import numpy as np
import pickle
from scipy import sparse
def xgb_meta_predict(data_base_dir, data_meta_part1_dir, submission_dir):
    """Train xgboost meta-classifiers on stacked level-1 features and
    write gzipped prediction CSVs.

    Args:
        data_base_dir: directory with the pickled base feature matrices.
        data_meta_part1_dir: directory with pickled level-1 meta features
            (predictions of the part-1 models).
        submission_dir: directory where the submission files are written.
    """
    test_id = pickle.load(open(data_base_dir + "test_id.p", "rb"))
    y_all = pickle.load(open(data_base_dir + "y.p", "rb"))
    X_all = pickle.load(open(data_base_dir + "X_all.p", "rb"))
    X_test = pickle.load(open(data_base_dir + "X_test_all.p", "rb"))
    # BUG FIX: integer division -- shape[0] / 2 is a float on Python 3
    # and cannot be used as a slice index ('//' also works on Python 2).
    y_part1 = y_all[:y_all.shape[0] // 2, :]
    # Raw xgboost model over all labels.
    xgb_clf = xgb_classifier(eta=0.07, min_child_weight=6, depth=20, num_round=150, threads=16)
    X_xgb_predict = xgb_clf.train_predict_all_labels(X_all, y_all, X_test, predict_y14=True)
    save_predictions(submission_dir + 'xgb-raw-d20-e0.07-min6-tree150.csv.gz', test_id, X_xgb_predict)
    # Deep model dedicated to label 33.
    xgb_clf = xgb_classifier(eta=0.1, min_child_weight=7, depth=100, num_round=150, threads=16)
    X_xgb_predict = xgb_clf.train_predict_label(X_all, y_all, X_test, label=33)  # predict label 33 only
    save_predictions(submission_dir + 'xgb-y33-d100-e0.1-min7-tree150.csv.gz', test_id, X_xgb_predict)
    # Level-1 meta features produced by the part-1 models.
    X_part1_best_online = pickle.load(open(data_meta_part1_dir + "X_meta_part1_online.p", "rb"))
    X_test_best_online = pickle.load(open(data_meta_part1_dir + "X_test_meta_online.p", "rb"))
    X_numerical = pickle.load(open(data_base_dir + "X_numerical.p", "rb"))
    X_numerical_part1 = X_numerical[:X_numerical.shape[0] // 2, :]
    X_test_numerical = pickle.load(open(data_base_dir + "X_test_numerical.p", "rb"))
    X_part1_xgb = pickle.load(open(data_meta_part1_dir + "X_meta_part1_xgb.p", "rb"))
    X_test_xgb = pickle.load(open(data_meta_part1_dir + "X_test_meta_xgb_all.p", "rb"))
    X_part1_rf = pickle.load(open(data_meta_part1_dir + "X_meta_part1_rf.p", "rb"))
    X_test_rf = pickle.load(open(data_meta_part1_dir + "X_test_meta_rf.p", "rb"))
    X_part1_sgd = pickle.load(open(data_meta_part1_dir + "X_meta_part1_sgd.p", "rb"))
    X_test_sgd = pickle.load(open(data_meta_part1_dir + "X_test_meta_sgd.p", "rb"))
    X_sparse = pickle.load(open(data_base_dir + "X_sparse.p", "rb"))
    X_test_sparse = pickle.load(open(data_base_dir + "X_test_sparse.p", "rb"))
    X_sparse_part1 = X_sparse[:X_sparse.shape[0] // 2, :]
    # Stack sparse base features with the dense meta features.
    X = sparse.csr_matrix(sparse.hstack((X_sparse_part1, sparse.coo_matrix(np.hstack(
        [X_part1_best_online, X_part1_rf, X_part1_sgd, X_part1_xgb, X_numerical_part1]).astype(float)))))
    Xt = sparse.csr_matrix(sparse.hstack((X_test_sparse, sparse.coo_matrix(np.hstack(
        [X_test_best_online, X_test_rf, X_test_sgd, X_test_xgb, X_test_numerical]).astype(float)))))
    xgb_clf = xgb_classifier(eta=0.1, min_child_weight=6, depth=30, num_round=80, threads=16)
    X_xgb_predict = xgb_clf.train_predict_label(X, y_part1, Xt, label=33)  # predict label 33 only
    save_predictions(submission_dir + 'xgb-y33-d30-e0.1-min6-tree80-all-sparse.csv.gz', test_id, X_xgb_predict)
import sys

if __name__ == "__main__":
    # CLI: <data_base_dir> <data_meta_part1_dir> <submission_dir>
    data_base_dir=sys.argv[1]
    data_meta_part1_dir=sys.argv[2]
    submission_dir=sys.argv[3]
    xgb_meta_predict(data_base_dir,data_meta_part1_dir,submission_dir)
|
# Licensed under a 3-clause BSD style license - see PYDL_LICENSE.rst
# -*- coding: utf-8 -*-
# Also cite https://doi.org/10.5281/zenodo.1095150 when referencing PYDL
"""
Implements pure python support methods for
:class:`pypeit.bspline.bspline.bspline`.
.. include:: ../links.rst
"""
import warnings
from IPython import embed
import numpy as np
def bspline_model(x, action, lower, upper, coeff, n, nord, npoly):
    """
    Evaluate the b-spline model at the points *x*.

    Args:
        x (`numpy.ndarray`_):
            Independent variable of the fit.
        action (`numpy.ndarray`_):
            Action matrix, shape ``nd`` by ``npoly*nord``; see
            :func:`pypeit.bspline.bspline.bspline.action`.
        lower (`numpy.ndarray`_):
            Starting indices (per breakpoint interval) into the rows of
            ``action`` used to build the model.
        upper (`numpy.ndarray`_):
            Inclusive ending indices, matching ``lower``.
        coeff (`numpy.ndarray`_):
            Model coefficients for each action.
        n (:obj:`int`):
            Number of unmasked measurements in the fit.
        nord (:obj:`int`):
            Fit order.
        npoly (:obj:`int`):
            Polynomial per fit order.

    Returns:
        `numpy.ndarray`: Best-fitting b-spline model at all provided
        :math:`x`.
    """
    model = np.zeros(x.shape, dtype=x.dtype)
    # Column-major flattening matches the coefficient layout expected
    # by the action matrix.
    flat_coeff = coeff.flatten('F')
    offsets = np.arange(npoly * nord, dtype=int)
    has_data = upper + 1 > lower
    for k in range(n - nord + 1):
        if not has_data[k]:
            # Empty interval: leave these model values at zero.
            continue
        rows = slice(lower[k], upper[k] + 1)
        model[rows] = np.dot(action[rows, :], flat_coeff[k * npoly + offsets])
    return model
def intrv(nord, breakpoints, x):
    """
    Find, for each value in *x*, the breakpoint segment containing it.

    The returned indices are clipped to the valid range
    ``[nord - 1, breakpoints.size - nord - 1]``.

    Parameters
    ----------
    nord : :obj:`int`
        Order of the fit.
    breakpoints : `numpy.ndarray`_
        Locations of good breakpoints.
    x : :class:`numpy.ndarray`
        Data values, assumed to be monotonically increasing.

    Returns
    -------
    :class:`numpy.ndarray`
        Position of array elements with respect to breakpoints.
    """
    top = breakpoints.size - nord - 1
    segment = nord - 1  # current segment; only moves forward (x increasing)
    result = np.zeros(x.size, dtype=int)
    for k, value in enumerate(x):
        while segment < top and value > breakpoints[segment + 1]:
            segment += 1
        result[k] = segment
    return result
def solution_arrays(nn, npoly, nord, ydata, action, ivar, upper, lower):
    """
    Support function that builds the arrays for Cholesky
    decomposition.

    Args:
        nn (:obj:`int`):
            Number of good break points.
        npoly (:obj:`int`):
            Polynomial per fit order.
        nord (:obj:`int`):
            Fit order.
        ydata (`numpy.ndarray`_):
            Data to fit.
        action (`numpy.ndarray`_):
            Action matrix. See
            :func:`pypeit.bspline.bspline.bspline.action`. The shape
            of the array is expected to be ``nd`` by ``npoly*nord``.
        ivar (`numpy.ndarray`_):
            Inverse variance in the data to fit.
        upper (`numpy.ndarray`_):
            Vector with the (inclusive) ending indices along the
            second axis of action used to construct the model.
            NOTE: modified in place during the call and restored on exit.
        lower (`numpy.ndarray`_):
            Vector with the starting indices along the second axis of
            action used to construct the model.

    Returns:
        tuple: Returns (1) matrix :math:`A` and (2) vector :math:`b`
        prepared for Cholesky decomposition and used in the solution
        to the equation :math:`Ax=b`.
    """
    # TODO: Used for testing bspline
    # np.savez_compressed('solution_arrays.npz', nn=nn, npoly=npoly, nord=nord, ydata=ydata,
    #                     action=action, ivar=ivar, upper=upper, lower=lower)
    # raise ValueError('Entered solution_arrays')
    nfull = nn * npoly
    bw = npoly * nord  # bandwidth of the banded system
    # Inverse-variance weighted action matrix.
    a2 = action * np.sqrt(ivar)[:,None]
    alpha = np.zeros((bw, nfull+bw), dtype=float)
    beta = np.zeros((nfull+bw,), dtype=float)
    # bi/bo: flat indices of the upper triangle of a bw x bw block in,
    # respectively, banded storage and dense row-major storage
    # -- presumably mirroring the IDL implementation; TODO confirm.
    bi = np.concatenate([np.arange(i)+(bw-i)*(bw+1) for i in range(bw,0,-1)])
    bo = np.concatenate([np.arange(i)+(bw-i)*bw for i in range(bw,0,-1)])
    # Make the per-interval upper bounds exclusive for slicing; the
    # array is restored (-= 1) before returning.
    upper += 1
    nowidth = np.invert(upper > lower)
    for k in range(nn-nord+1):
        if nowidth[k]:
            # No data points fall in this breakpoint interval.
            continue
        itop = k*npoly
        # Accumulate the normal equations for this interval.
        alpha.T.flat[bo+itop*bw] \
                += np.dot(a2[lower[k]:upper[k],:].T, a2[lower[k]:upper[k],:]).flat[bi]
        beta[itop:min(itop,nfull)+bw] \
                += np.dot(ydata[lower[k]:upper[k]] * np.sqrt(ivar[lower[k]:upper[k]]),
                          a2[lower[k]:upper[k],:])
    upper -= 1
    return alpha, beta
def cholesky_band(l, mininf=0.0):
    """
    Compute Cholesky decomposition of banded matrix.
    This function is pure python.

    Parameters
    ----------
    l : :class:`numpy.ndarray`
        A matrix on which to perform the Cholesky decomposition, in
        banded storage: row 0 holds the diagonal, the remaining rows
        the subdiagonals.
    mininf : :class:`float`, optional
        Entries in the `l` matrix are considered negative if they are less
        than this value (default 0.0).

    Returns
    -------
    :func:`tuple`
        If problems were detected, the first item will be the index or
        indexes where the problem was detected, and the second item will simply
        be the input matrix. If no problems were detected, the first item
        will be -1, and the second item will be the Cholesky decomposition.
    """
    # # TODO: Used for testing bspline
    # np.savez_compressed('cholesky_band_l.npz', l=l, mininf=mininf)
    # print(l.shape)
    # raise ValueError('Entered band')
    bw, nn = l.shape
    n = nn - bw
    # Diagonal entries <= mininf (or non-finite) mean the matrix is not
    # positive definite: report those indices instead of decomposing.
    negative = (l[0,:n] <= mininf) | np.invert(np.isfinite(l[0,:n]))
    # JFH changed this below to make it more consistent with IDL version. Not sure
    # why the np.all(np.isfinite(lower)) was added. The code could return an empty
    # list for negative.nonzero() and crash if all elements in lower are NaN.
    # KBW: Added the "or not finite" flags to negative.
    if negative.any():
        nz = negative.nonzero()[0]
        warnings.warn('Found {0} bad entries: {1}'.format(nz.size, nz))
        return nz, l
    lower = l.copy()
    kn = bw - 1
    spot = np.arange(kn, dtype=int) + 1
    # Flat indices of the upper triangle of a kn x kn block; *here*
    # addresses, per column j, the entries updated by the rank-1 step.
    bi = np.concatenate([np.arange(i)+(kn-i)*(kn+1) for i in range(kn,0,-1)])
    here = bi[:,None] + (np.arange(n)[None,:] + 1)*bw
    for j in range(n):
        # Standard Cholesky update of column j in banded storage:
        # sqrt of the pivot, scale the column, subtract the outer product.
        lower[0,j] = np.sqrt(lower[0,j])
        lower[spot,j] /= lower[0,j]
        if not np.all(np.isfinite(lower[spot,j])):
            warnings.warn('NaN found in cholesky_band.')
            return j, l
        hmm = lower[spot,j,None] * lower[None,spot,j]
        lower.T.flat[here[:,j]] -= hmm.flat[bi]
    return -1, lower
def cholesky_solve(a, bb):
    """
    Solve :math:`A x = b` where *a* is a Cholesky-banded matrix.
    This function is pure python.

    Parameters
    ----------
    a : :class:`numpy.ndarray`
        :math:`A` in :math:`A x = b`, in banded storage.
    bb : :class:`numpy.ndarray`
        :math:`b` in :math:`A x = b`; not modified.

    Returns
    -------
    :func:`tuple`
        A tuple containing the status and the result of the solution.
        The status is always -1.
    """
    solution = bb.copy()
    nband = a.shape[0] - 1
    n = solution.shape[0] - a.shape[0]
    offsets = np.arange(1, nband + 1)
    # Forward substitution against the lower factor.
    for row in range(n):
        solution[row] /= a[0, row]
        solution[row + offsets] -= solution[row] * a[offsets, row]
    # Back substitution against the transposed factor.
    for row in reversed(range(n)):
        solution[row] = (solution[row]
                         - np.sum(a[offsets, row] * solution[row + offsets])) / a[0, row]
    return -1, solution
|
"""
Tests for @timeout decorator
Date created: 8th June 2014
"""
from timeout import timeout, TimeoutError
import time
from nose.tools import assert_raises, assert_equals
class TestTimeout:
    """Tests for the @timeout decorator (run with nose)."""
    def setup(self):
        # Called by nose before each test; nothing to prepare.
        pass

    def teardown(self):
        # Called by nose after each test; nothing to clean up.
        pass

    def test_timeout_with_timeout_error(self):
        """Tests that TimeoutError is thrown for long functions"""
        @timeout(1)
        def dummy_computation():
            time.sleep(2)  # sleeps past the 1 second limit
        with assert_raises(TimeoutError) as cm:
            dummy_computation()

    def test_timeout_without_timeout_error(self):
        """Tests that for small functions, TimeoutError is not thrown"""
        @timeout(1)
        def dummy_computation():
            time.sleep(0.2)  # well within the 1 second limit
            return 5
        assert_equals(dummy_computation(), 5)

    def test_value_and_type_errors(self):
        """Tests that ValueError & TypeError are thrown for incorrect duration"""
        # Non-integer duration -> TypeError at decoration time.
        with assert_raises(TypeError) as cm:
            @timeout(0.2)
            def dummy_computation():
                pass
        # Negative duration -> ValueError at decoration time.
        with assert_raises(ValueError) as cm:
            @timeout(-3)
            def dummy_computation():
                pass
|
from ..utility.expr_wrap_util import symbolic
from ..expr import BVV, BVS
from ..utility.models_util import get_arg_k
from ..sym_state import State
MAX_READ = 100
def read_handler(state: State, view):
    """Model of the libc ``read(fd, buf, count)`` call.

    Reads *count* bytes from the (concrete) file descriptor through the
    OS model and stores them at *buf*; returns *count* as a 32-bit BVV.
    A symbolic count is concretized to its maximum, capped at MAX_READ.
    """
    fd = get_arg_k(state, 1, 4, view)                        # arg 1: file descriptor
    buf = get_arg_k(state, 2, state.arch.bits() // 8, view)  # arg 2: destination pointer
    count = get_arg_k(state, 3, 4, view)                     # arg 3: byte count
    # The file descriptor must be concrete for the model to proceed.
    assert not symbolic(fd) or not state.solver.symbolic(fd)
    fd = fd.value
    assert state.os.is_open(fd)
    if symbolic(count):
        # Concretize to the largest satisfiable value, bounded by MAX_READ.
        count = state.solver.max(count)
        count = MAX_READ if count > MAX_READ else count
    else:
        count = count.value
    res = state.os.read(fd, count)
    # Copy the read bytes into memory at buf.
    for i, b in enumerate(res):
        state.mem.store(buf + i, b)
    state.events.append(
        "read from fd %d, count %d" % (fd, count)
    )
    return BVV(count, 32)
def write_handler(state: State, view):
    """Model of the libc ``write(fd, buf, count)`` call.

    Loads *count* bytes from memory at *buf* and passes them to the OS
    model; returns *count* as a 32-bit BVV.  A symbolic count is
    concretized to its maximum, capped at MAX_READ.
    """
    fd = get_arg_k(state, 1, 4, view)                        # arg 1: file descriptor
    buf = get_arg_k(state, 2, state.arch.bits() // 8, view)  # arg 2: source pointer
    count = get_arg_k(state, 3, 4, view)                     # arg 3: byte count
    # The file descriptor must be concrete for the model to proceed.
    assert not symbolic(fd) or not state.solver.symbolic(fd)
    fd = fd.value
    if symbolic(count):
        # Concretize to the largest satisfiable value, bounded by MAX_READ.
        count = state.solver.max(count)
        count = MAX_READ if count > MAX_READ else count
    else:
        count = count.value
    # Gather the bytes to write, one at a time (they may be symbolic).
    data = []
    for i in range(count):
        b = state.mem.load(buf + i, 1)
        data.append(b)
    state.os.write(fd, data)
    state.events.append(
        "write to fd %d, count %d" % (fd, count)
    )
    return BVV(count, 32)
stat_idx = 0  # monotonically increasing suffix keeping stat symbol names unique


def _stat(state: State, statbuf):
    """Fill *statbuf* with a fully symbolic ``struct stat`` and return 0.

    Every field becomes a fresh symbol so the analysis does not commit
    to any concrete file metadata.  The field offsets appear to follow
    the x86-64 glibc ``struct stat`` layout -- TODO confirm target arch.
    """
    global stat_idx
    long_t = state.arch.bits()  # width of a C long on the target
    int_t = 32                  # width of a C int
    st_dev = BVS('stat_st_dev_%d' % stat_idx, long_t)
    st_ino = BVS('stat_st_ino_%d' % stat_idx, long_t)
    st_mode = BVS('stat_st_mode_%d' % stat_idx, long_t)
    st_nlink = BVS('stat_st_nlink_%d' % stat_idx, long_t)
    st_uid = BVS('stat_st_uid_%d' % stat_idx, int_t)
    st_gid = BVS('stat_st_gid_%d' % stat_idx, int_t)
    st_rdev = BVS('stat_st_rdev_%d' % stat_idx, long_t)
    st_size = BVS('stat_st_size_%d' % stat_idx, long_t)
    st_blksize = BVS('stat_st_blksize_%d' % stat_idx, long_t)
    st_blocks = BVS('stat_st_blocks_%d' % stat_idx, long_t)
    st_atim_tv_sec = BVS('stat_atim.sec_%d' % stat_idx, long_t)
    st_atim_tv_nsec = BVS('stat_atim.nsec_%d' % stat_idx, long_t)
    st_mtim_tv_sec = BVS('stat_mtim.sec_%d' % stat_idx, long_t)
    st_mtim_tv_nsec = BVS('stat_mtim.nsec_%d' % stat_idx, long_t)
    st_ctim_tv_sec = BVS('stat_ctim.sec_%d' % stat_idx, long_t)
    st_ctim_tv_nsec = BVS('stat_ctim.nsec_%d' % stat_idx, long_t)
    stat_idx += 1
    # Lay the symbols out at their struct offsets (target endianness).
    state.mem.store(statbuf + 0, st_dev, state.arch.endness())
    state.mem.store(statbuf + 8, st_ino, state.arch.endness())
    state.mem.store(statbuf + 16, st_nlink, state.arch.endness())
    state.mem.store(statbuf + 24, st_mode, state.arch.endness())
    state.mem.store(statbuf + 32, st_uid, state.arch.endness())
    state.mem.store(statbuf + 36, st_gid, state.arch.endness())
    state.mem.store(statbuf + 40, BVV(0, 8*8))  # padding
    state.mem.store(statbuf + 48, st_rdev, state.arch.endness())
    state.mem.store(statbuf + 56, st_size, state.arch.endness())
    state.mem.store(statbuf + 64, st_blksize, state.arch.endness())
    state.mem.store(statbuf + 72, st_blocks, state.arch.endness())
    state.mem.store(statbuf + 80, st_atim_tv_sec, state.arch.endness())
    state.mem.store(statbuf + 88, st_atim_tv_nsec, state.arch.endness())
    state.mem.store(statbuf + 96, st_mtim_tv_sec, state.arch.endness())
    state.mem.store(statbuf + 104, st_mtim_tv_nsec, state.arch.endness())
    state.mem.store(statbuf + 112, st_ctim_tv_sec, state.arch.endness())
    state.mem.store(statbuf + 120, st_ctim_tv_nsec, state.arch.endness())
    state.mem.store(statbuf + 128, BVV(0, 8*16))  # reserved (zero (?))
    # stat() reports success.
    return BVV(0, 32)
def stat_handler(state: State, view):
    """Model of ``stat(pathname, statbuf)``.

    Logs the (concrete) path being stat'ed and fills *statbuf* with
    symbolic values via :func:`_stat`; returns 0 (success).
    """
    global stat_idx
    pathname = get_arg_k(state, 1, state.arch.bits() // 8, view)  # arg 1: path pointer
    statbuf = get_arg_k(state, 2, state.arch.bits() // 8, view)   # arg 2: struct stat pointer
    path = ""
    if not symbolic(pathname):
        # Read the concrete NUL-terminated string, capped at 100 chars.
        i = 0
        c = state.mem.load(pathname, 1)
        while not symbolic(c) and c.value != 0 and i < 100:
            path += chr(c.value)
            i += 1
            c = state.mem.load(pathname+i, 1)
    else:
        path = "<symbolic>"
    state.events.append(
        "stat on %s" % path
    )
    return _stat(state, statbuf)
def xstat_handler(state: State, view):
    """Model of glibc's ``__xstat(version, pathname, statbuf)``.

    Same as :func:`stat_handler` but with the extra leading *version*
    argument; logs path and version, fills *statbuf* symbolically.
    """
    version = get_arg_k(state, 1, 4, view)                        # arg 1: struct stat ABI version
    pathname = get_arg_k(state, 2, state.arch.bits() // 8, view)  # arg 2: path pointer
    statbuf = get_arg_k(state, 3, state.arch.bits() // 8, view)   # arg 3: struct stat pointer
    path = ""
    if not symbolic(pathname):
        # Read the concrete NUL-terminated string, capped at 100 chars.
        i = 0
        c = state.mem.load(pathname, 1)
        while not symbolic(c) and c.value != 0 and i < 100:
            path += chr(c.value)
            i += 1
            c = state.mem.load(pathname+i, 1)
    else:
        path = "<symbolic>"
    if not symbolic(version):
        version = str(version.value)
    else:
        version = "<symbolic>"
    state.events.append(
        "__xstat on %s. version %s" % (path, version)
    )
    return _stat(state, statbuf)
|
import sys
import os.path
def parse_inputs(fastq_file):
    """Derive the two output filenames from *fastq_file*.

    Returns a ``(removed, kept)`` pair where ``.removed`` / ``.kept``
    is inserted before the original extension.
    """
    stem, extension = os.path.splitext(fastq_file)
    removed = "{0}.removed{1}".format(stem, extension)
    kept = "{0}.kept{1}".format(stem, extension)
    return removed, kept
def remove_reads(fastq_file, removed_reads_file, kept_reads_file):
    """Split a FASTQ file into kept and removed reads by sequence length.

    Reads whose sequence line (newline stripped) is shorter than 18 or
    longer than 25 characters go to *removed_reads_file*; all others go
    to *kept_reads_file*.  Assumes well-formed 4-line FASTQ records.
    """
    # Context managers guarantee the files are closed even on error
    # (the original leaked all three handles if an exception occurred).
    with open(fastq_file, "r") as f, \
         open(removed_reads_file, "w+") as g, \
         open(kept_reads_file, "w+") as h:
        prev_header = f.readline()
        line = f.readline()
        while line:
            seq_len = len(line.rstrip())
            # Route the whole 4-line record to one output file.
            out = g if seq_len < 18 or seq_len > 25 else h
            out.write(prev_header)
            out.write(line)
            out.write(f.readline())  # '+' separator line
            out.write(f.readline())  # quality line
            prev_header = f.readline()
            line = f.readline()
if __name__ == "__main__":
fastq_file = sys.argv[1]
removed_reads_file, kept_reads_file = parse_inputs(fastq_file)
remove_reads(fastq_file, removed_reads_file, kept_reads_file)
|
import heapq
class Solution:
    def kSmallestPairs(self, nums1, nums2, k):
        """
        Return the k pairs (u, v), u from nums1 and v from nums2, with
        the smallest sums.  Both lists are assumed sorted ascending
        (LeetCode 373 guarantee).

        :type nums1: List[int]
        :type nums2: List[int]
        :type k: int
        :rtype: List[List[int]]
        """
        result = []
        if not nums1 or not nums2:
            return result
        # PERF: since nums1 is sorted, only its first min(k, len(nums1))
        # rows can contribute to the k smallest pairs, so seeding the
        # heap with every row (as before) is wasted work.
        heap = [(nums1[i] + nums2[0], i, 0) for i in range(min(k, len(nums1)))]
        heapq.heapify(heap)
        while heap and len(result) < k:
            _, i, j = heapq.heappop(heap)
            result.append([nums1[i], nums2[j]])
            # Advance this row to its next column, if any.
            if j + 1 < len(nums2):
                heapq.heappush(heap, (nums1[i] + nums2[j + 1], i, j + 1))
        return result
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mibandDevice.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# NOTE: generated by protoc from mibandDevice.proto — do not hand-edit;
# regenerate from the .proto instead.
# Default symbol database where the generated message classes are registered.
_sym_db = _symbol_database.Default()
# File descriptor; serialized_pb carries the compiled FileDescriptorProto bytes.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='mibandDevice.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=b'\n\x12mibandDevice.proto\"\x1a\n\nDeviceUUID\x12\x0c\n\x04UUID\x18\x01 \x01(\t\"*\n\nHeartBeats\x12\r\n\x05pulse\x18\x01 \x01(\t\x12\r\n\x05\x65rror\x18\x02 \x01(\t2=\n\x0cMibandDevice\x12-\n\rGetHeartBeats\x12\x0b.DeviceUUID\x1a\x0b.HeartBeats\"\x00\x30\x01\x62\x06proto3'
)
# Descriptor for message DeviceUUID: a single string field "UUID".
_DEVICEUUID = _descriptor.Descriptor(
  name='DeviceUUID',
  full_name='DeviceUUID',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='UUID', full_name='DeviceUUID.UUID', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=22,
  serialized_end=48,
)
# Descriptor for message HeartBeats: string fields "pulse" and "error".
_HEARTBEATS = _descriptor.Descriptor(
  name='HeartBeats',
  full_name='HeartBeats',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='pulse', full_name='HeartBeats.pulse', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='error', full_name='HeartBeats.error', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=50,
  serialized_end=92,
)
# Attach the message descriptors to the file descriptor and register it.
DESCRIPTOR.message_types_by_name['DeviceUUID'] = _DEVICEUUID
DESCRIPTOR.message_types_by_name['HeartBeats'] = _HEARTBEATS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message classes, synthesized at import time by the reflection layer.
DeviceUUID = _reflection.GeneratedProtocolMessageType('DeviceUUID', (_message.Message,), {
  'DESCRIPTOR' : _DEVICEUUID,
  '__module__' : 'mibandDevice_pb2'
  # @@protoc_insertion_point(class_scope:DeviceUUID)
  })
_sym_db.RegisterMessage(DeviceUUID)
HeartBeats = _reflection.GeneratedProtocolMessageType('HeartBeats', (_message.Message,), {
  'DESCRIPTOR' : _HEARTBEATS,
  '__module__' : 'mibandDevice_pb2'
  # @@protoc_insertion_point(class_scope:HeartBeats)
  })
_sym_db.RegisterMessage(HeartBeats)
# Service descriptor for MibandDevice with one method:
# GetHeartBeats(DeviceUUID) -> HeartBeats.
_MIBANDDEVICE = _descriptor.ServiceDescriptor(
  name='MibandDevice',
  full_name='MibandDevice',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  serialized_start=94,
  serialized_end=155,
  methods=[
  _descriptor.MethodDescriptor(
    name='GetHeartBeats',
    full_name='MibandDevice.GetHeartBeats',
    index=0,
    containing_service=None,
    input_type=_DEVICEUUID,
    output_type=_HEARTBEATS,
    serialized_options=None,
  ),
])
_sym_db.RegisterServiceDescriptor(_MIBANDDEVICE)
DESCRIPTOR.services_by_name['MibandDevice'] = _MIBANDDEVICE
# @@protoc_insertion_point(module_scope)
|
# Importing the packages required
from bs4 import BeautifulSoup
import requests
# create a function to extract only text from <p> tags
def get_only_text(url):
    """Fetch *url* and return (title, text).

    *text* is the space-joined contents of every <p> tag on the page and
    *title* is the page's <title> text.
    """
    page = requests.get(url)
    soup = BeautifulSoup(page.content, "lxml")
    paragraphs = soup.find_all('p')
    text = ' '.join(paragraph.text for paragraph in paragraphs)
    title = ' '.join(soup.title.stripped_strings)
    return title, text
# Call the extractor on the specified URL.
text = get_only_text('https://www.voxmedia.com/2021/11/19/22791332/the-second-season-of-vox-and-vox-media-studios-the-mind-explained-premieres-today-on-netflix')
print(text) # verify the result
print(len(str.split(text[1]))) # number of words in the extracted text
# Summarization using gensim (gensim < 4.0 — the summarization module was
# removed in gensim 4).
from gensim.summarization.summarizer import summarize
from gensim.summarization import keywords
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import auth
from tempest import config
from tempest import exceptions
CONF = config.CONF
class Manager(object):
"""
Base manager class
Manager objects are responsible for providing a configuration object
and a client object for a test case to use in performing actions.
"""
def __init__(self, username=None, password=None, tenant_name=None):
"""
We allow overriding of the credentials used within the various
client classes managed by the Manager object. Left as None, the
standard username/password/tenant_name[/domain_name] is used.
:param credentials: Override of the credentials
"""
self.auth_version = CONF.identity.auth_version
# FIXME(andreaf) Change Manager __init__ to accept a credentials dict
if username is None or password is None:
# Tenant None is a valid use case
self.credentials = self.get_default_credentials()
else:
self.credentials = dict(username=username, password=password,
tenant_name=tenant_name)
if self.auth_version == 'v3':
self.credentials['domain_name'] = 'Default'
# Creates an auth provider for the credentials
self.auth_provider = self.get_auth_provider(self.credentials)
# FIXME(andreaf) unused
self.client_attr_names = []
# we do this everywhere, have it be part of the super class
def _validate_credentials(self, username, password, tenant_name):
if None in (username, password, tenant_name):
msg = ("Missing required credentials. "
"username: %(u)s, password: %(p)s, "
"tenant_name: %(t)s" %
{'u': username, 'p': password, 't': tenant_name})
raise exceptions.InvalidConfiguration(msg)
@classmethod
def get_auth_provider_class(cls, auth_version):
if auth_version == 'v2':
return auth.KeystoneV2AuthProvider
else:
return auth.KeystoneV3AuthProvider
def get_default_credentials(self):
return dict(
username=CONF.identity.username,
password=CONF.identity.password,
tenant_name=CONF.identity.tenant_name
)
def get_auth_provider(self, credentials):
if credentials is None:
raise exceptions.InvalidCredentials(
'Credentials must be specified')
auth_provider_class = self.get_auth_provider_class(self.auth_version)
return auth_provider_class(
client_type=getattr(self, 'client_type', None),
interface=getattr(self, 'interface', None),
credentials=credentials)
|
from flask import Flask, render_template, flash
from form import SetTotp
import onetimepass
import pyqrcode
import base64
app = Flask(__name__, static_url_path='/static')
# NOTE(review): hard-coded secret key — load from environment/config before
# deploying; anyone with this value can forge session cookies.
app.config['SECRET_KEY'] = '5791628bb0b13ce0c676dfde280ba245'
@app.route("/", methods=['GET','POST'])
@app.route("/index", methods=['GET','POST'])
def index():
    """Render the TOTP demo page.

    Generates a QR code (SVG) for the provisioning URI of a fixed demo
    secret and, on form submission, validates the submitted one-time code
    against that secret.
    """
    label = "FV Example"
    mail = "fvillalobos@medioclick.com"
    # TODO: a random secret can be generated with the totp tool; it is kept
    # fixed here for testing.
    secret = "JBSWY3DPEHPK3PXP"
    issuer = "FV Example"
    # otpauth provisioning URI that authenticator apps scan from the QR code.
    chl = "otpauth://totp/{0}:{1}?secret={2}&issuer={3}".format(label,mail,secret,issuer)
    var_url = pyqrcode.create(chl)
    var_url.svg('static/uca.svg', scale=4)
    # NOTE(review): get_totp's second parameter is its as_string flag —
    # confirm that passing 3 here is intentional.
    topt = onetimepass.get_totp(b'JBSWY3DPEHPK3PXP', 3)
    result = ""
    form = SetTotp()
    if form.validate_on_submit():
        # BUG FIX: the original reassigned secret = chl, so valid_totp
        # compared against the otpauth URL and always failed; it also called
        # form.totp_code() (renders the field) instead of reading the
        # submitted value via .data.
        if onetimepass.valid_totp(form.totp_code.data, secret):
            result = "Correcto"
        else:
            result = "Incorrecto"
    else:
        result = "ERROR: REVISAR CODIGO"
    # The template historically received the provisioning URI as "secret".
    return render_template('totp.html', form=form, var_url=var_url, secret=chl, result=result,topt=topt)
if __name__ == '__main__':
    # Development server only — debug=True and 0.0.0.0 must not be used in
    # production.
    app.run(debug=True,host='0.0.0.0')
|
import pandas as pd, numpy as np
def get_matches_from_list(filenames,sheetnames,column_keys,skiprows,shift=0):
    """Parse match records laid out as row lists in one or more Excel sheets.

    :param filenames: one filename or a list of filenames.
    :param sheetnames: one sheet name or a list of sheet names.
    :param column_keys: mapping of output key -> column index, or key -> dict
        of sub-key -> column index (nested records such as competitor blocks).
    :param skiprows: header rows to skip in each sheet.
    :param shift: offset added to every column index.
    :return: list of match dicts; 'match_type' is prefixed with the filename.

    Fixes: ``DataFrame.iteritems`` (used via ``df.T.iteritems()``) was removed
    in pandas 2.0 — iterate rows directly with ``iterrows``; ``fillna(method=
    'ffill')`` is deprecated — use ``ffill()``; isinstance instead of type().
    """
    matches = []
    if not isinstance(filenames, list):
        filenames = [filenames]
    if not isinstance(sheetnames, list):
        sheetnames = [sheetnames]
    for filename in filenames:
        for sheetname in sheetnames:
            df = pd.read_excel(filename, sheet_name=sheetname, skiprows=skiprows, header=None)
            # Forward-fill merged "match type" cells so every row carries one.
            df[column_keys['match_type']+shift] = df[column_keys['match_type']+shift].ffill()
            for _, row in df.iterrows():
                match = {}
                for key in column_keys:
                    if isinstance(column_keys[key], dict):
                        # Nested record: each sub-key maps to its own column.
                        match[key] = {sub: row[column_keys[key][sub]+shift]
                                      for sub in column_keys[key]}
                    elif key == 'match_type':
                        match[key] = filename + '#' + str(row[column_keys[key]+shift])
                    else:
                        match[key] = row[column_keys[key]+shift]
                matches.append(match)
    return matches
def get_matches_from_table(filename,sheetnames,skiprows,nrows=0,shift=0,drops=[]):
    """Parse a round-robin results grid where each competitor occupies two
    rows (name row + points row) and results fill the upper triangle.

    :param nrows: keep only the first nrows rows when > 0.
    :param shift: drop the first *shift* columns.
    :param drops: extra column labels to drop.
        NOTE(review): mutable default argument; only read here, but a
        None-default would be safer.
    :return: list of match dicts with 'aka'/'shiro' competitor records.
    """
    matches=[]
    if type(sheetnames)!=list:
        sheetnames=[sheetnames]
    for sheetname in sheetnames:
        df=pd.read_excel(filename,sheet_name=sheetname,header=None,skiprows=skiprows)
        # Trim leading/unwanted columns and renumber so indices are 0..n-1.
        df=df[df.columns[shift:]].drop(drops,axis=1)
        df=df.reset_index(drop=True)
        df.columns=range(len(df.columns))
        if nrows>0:
            df=df.loc[:nrows]
        # Pair every competitor i with every later competitor j-1 (upper
        # triangle): names sit on even rows, points on the following row.
        # Column layout (two point columns per opponent) assumed from the
        # spreadsheet — TODO confirm against an example sheet.
        for i in range(len(df.index)//2):
            for j in range(1,len(df.index)//2+1):
                if i<(j-1):
                    match={'match_type':sheetname,'aka':{'name':df.loc[i*2][0],
                    'point1':df.loc[i*2+1][j*2],'point2':df.loc[i*2+1][j*2+1]},
                    'shiro':{'name':df.loc[(j-1)*2][0],
                    'point1':df.loc[(j-1)*2+1][(i+1)*2],'point2':df.loc[(j-1)*2+1][(i+1)*2+1]}}
                    matches.append(match)
    return matches
def get_matches_from_table_oneliner(filename,sheetnames,skiprows,nrows=0,shift=0,point_shift=1,drops=[]):
    """Variant of get_matches_from_table for grids where each competitor is a
    single row and each pairing has one point column per side.

    :param point_shift: offset from the opponent index to the point column.
    :param drops: extra column labels to drop (NOTE(review): mutable default,
        only read here).
    :return: list of match dicts with 'aka'/'shiro' competitor records.
    """
    matches=[]
    if type(sheetnames)!=list:
        sheetnames=[sheetnames]
    for sheetname in sheetnames:
        df=pd.read_excel(filename,sheet_name=sheetname,header=None,skiprows=skiprows)
        # Trim leading/unwanted columns and renumber so indices are 0..n-1.
        df=df[df.columns[shift:]].drop(drops,axis=1)
        df=df.reset_index(drop=True)
        df.columns=range(len(df.columns))
        if nrows>0:
            df=df.loc[:nrows-1]
        # Upper-triangle pairing of row i vs row j-1; names in column 0.
        for i in range(len(df.index)):
            for j in range(1,len(df.index)+1):
                if i<(j-1):
                    match={'match_type':sheetname,'aka':{'name':df.loc[i][0],
                    'point1':df.loc[i][j+point_shift]},
                    'shiro':{'name':df.loc[(j-1)][0],
                    'point1':df.loc[(j-1)][i+point_shift+1]}}
                    matches.append(match)
    return matches
#!/usr/bin/env python
import ROOT
ROOT.gROOT.SetBatch(True)
ROOT.PyConfig.IgnoreCommandLineOptions = True
import math
# Run NVtxDistribution selector on some DY and DoubleMuon to derive scale factor
# NOTE(review): Python 2 script — uses the print statement and indexes the
# list returned by map(); both would break under Python 3.
f = ROOT.TFile.Open('NVtxDistribution-DYandData.root')
hmc = f.Get('DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/nvtxSelection')
hdata = f.Get('DoubleMuon/nvtxSelection')
nBins = 75
# Raw per-bin contents: data counts, MC contents and MC bin errors.
stackData = [hdata.GetBinContent(i) for i in range(1, nBins+1)]
stackMC = [hmc.GetBinContent(i) for i in range(1, nBins+1)]
stackMCerr = [hmc.GetBinError(i) for i in range(1, nBins+1)]
# Normalize both distributions to unit area before taking the ratio.
stackDataNormed = map(lambda i: i/sum(stackData), stackData)
stackMCNormed = map(lambda i: i/sum(stackMC), stackMC)
stackNvtx = ROOT.TH1D('nvtx', 'nvtx reweighting for 2016 ReReco vs. RunIISummer16 (Tranche4) MC', nBins, 0, nBins)
for b in range(nBins):
    d = stackDataNormed[b]
    de = math.sqrt(stackData[b]) / sum(stackData)  # Poisson error on the data bin
    m = stackMCNormed[b]
    me = stackMCerr[b] / sum(stackMC)
    if m>0 and d>0:
        r = d/m
        # Relative errors added in quadrature (treated as uncorrelated).
        re = math.sqrt( (de/d)**2 + (me/m)**2 )
    else:
        # Empty bin on either side: fall back to weight 1 +- 1.
        r = 1.
        re = 1.
    print "Bin %d: %f +- %f" % (b, r, re)
    stackNvtx.SetBinContent(b+1, r)
    stackNvtx.SetBinError(b+1, re)
fOut = ROOT.TFile('data/nvtxReRecoReweight.root', 'recreate')
fOut.cd()
stackNvtx.Write()
|
from colorama import Fore, Style
import os
import pandas as pd
def clear_screen():
    """Clear the terminal ('cls' on Windows, 'clear' elsewhere)."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
def to_int(s):
    """Parse *s* as an int; return +infinity when it cannot be parsed.

    The infinity sentinel guarantees invalid menu input fails every
    membership test against real option numbers.

    Fix: catch only (TypeError, ValueError) instead of a bare ``except``,
    which also swallowed KeyboardInterrupt/SystemExit.
    """
    try:
        return int(s)
    except (TypeError, ValueError):
        return float("inf")
def print_menu():
    """Clear the screen, show the main menu and return a validated option.

    Reads the global NAME for the greeting. Returns an int in range(7);
    re-prompts on anything else (to_int maps bad input to +inf, which
    always fails the range test).
    """
    clear_screen()
    print(f"Olá, {NAME}! Escolha uma opção\n\n")
    print("1. Trocar nome")
    print("2. Quanto devo para cada um?")
    print("3. Quem e quanto me devem?")
    print("4. Pagar alguém")
    print("5. Roda de pagamento")
    print("6. Atualizar DataFrame")
    print("\n0. Sair")
    option = to_int(input("\nEscolha uma opção: "))
    while option not in range(7):
        print(f"{Fore.RED}ERRO - Esse opção não é válida. Tente novamente.{Style.RESET_ALL}")
        option = to_int(input("\nEscolha uma opção: "))
    clear_screen()
    return option
def set_name(mes="Digite o seu primeiro nome"):
    """Prompt (message *mes*) until a name registered in the global NAMES
    list is typed; return it capitalized."""
    name = input(mes + ": ").capitalize()
    while name not in NAMES:
        print(f"{Fore.RED}ERRO - Esse nome não está cadastrado. Tente novamente.{Style.RESET_ALL}")
        name = input("Digite o seu primeiro nome: ").capitalize()
    clear_screen()
    return name
def get_data(d):
    """Translate the spreadsheet's raw cell codes into display strings.

    '2'/'2.0' -> "Pagou", '1'/'1.0' -> "Não Pagou", 'nan' -> " - ";
    anything else is returned unchanged.
    """
    translations = {
        '2': "Pagou",
        '2.0': "Pagou",
        '1': "Não Pagou",
        '1.0': "Não Pagou",
        'nan': " - ",
    }
    return translations.get(d, d)
def print_cols():
    """Print the first 12 column headers of the global `gastos` DataFrame,
    tab-aligned (short names get a double tab)."""
    for col in list(gastos)[:12]:
        if len(col) < 8:
            print(col[:15], end="\t\t")
        else:
            print(col[:15], end="\t")
    print()
def print_row(row):
    """Print the first 12 cells of one expense row, translated through
    get_data and tab-aligned to match print_cols."""
    for data in row[:12]:
        data = get_data(str(data))
        if len(data) < 8:
            print(data[:15], end="\t\t")
        else:
            print(data[:15], end="\t")
    print()
def print_rows_that_you_used():
    """Print every expense row the current user (global NAME) took part in,
    i.e. rows where their column is non-NaN in the global `gastos`."""
    for index, row in gastos.iterrows():
        if not pd.isna(row[NAME]):
            print_row(row)
def calculate_debits_per_person():
    """Return a list, indexed like NAMES, of how much the current user
    (global NAME) owes each payer.

    A cell value of 1 in the user's column marks an unpaid share.
    """
    debits = [0]*len(NAMES)
    for index, row in gastos.iterrows():
        if row[NAME] == 1:
            idx = NAMES.index(row["Quem pagou"])
            # "Preço por pessoa" looks like "R$ 12,50": take the last token
            # and swap the decimal comma for a point.
            debits[idx] += float(row["Preço por pessoa"].split()[-1].replace(",", "."))
    return debits
def print_debits_per_person(debits):
    """Pretty-print *debits* (one value per NAMES entry) and the total,
    skipping the current user's own entry. Tab padding varies with name
    length to keep columns roughly aligned."""
    total = 0
    for idx, name in enumerate(NAMES):
        if name == NAME: continue
        total += debits[idx]
        if len(name) < 7:
            print(f"{name}:\t R${'%.2f' % debits[idx]}")
        elif len(name) < 8:
            print(f"{name}: R${'%.2f' % debits[idx]}")
        else:
            print(f"{name}: R${'%.2f' % debits[idx]}")
    print(f"\nTOTAL:\t R${'%.2f' % total}")
def debits():
    """Menu option 2: show the rows the user participated in and how much
    they owe each person, then wait for ENTER."""
    print(f"{NAME}, aqui estão seus débitos: ")
    print_cols()
    print_rows_that_you_used()
    print("\nQuanto você deve para cada um:\n")
    debits = calculate_debits_per_person()  # local intentionally shadows this function
    print_debits_per_person(debits)
    input("\n\nPress ENTER to return to menu")
def print_rows_that_you_paid():
    """Print every expense row that the current user (global NAME) paid for."""
    for index, row in gastos.iterrows():
        if row["Quem pagou"] == NAME:
            print_row(row)
def calculate_credits_per_person():
    """Return a list, indexed like NAMES, of how much each person owes the
    current user, over the rows the user paid (participant cell == 1)."""
    credits = [0]*len(NAMES)
    for index, row in gastos.iterrows():
        if row["Quem pagou"] == NAME:
            for idx, name in enumerate(NAMES):
                if row[name] == 1:
                    # Same "R$ 12,50"-style parsing as the debit calculator.
                    credits[idx] += float(row["Preço por pessoa"].split()[-1].replace(",", "."))
    return credits
def print_credits_per_person(credits):
    """Pretty-print *credits* (one value per NAMES entry) and the total,
    skipping the current user. Mirror image of print_debits_per_person."""
    total = 0
    for idx, name in enumerate(NAMES):
        if name == NAME: continue
        total += credits[idx]
        if len(name) < 7:
            print(f"{name}:\t R${'%.2f' % credits[idx]}")
        elif len(name) < 8:
            print(f"{name}: R${'%.2f' % credits[idx]}")
        else:
            print(f"{name}: R${'%.2f' % credits[idx]}")
    print(f"\nTOTAL:\t R${'%.2f' % total}")
def credits():
    """Menu option 3: show the rows the user paid and how much each person
    owes them. (Name shadows the `credits` builtin — harmless here.)"""
    print(f"{NAME}, aqui estão seus créditos: \n")
    print_cols()
    print_rows_that_you_paid()
    print("\nQuanto cada um te deve:\n")
    credits = calculate_credits_per_person()
    print_credits_per_person(credits)
    input("\n\nPress ENTER to return to menu")
def pay():
    """Menu option 4: show the user's net debt to a chosen person and, if a
    debt exists, print that person's bank details from the global
    `dados_bancarios` DataFrame."""
    receiver = set_name("Digite o nome de quem você quer pagar")
    idx = NAMES.index(receiver)
    credits = calculate_credits_per_person()
    debits = calculate_debits_per_person()
    # Net balance against the receiver: positive or zero means nothing owed.
    if credits[idx]-debits[idx] >= 0:
        print(f"Que maravilha, {NAME}! Você não deve nada para {receiver}.")
    else:
        debit = debits[idx]-credits[idx]
        print(f"Pelos nossos calculos, você deve R${'%.2f' % debit} para {receiver}.\n")
        print("Para pagar, basta enviar esse valor para a conta abaixo:")
        for index, row in dados_bancarios.iterrows():
            if len(row["Dado"]) < 7:
                print(row["Dado"] + ": \t\t" + str(row[receiver]))
            else:
                print(row["Dado"] + ": \t" + str(row[receiver]))
    input("\n\nPress ENTER to return to menu")
def roda_de_gastos():
    """Menu option 5: compute each person's net balance and print a greedy
    settlement plan (largest net debtor pays the largest net creditor,
    repeat until one participant remains)."""
    global NAME
    old_name = NAME
    all_debits = []
    # Reuse the per-user calculators by temporarily switching the global NAME.
    for name in NAMES:
        NAME = name
        credits = calculate_credits_per_person()
        debits = calculate_debits_per_person()
        diff = []
        for i in range(len(credits)):
            diff.append(debits[i]-credits[i])
        all_debits.append([sum(diff), NAME])
    # Descending by net debt: index 0 owes the most, index -1 is owed the most.
    all_debits = sorted(all_debits)[::-1]
    while(len(all_debits) > 1):
        print(f"{all_debits[0][1]} paga R${'%.2f' % all_debits[0][0]} ao {all_debits[-1][1]}")
        # Transfer the debtor's whole balance to the creditor and drop the debtor.
        all_debits[-1][0] += all_debits[0][0]
        all_debits = sorted(all_debits[1:])[::-1]
    input("\n\nPress ENTER to return to menu")
    NAME = old_name
def update_gastos():
    """Re-download the expense sheet and the bank-details sheet from Google
    Sheets and return them as (gastos, dados_bancarios) DataFrames.

    Note: assigns locals and returns them; the module-level globals are
    rebound by the caller.
    """
    gastos = pd.read_csv("https://docs.google.com/spreadsheets/d/1dCYfYqVfgioZQ5YXxrmSPXuiudIbfHvW4jj9tQQBq20/export?gid=0&format=csv")
    dados_bancarios = pd.read_csv("https://docs.google.com/spreadsheets/d/1dCYfYqVfgioZQ5YXxrmSPXuiudIbfHvW4jj9tQQBq20/export?gid=1968442958&format=csv")
    return gastos, dados_bancarios
# ---- Interactive entry point: splash screen, then the menu loop. ----
clear_screen()
print(" ___________________________________________________")
print(f"/ {Fore.WHITE}Sejam bem-vindos, meus nobres puladores!{Style.RESET_ALL} \\")
print(f"\\ {Fore.WHITE}Hora de contar quantos pulos cada um está devendo{Style.RESET_ALL} /")
print(" ---------------------------------------------------")
print(" \\")
print(" \\")
print(f"{Fore.GREEN} oO)-. .-(@@\\")
print(" /__ _\ /_ __\\")
print(" \ \( | ()~() | )/ /\\")
print(" \__|\ | (-___-) | /|__/\\")
print(" ' '--' ==`-'== '--' '\\")
print(f"{Style.RESET_ALL}\n\n")
input("Press ENTER to continue")
clear_screen()
# Load the shared data and the registered participant list, then ask who is
# using the app.
gastos, dados_bancarios = update_gastos()
NAMES = ["Breno", "Bruno", "Caio", "Emanuel", "Henrique", "Pedro", "Rafael"]
NAME = set_name()
option = print_menu()
# Option 0 ("Sair") is falsy and exits the loop.
while option:
    clear_screen()
    if option == 1:
        NAME = set_name()
    elif option == 2:
        debits()
    elif option == 3:
        credits()
    elif option == 4:
        pay()
    elif option == 5:
        roda_de_gastos()
    elif option == 6:
        gastos, dados_bancarios = update_gastos()
        clear_screen()
    option = print_menu()
|
from fatalattractors import attractors
import buchi as buchi
import copy
from graph import Graph
def buchi_inter_safety_player(g, u, s, j):
    """
    Solves a Buchi inter safety game where player j has that objective.

    :param g: a game graph.
    :param u: the set of nodes to be visited infinitely often.
    :param s: the set of nodes to be avoided.
    :param j: the player with the Buchi inter safety objective.
    :return: the winning regions w_0, w_1.
    """
    opponent = (j + 1) % 2
    # Player j must never enter the opponent's attractor to s; removing it
    # leaves the sub-arena on which s is avoidable forever.
    attr_to_s, safe_region = attractors.attractor(g, s, opponent)
    restricted_game = g.subgame(safe_region)
    # TODO check if it is required to return both winning regions
    return buchi.buchi_classical_player(restricted_game, u, j)
def buchi_inter_safety(g, u, s):
    """
    Solves a Buchi inter safety game where player 0 has that objective.

    :param g: a game graph.
    :param u: the set of nodes to be visited infinitely often.
    :param s: the set of nodes to be avoided.
    :return: the winning regions w_0, w_1.
    """
    # Remove player 1's attractor to s, then solve the plain Buchi game on
    # the remaining sub-arena.
    attr_to_s, safe_region = attractors.attractor(g, s, 1)
    return buchi.buchi_classical(g.subgame(safe_region), u)
def buchi_inter_safety_transformation(g, u, s):
    """
    Solves a Buchi inter safety game where player 0 has that objective. U is the set to be visited infinitely often
    and s is to be avoided. This is solved by creating a new game arena where every node in s is replaced by a sink and
    predecessors have edges leading to that sink. The classical Buchi algorithm is then applied.
    :param g: a game graph.
    :param u: the set od nodes to be visited infinitely often.
    :param s: the set of nodes to be avoid.
    :return: the winning regions w_0, w_1.
    """
    # Work with a copy which we modify
    g_copy = copy.deepcopy(g)  # type: Graph
    # Create the sink
    # Node -1 with priority/owner tuple (0, 0) and a self-loop: entering it
    # means staying there forever, i.e. the play is lost for the Buchi player.
    g_copy.add_node(-1, (0, 0))
    g_copy.add_successor(-1, -1)
    g_copy.add_predecessor(-1, -1)
    for node in s:
        # TODO should we remove predecessors and successors of these nodes
        # NOTE(review): remove_node followed by `del g_copy.successors[node]`
        # assumes remove_node does NOT already purge the successors mapping —
        # confirm against the Graph API, otherwise this raises KeyError.
        g_copy.remove_node(node)
        del g_copy.successors[node]
        # delete bad nodes, remove their successors
        # fix their predecessors by removing them from the successors and replace rc by arc to -1
        for prede in g_copy.get_predecessors(node):
            g_copy.successors[prede] = [-1 if k == node else k for k in g_copy.successors[prede]]
    return buchi.buchi_classical(g_copy, u)
|
# Copyright 2017 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nose import SkipTest
from nose.tools import with_setup
from ..connection.info import custom_setup, custom_teardown, get_skip_msg
from ucsmsdk.utils.ucsbackup import backup_ucs
from ucsmsdk.ucshandle import UcsHandle
handle = None
def setup_module():
    """Create the module-wide UCS handle; skip every test in this module
    when the test connection cannot be established."""
    global handle
    handle = custom_setup()
    if not handle:
        msg = get_skip_msg()
        raise SkipTest(msg)
def teardown_module():
    """Release the module-wide UCS handle created by setup_module."""
    custom_teardown(handle)
def _test_ucs_backup(handle, file_dir, file_name, backup_type):
    """Trigger a UCS backup of *backup_type* on *handle* and download it to
    file_dir/file_name (shared helper for the tests below)."""
    backup_ucs(handle,
               backup_type=backup_type,
               file_dir=file_dir,
               file_name=file_name)
@with_setup(setup_module, teardown_module)
def test_ucs_backup():
    """Smoke test: a logical-config backup succeeds with the shared handle."""
    _test_ucs_backup(handle, file_dir="/tmp/backup",
                     file_name="config1.xml",
                     backup_type="config-logical")
def test_ucs_backup_after_freeze_unfreeze():
    """A handle serialized via freeze() and restored with UcsHandle.unfreeze()
    must still support a backup download."""
    # for this test to be more meaningful there needs to be proxy server
    # configured
    h1 = custom_setup()
    frozen_handle = h1.freeze()
    h2 = UcsHandle.unfreeze(frozen_handle)
    # Try a download operation using new handle
    _test_ucs_backup(h2, file_dir="/tmp/backup",
                     file_name="config2.xml",
                     backup_type="config-logical")
    custom_teardown(h2)
|
import brambox as bb
import os
from os.path import join, basename
from pathflowai.utils import load_sql_df, npy2da
import skimage
import dask, dask.array as da, pandas as pd, numpy as np
import argparse
from scipy import ndimage
from scipy.ndimage.measurements import label
import pickle
from dask.distributed import Client
from multiprocessing import Pool
from functools import reduce
def get_box(l, prop):
    """Build one box record [label, cx, cy, side] from a regionprops entry.

    The box is square: *side* is the larger of the region's bbox width and
    height; (cx, cy) is the region centroid in (x, y) order (regionprops
    centroids are (row, col), hence the swap).
    """
    center_xy = [prop.centroid[1], prop.centroid[0]]
    bbox_width = prop.bbox[3] - prop.bbox[1] + 1
    bbox_height = prop.bbox[2] - prop.bbox[0] + 1
    side = max(bbox_width, bbox_height)
    return [l] + center_xy + [side]
def get_boxes(m,ID='test',x='x',y='y',patch_size='patchsize', num_classes=3):
    """Turn a labeled class mask into per-object square boxes.

    m: integer mask with values 0 (background) .. num_classes.
    Returns a DataFrame with columns
    [image, class_label, x_top_left, y_top_left, width, height], where
    'image' encodes "ID/x/y/patch_size".

    NOTE(review): get_box returns centroid coordinates, yet the columns are
    named *_top_left; the caller later shifts them by width/2 — confirm the
    naming is intentional.
    """
    # Connected-component labelling over the whole (multi-class) mask.
    lbls,n_lbl=label(m)
    # For each class, which component ids overlap pixels of that class.
    obj_labels={}
    for i in range(1,num_classes+1):
        obj_labels[i]=np.unique(lbls[m==i].flatten())
    # Invert to component-id -> class-id, in ascending component order so it
    # lines up with regionprops' ordering below.
    rev_label={}
    for k in obj_labels:
        for i in obj_labels[k]:
            rev_label[i]=k
    rev_label={k:rev_label[k] for k in sorted(list(rev_label.keys()))}
    objProps = list(skimage.measure.regionprops(lbls))
    #print(len(objProps),len(rev_label))
    boxes=dask.compute(*[dask.delayed(get_box)(rev_label[i],objProps[i-1]) for i in list(rev_label.keys())],scheduler='threading') # [get_box(rev_label[i],objProps[i-1]) for i in list(rev_label.keys())]#
    #print(boxes)
    boxes=pd.DataFrame(np.array(boxes).astype(int),columns=['class_label','x_top_left','y_top_left','width'])
    #boxes['class_label']=m[boxes[['x_top_left','y_top_left']].values.T.tolist()]
    boxes['height']=boxes['width']
    boxes['image']='{}/{}/{}/{}'.format(ID,x,y,patch_size)
    boxes=boxes[['image','class_label','x_top_left','y_top_left','width','height']]
    # Clamp coordinates into the mask bounds.
    boxes.loc[:,'x_top_left']=np.clip(boxes.loc[:,'x_top_left'],0,m.shape[1])
    boxes.loc[:,'y_top_left']=np.clip(boxes.loc[:,'y_top_left'],0,m.shape[0])
    # NOTE(review): bbox_df below is built but never returned — dead code?
    bbox_df=bb.util.new('annotation').drop(columns=['difficult','ignore','lost','occluded','truncated'])[['image','class_label','x_top_left','y_top_left','width','height']]
    bbox_df=bbox_df.append(boxes)
    #print(boxes)
    return boxes
if __name__=='__main__':
    # CLI: derive brambox bounding-box annotations from per-patch class masks.
    p=argparse.ArgumentParser()
    p.add_argument('--num_classes',default=4,type=int)
    p.add_argument('--patch_size',default=512,type=int)
    p.add_argument('--n_workers',default=40,type=int)
    p.add_argument('--p_sample',default=0.7,type=float)
    p.add_argument('--input_dir',default='inputs',type=str)
    p.add_argument('--patch_info_file',default='cell_info.db',type=str)
    p.add_argument('--reference_mask',default='reference_mask.npy',type=str)
    #c=Client()
    # add mode to just use own extracted boudning boxes or from seg, maybe from histomicstk
    args=p.parse_args()
    num_classes=args.num_classes
    n_workers=args.n_workers
    input_dir=args.input_dir
    patch_info_file=args.patch_info_file
    patch_size=args.patch_size
    p_sample=args.p_sample
    np.random.seed(42)
    annotation_file = 'annotations_bbox_{}.pkl'.format(patch_size)
    reference_mask=args.reference_mask
    # Per-class "official" box widths (mean + 2*std from a reference mask),
    # computed once and cached on disk.
    if not os.path.exists('widths.pkl'):
        m=np.load(reference_mask)
        bbox_df=get_boxes(m)
        official_widths=dict(bbox_df.groupby('class_label')['width'].mean()+2*bbox_df.groupby('class_label')['width'].std())
        pickle.dump(official_widths,open('widths.pkl','wb'))
    else:
        official_widths=pickle.load(open('widths.pkl','rb'))
    patch_info=load_sql_df(patch_info_file, patch_size)
    IDs=patch_info['ID'].unique()
    #slides = {slide:da.from_zarr(join(input_dir,'{}.zarr'.format(slide))) for slide in IDs}
    # Lazily-loaded mask arrays, one per slide ID.
    masks = {mask:npy2da(join(input_dir,'{}_mask.npy'.format(mask))) for mask in IDs}
    if p_sample < 1.:
        patch_info=patch_info.sample(frac=p_sample)
    # Resume support: start from any existing annotation file and drop the
    # patches whose "ID/x/y/patch_size" image key is already annotated.
    if not os.path.exists(annotation_file):
        bbox_df=bb.util.new('annotation').drop(columns=['difficult','ignore','lost','occluded','truncated'])[['image','class_label','x_top_left','y_top_left','width','height']]
    else:
        bbox_df=bb.io.load('pandas',annotation_file)
    patch_info=patch_info[~np.isin(np.vectorize(lambda i: '/'.join(patch_info.iloc[i][['ID','x','y','patch_size']].astype(str).tolist()))(np.arange(patch_info.shape[0])),set(bbox_df.image.cat.categories))]
    print(patch_info.shape[0])
    def get_boxes_point_seg(m,ID,x,y,patch_size2,num_classes):
        # Boxes with per-class fixed widths, re-centred from the centroid
        # values that get_boxes stored in the *_top_left columns.
        bbox_dff=get_boxes(m,ID=ID,x=x,y=y,patch_size=patch_size2, num_classes=num_classes)
        for i in official_widths.keys():
            bbox_dff.loc[bbox_dff['class_label']==i,'width']=int(official_widths[i])
        bbox_dff.loc[:,'x_top_left']=(bbox_dff.loc[:,'x_top_left']-bbox_dff['width']/2.).astype(int)
        bbox_dff.loc[:,'y_top_left']=(bbox_dff.loc[:,'y_top_left']-bbox_dff['width']/2.).astype(int)
        bbox_dff.loc[:,'x_top_left']=np.clip(bbox_dff.loc[:,'x_top_left'],0,m.shape[1])
        bbox_dff.loc[:,'y_top_left']=np.clip(bbox_dff.loc[:,'y_top_left'],0,m.shape[0])
        return bbox_dff
    def process_chunk(patch_info_sub):
        # Worker body: annotate every patch in one chunk of patch_info.
        patch_info_sub=patch_info_sub.reset_index(drop=True)
        bbox_dfs=[]
        for i in range(patch_info_sub.shape[0]):
            #print(i)
            patch=patch_info_sub.iloc[i]
            ID,x,y,patch_size2=patch[['ID','x','y','patch_size']].tolist()
            m=masks[ID][x:x+patch_size2,y:y+patch_size2]
            bbox_dff=get_boxes_point_seg(m,ID,x,y,patch_size2,num_classes)#dask.delayed(get_boxes_point_seg)(m,ID,x,y,patch_size2)
            #print(bbox_dff)
            bbox_dfs.append(bbox_dff)
        return bbox_dfs
    # Fan the patches out over a process pool and merge all partial results.
    patch_info_subs=np.array_split(patch_info,n_workers)
    p=Pool(n_workers)
    bbox_dfs=reduce(lambda x,y:x+y,p.map(process_chunk,patch_info_subs))
    #bbox_dfs=dask.compute(*bbox_dfs,scheduler='processes')
    bbox_df=pd.concat([bbox_df]+bbox_dfs)
    bbox_df.loc[:,'height']=bbox_df['width']
    bb.io.save(bbox_df,'pandas',annotation_file)
|
# -*- coding: utf-8 -*-
from puremvc.patterns.mediator import Mediator
class MainWindowMediator(Mediator):
    """
    PureMVC mediator wrapping the main-window view component.
    Currently only forwards construction to the Mediator base class.
    """
    # Canonical name under which this mediator is registered with the facade.
    NAME = 'MainWindowMediator'
    def __init__(self, mediatorName=None, viewComponent=None):
        super(MainWindowMediator, self).__init__(mediatorName, viewComponent)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 31 22:20:24 2020
@author: dykua
helpler functions for onnx face detection
"""
import numpy as np
import cv2
def area_of(left_top, right_bottom):
    """
    Compute the areas of rectangles given two corners.

    Args:
        left_top (N, 2): left-top corners.
        right_bottom (N, 2): right-bottom corners.
    Returns:
        area (N): rectangle areas; degenerate boxes (negative extent)
        contribute 0 because extents are clipped at zero.
    """
    extents = np.clip(right_bottom - left_top, 0.0, None)
    widths = extents[..., 0]
    heights = extents[..., 1]
    return widths * heights
def iou_of(boxes0, boxes1, eps=1e-5):
    """
    Return intersection-over-union (Jaccard index) of corner-form boxes.

    Args:
        boxes0 (N, 4): ground truth boxes.
        boxes1 (N or 1, 4): predicted boxes (broadcasts against boxes0).
        eps: small constant keeping the denominator away from zero.
    Returns:
        iou (N): IoU values.
    """
    inter_left_top = np.maximum(boxes0[..., :2], boxes1[..., :2])
    inter_right_bottom = np.minimum(boxes0[..., 2:], boxes1[..., 2:])
    intersection = area_of(inter_left_top, inter_right_bottom)
    union = (area_of(boxes0[..., :2], boxes0[..., 2:])
             + area_of(boxes1[..., :2], boxes1[..., 2:])
             - intersection)
    return intersection / (union + eps)
def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=200):
    """
    Perform hard non-maximum-suppression to filter out boxes with iou greater
    than threshold
    Args:
        box_scores (N, 5): boxes in corner-form and probabilities.
        iou_threshold: intersection over union threshold.
        top_k: keep top_k results. If k <= 0, keep all the results.
        candidate_size: only consider the candidates with the highest scores.
    Returns:
        picked: the rows of box_scores that were kept (not just the indexes).
    """
    scores = box_scores[:, -1]
    boxes = box_scores[:, :-1]
    picked = []
    # Ascending score order; the best remaining candidate sits at the end.
    indexes = np.argsort(scores)
    indexes = indexes[-candidate_size:]
    while len(indexes) > 0:
        current = indexes[-1]
        picked.append(current)
        # Stop once top_k boxes are kept or this was the last candidate.
        if 0 < top_k == len(picked) or len(indexes) == 1:
            break
        current_box = boxes[current, :]
        indexes = indexes[:-1]
        rest_boxes = boxes[indexes, :]
        iou = iou_of(
            rest_boxes,
            np.expand_dims(current_box, axis=0),
        )
        # Discard every remaining candidate overlapping the kept box too much.
        indexes = indexes[iou <= iou_threshold]
    return box_scores[picked, :]
def predict(width, height, confidences, boxes, prob_threshold, iou_threshold=0.5, top_k=-1):
    """
    Select boxes that contain human faces
    Args:
        width: original image width
        height: original image height
        confidences (N, 2): confidence array (batched: element 0 is used)
        boxes (N, 4): boxes array in corner-form, normalized to [0, 1]
        prob_threshold: minimum class probability for a box to be considered
        iou_threshold: intersection over union threshold.
        top_k: keep top_k results. If k <= 0, keep all the results.
    Returns:
        boxes (k, 4): an array of boxes kept, scaled to pixel coordinates
        labels (k): an array of labels for each boxes kept
        probs (k): an array of probabilities for each boxes being in corresponding labels
    """
    # Both inputs arrive batched; only the first (and only) image is used.
    boxes = boxes[0]
    confidences = confidences[0]
    #print(boxes)
    #print(confidences)
    picked_box_probs = []
    picked_labels = []
    # Column 0 is skipped — presumably the background class; confirm against
    # the exporting model.
    for class_index in range(1, confidences.shape[1]):
        #print(confidences.shape[1])
        probs = confidences[:, class_index]
        #print(probs)
        mask = probs > prob_threshold
        probs = probs[mask]
        if probs.shape[0] == 0:
            continue
        subset_boxes = boxes[mask, :]
        #print(subset_boxes)
        # NMS operates on [x1, y1, x2, y2, score] rows.
        box_probs = np.concatenate([subset_boxes, probs.reshape(-1, 1)], axis=1)
        box_probs = hard_nms(box_probs,
                             iou_threshold=iou_threshold,
                             top_k=top_k,
                             )
        picked_box_probs.append(box_probs)
        picked_labels.extend([class_index] * box_probs.shape[0])
    if not picked_box_probs:
        return np.array([]), np.array([]), np.array([])
    picked_box_probs = np.concatenate(picked_box_probs)
    # Scale normalized corner coordinates back to pixels.
    picked_box_probs[:, 0] *= width
    picked_box_probs[:, 1] *= height
    picked_box_probs[:, 2] *= width
    picked_box_probs[:, 3] *= height
    return picked_box_probs[:, :4].astype(np.int32), np.array(picked_labels), picked_box_probs[:, 4]
class BBox(object):
    """Axis-aligned face box built from a [left, right, top, bottom] list,
    with helpers to map landmarks between absolute pixel coordinates and
    box-relative [0, 1] coordinates."""

    def __init__(self, bbox):
        left, right, top, bottom = bbox[0], bbox[1], bbox[2], bbox[3]
        self.left = left
        self.right = right
        self.top = top
        self.bottom = bottom
        # Convenience aliases: origin and size of the box.
        self.x = left
        self.y = top
        self.w = right - left
        self.h = bottom - top

    def projectLandmark(self, landmark):
        """Map absolute (x, y) landmarks into box-relative [0, 1] coords."""
        projected = np.zeros(landmark.shape)
        for i, point in enumerate(landmark):
            projected[i] = ((point[0] - self.x) / self.w,
                            (point[1] - self.y) / self.h)
        return projected

    def reprojectLandmark(self, landmark):
        """Map box-relative [0, 1] landmarks back to absolute (x, y) coords."""
        reprojected = np.zeros(landmark.shape)
        for i, point in enumerate(landmark):
            abs_x = point[0] * self.w + self.x
            abs_y = point[1] * self.h + self.y
            reprojected[i] = (abs_x, abs_y)
        return reprojected
# 3D model points of ten facial landmarks (model units) used as the object
# points for cv2.solvePnP in get_head_pose below.
# NOTE(review): the landmark indices used in get_head_pose
# (17, 21, 22, 26, 36, 39, 42, 45, 31, 35) suggest brow corners, eye corners
# and nose wings from a 68-point layout -- confirm against the detector used.
object_pts = np.float32([[6.825897, 6.760612, 4.402142],
                         [1.330353, 7.122144, 6.903745],
                         [-1.330353, 7.122144, 6.903745],
                         [-6.825897, 6.760612, 4.402142],
                         [5.311432, 5.485328, 3.987654],
                         [1.789930, 5.393625, 4.413414],
                         [-1.789930, 5.393625, 4.413414],
                         [-5.311432, 5.485328, 3.987654],
                         [2.005628, 1.409845, 6.165652],
                         [-2.005628, 1.409845, 6.165652]])
# Corners of a 20x20x20 cube centered at the origin; reprojected into the
# image by get_head_pose to visualize the estimated pose.
reprojectsrc = np.float32([[10.0, 10.0, 10.0],
                           [10.0, 10.0, -10.0],
                           [10.0, -10.0, -10.0],
                           [10.0, -10.0, 10.0],
                           [-10.0, 10.0, 10.0],
                           [-10.0, 10.0, -10.0],
                           [-10.0, -10.0, -10.0],
                           [-10.0, -10.0, 10.0]])
# Index pairs into reprojectsrc forming the 12 edges of the cube
# (presumably used by drawing code elsewhere in this file).
line_pairs = [[0, 1], [1, 2], [2, 3], [3, 0],
              [4, 5], [5, 6], [6, 7], [7, 4],
              [0, 4], [1, 5], [2, 6], [3, 7]]
def get_head_pose(shape, img):
    """Estimate head pose from 2D facial landmarks via PnP.

    Args:
        shape: indexable collection of 2D landmark points; indices
            17, 21, 22, 26, 36, 39, 42, 45, 31, 35 are used.
        img: image array, used only for its height/width.
    Returns:
        (reprojectdst, euler_angle): the eight reprojected cube corners as a
        tuple of (x, y) tuples, and the Euler-angle matrix produced by
        cv2.decomposeProjectionMatrix.
    """
    img_h, img_w, _ = img.shape
    # Pinhole camera approximation: focal length = image width, principal
    # point at the image center, and zero lens distortion.
    cam_matrix = np.array([[img_w, 0.0, img_w // 2],
                           [0.0, img_w, img_h // 2],
                           [0.0, 0.0, 1.0]]).astype(np.float32)
    dist_coeffs = np.zeros((5, 1)).astype(np.float32)
    image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
                            shape[39], shape[42], shape[45], shape[31], shape[35]])
    _, rotation_vec, translation_vec = cv2.solvePnP(object_pts, image_pts, cam_matrix, dist_coeffs)
    # Project the reference cube into the image for pose visualization.
    reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec, translation_vec, cam_matrix, dist_coeffs)
    reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))
    # Convert the rotation vector to Euler angles via a projection matrix.
    rotation_mat, _ = cv2.Rodrigues(rotation_vec)
    pose_mat = cv2.hconcat((rotation_mat, translation_vec))
    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)
    return reprojectdst, euler_angle
# def drawLandmark(img, bbox, landmark):
# '''
# Input:
# - img: gray or RGB
# - bbox: type of BBox
# - landmark: reproject landmark of (5L, 2L)
# Output:
# - img marked with landmark and bbox
# '''
# img_ = img.copy()
# cv2.rectangle(img_, (bbox.left, bbox.top), (bbox.right, bbox.bottom), (0,0,255), 2)
# for x, y in landmark:
# cv2.circle(img_, (int(x), int(y)), 3, (0,255,0), -1)
# return img_
# def drawLandmark_multiple(img, bbox, landmark):
# '''
# Input:
# - img: gray or RGB
# - bbox: type of BBox
# - landmark: reproject landmark of (5L, 2L)
# Output:
# - img marked with landmark and bbox
# '''
# cv2.rectangle(img, (bbox.left, bbox.top), (bbox.right, bbox.bottom), (0,0,255), 2)
# for x, y in landmark:
# cv2.circle(img, (int(x), int(y)), 2, (0,255,0), -1)
# return img
# def drawLandmark_Attribute(img, bbox, landmark,gender,age):
# '''
# Input:
# - img: gray or RGB
# - bbox: type of BBox
# - landmark: reproject landmark of (5L, 2L)
# Output:
# - img marked with landmark and bbox
# '''
# cv2.rectangle(img, (bbox.left, bbox.top), (bbox.right, bbox.bottom), (0,0,255), 2)
# for x, y in landmark:
# cv2.circle(img, (int(x), int(y)), 3, (0,255,0), -1)
# if gender.argmax()==0:
# # -1->female, 1->male; -1->old, 1->young
# cv2.putText(img, 'female', (int(bbox.left), int(bbox.top)),cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 3)
# else:
# cv2.putText(img, 'male', (int(bbox.left), int(bbox.top)),cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0),3)
# if age.argmax()==0:
# cv2.putText(img, 'old', (int(bbox.right), int(bbox.bottom)),cv2.FONT_HERSHEY_SIMPLEX, 1,(255, 255, 0), 3)
# else:
# cv2.putText(img, 'young', (int(bbox.right), int(bbox.bottom)),cv2.FONT_HERSHEY_SIMPLEX, 1,(255, 255, 0), 3)
# return img
# def drawLandmark_only(img, landmark):
# '''
# Input:
# - img: gray or RGB
# - bbox: type of BBox
# - landmark: reproject landmark of (5L, 2L)
# Output:
# - img marked with landmark and bbox
# '''
# img_=img.copy()
# #cv2.rectangle(img_, (bbox.left, bbox.top), (bbox.right, bbox.bottom), (0,0,255), 2)
# for x, y in landmark:
# cv2.circle(img_, (int(x), int(y)), 3, (0,255,0), -1)
# return img_
|
print('''
The task here is to remove all the white-spaces
from the string. For this purpose, we need to
traverse the string and check if any character
of the string is matched with a white-space
character or not. If so, Use any built-in method
like replace() with a blank.
''')
string=input('Enter string here: ')
# str.split() with no argument splits on runs of ANY whitespace (space, tab,
# form feed, ...), so joining the pieces removes every whitespace character.
# The previous replace(" ", "") removed only plain spaces, which did not
# match the "remove all the white-spaces" task above.
string="".join(string.split())
print(f'String after removing all the white spaces: \"{string}\"')
#!/usr/local/bin/python
#
# Copyright (c) 2013 University of Pennsylvania
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys
# Bail out early on pre-2.7 interpreters (this is a Python 2 script and the
# features used below require at least 2.7).
if sys.hexversion < 0x020700F0:
    print "Detected Python " + sys.version
    sys.exit("***ERROR: Must be using Python 2.7.x (recommended) or above")
import argparse
import subprocess
import os
import datetime
import time
import re
import os.path
from distutils import spawn
# Locate the required external tools on PATH and abort with a clear message
# if missing: Rscript runs the statistics/classification steps, samtools
# handles BAM access.
RSCRIPT=spawn.find_executable("Rscript")
if RSCRIPT is None:
    print "***ERROR: Rscript is not found"
    sys.exit("Please instal R / Rscript or make sure it is in the PATH")
SAMTOOLS=spawn.find_executable("samtools")
if SAMTOOLS is None:
    print "***ERROR: samtools is not found"
    sys.exit("Please install samtools or make sure it is in the PATH")
# command line arguments
parser = argparse.ArgumentParser(description="Takes a bam file that has been sorted with redundant reads removed and generates a HAMR predicted_mods.txt output")
# Positional arguments: inputs, output location, and analysis thresholds.
# All thresholds arrive as strings and are converted where they are used.
parser.add_argument('bam',help='A sorted bam file consisting of nonredundant reads')
parser.add_argument('genome_fas',help='Genome fasta file; WARNING: remember to index the reference using samtools faifx')
parser.add_argument('prediction_training_set',help='modification identity training set model file; .RData format')
parser.add_argument('output_folder',help='name of folder to put HAMR output')
parser.add_argument('out_prefix',help='Prefix for HAMR output')
parser.add_argument('min_qual',help='The minimum quality score of a read to be analyzed')
parser.add_argument('min_cov',help='The minimum coverage of a nucleotide to be analyzed')
parser.add_argument('seq_err',help='The percentage of mismatches based solely on sequencing error')
parser.add_argument('hypothesis',help='The hypothesis to be tested, either "H1" or "H4"')
parser.add_argument('max_p',help='The maximum p-value cutoff')
parser.add_argument('max_fdr',help='The maximum FDR cutoff')
parser.add_argument('refpercent',help='The percentage of reads that must match the reference nucleotide')
parser.add_argument('--target_bed', '-n', action='store', dest='target_bed', nargs='?', default='unspecified', help='Specifies genomic intervals for analysis; e.g. all mRNAs. If unspecified, defaults to whole genome')
parser.add_argument('--paired_ends','-pe',action='store_true',help='Use this tag to indicate paired-end sequencing')
parser.add_argument('--filter_ends','-fe',action='store_true',help='Exclude the first and last nucleotides of a read from the analysis')
args=parser.parse_args()
#Raise error if hypothesis has invalid value
if args.hypothesis != 'H1' and args.hypothesis != 'H4':
    raise ValueError('Hypothesis must be H1 or H4.')
#locations of C, Bash, and R scripts
# All helper binaries/scripts are expected to live next to this script.
hamr_dir=os.path.dirname(os.path.realpath(sys.argv[0]))
rnapileup=hamr_dir+"/"+"rnapileup" #C-script
filter_pileup=hamr_dir+"/"+"filter_pileup" #C-script
rnapileup2mismatchbed=hamr_dir+"/"+"rnapileup2mismatchbed" #C-script
mismatchbed2table=hamr_dir+"/"+"mismatchbed2table.sh" #Shell script
detect_mods_definite=hamr_dir+"/"+"detect_mods.R" #R script
classify_mods=hamr_dir+"/"+"classify_mods.R" #Rscript
#get flags
# rnapileup accepts an optional "--paired" flag for paired-end data.
pairedends=""
if (args.paired_ends):
    pairedends="--paired"
#Check for output directory and make it if neccessary
output_folder = re.sub('\/$', '', args.output_folder)
if os.path.isdir(args.output_folder): #if no out dir then make one
print "Existing output folder " + output_folder + " detected, will overwrite all internal files"
subprocess.check_call(['mkdir', '-p', output_folder])
# make tmp directory if necessary
tmpDIR=output_folder + '/HAMR_temp'
subprocess.check_call(['mkdir', '-p', tmpDIR])
# Prefix for all intermediate files inside the temp directory.
# (A timestamp-based prefix was previously computed here, but it was dead
# code: the value was immediately overwritten by the out_prefix-based tag.)
rTag=tmpDIR + '/' + 'HAMR.' + args.out_prefix
# Report whether we analyze the whole genome or only targeted BED regions.
run_mode = "genome-wide"
if (args.target_bed != 'unspecified'):
    run_mode = 'targeted'
inputBAM=args.bam
print 'Analyzing %s (%s)' %(inputBAM, run_mode)
# Analyze the whole input BAM by default; when a target BED is supplied,
# first restrict the BAM to those regions with samtools.
bamForAnalysis = inputBAM
if (args.target_bed != 'unspecified'):
    # extract alignments for the region(s) of interest
    target_bed = args.target_bed
    print 'Target BED is specified: ' + target_bed
    print 'Restricting BAM to regions in ' + target_bed
    inputBAMbasename=os.path.basename(inputBAM)
    bam_constrained = output_folder + '/' + re.sub('\.[^.]+$','.constrained.bam',inputBAMbasename)
    fout=open(bam_constrained,'wb')
    subprocess.check_call([SAMTOOLS,'view','-b',inputBAM,'-L',target_bed],stdout=fout)
    fout.close()
    subprocess.check_call([SAMTOOLS,'index',bam_constrained])
    bamForAnalysis=bam_constrained
print "BAM for HAMR analysis: " + bamForAnalysis
# Step 1: raw pileup of the BAM against the reference genome.
print 'Running RNApileup ' + rnapileup
rawpileup=rTag+'.pileup.raw'
frawpileup=open(rawpileup,'w')
subprocess.check_call([rnapileup,bamForAnalysis,args.genome_fas,pairedends],stdout=frawpileup)
frawpileup.close()
# Step 2: filter the pileup by base quality (and optionally drop read ends).
print 'Running filter_pileup...'
filteredpileup=rTag+'.pileup.filtered'
ffilteredpileup=open(filteredpileup,'w')
subprocess.check_call([filter_pileup,rawpileup,str(args.min_qual),str(int(args.filter_ends))],stdout=ffilteredpileup)
ffilteredpileup.close()
# Step 3: keep only sites whose read depth (pileup column 4) >= min_cov.
print ("Filter coverage...")
## this will output ALL sites with read depth >= min_cov!!
## this will be the total # of sites for HAMR analysis
filteredpileupcov=rTag+'.pileup.filtered.'+str(args.min_cov)
ffilteredpileupcov=open(filteredpileupcov,'w')
subprocess.check_call(['awk','$4>=' + str(args.min_cov),filteredpileup],stdout=ffilteredpileupcov)
ffilteredpileupcov.close()
# Step 4: turn the coverage-filtered pileup into a BED of mismatches.
print 'Running rnapileup2mismatchbed...'
# convert pileups into BED file with entry corresponding to the observed (ref nuc) --> (read nucleotide) transitions
mismatchbed=rTag+'.mismatch.bed'
fmismatchbed=open(mismatchbed,'w')
subprocess.check_call([rnapileup2mismatchbed,filteredpileupcov],stdout=fmismatchbed)
fmismatchbed.close()
# Step 5: tabulate per-site nucleotide frequencies.
print "converting mismatch BED to nucleotide frequency table"
# mismatchbed2table outputs all sites with at least 1 non-ref nuc
final_bed_file=mismatchbed
freq_table=rTag+'.freqtable.txt'
txt_output=open(freq_table,'w')
subprocess.check_call([mismatchbed2table, final_bed_file],stdout=txt_output)
txt_output.close()
#print "filtering out sites based on non-ref/ref proportions"
# filter by:
# min ref nuc pct
# non-ref/ref > 1%
# Step 6: keep sites whose reference-matching fraction meets refpercent
# (columns 5-8 are per-nucleotide counts, column 9 is the non-ref count).
final_freq_table=rTag+'.freqtable.final.txt'
min_ref_pct=args.refpercent
outf=open(final_freq_table,'w')
#subprocess.check_call(['awk','{cov=$5+$6+$7+$8;nonref=$9; ref=cov-nonref; if (ref/cov>=0.05 && nonref/ref>=0.01) print;}', freq_table],stdout=outf)
subprocess.check_call(['awk','{cov=$5+$6+$7+$8;nonref=$9; ref=cov-nonref; if (ref/cov>='+min_ref_pct+') print;}', freq_table],stdout=outf)
outf.close()
#OUTPUT steps
# Step 7: statistical testing in R (detect_mods.R) over the final table.
print "testing for statistical significance..."
last_tmp_file= final_freq_table #rTag+'.txt'
raw_file=output_folder+'/'+args.out_prefix+'.raw.txt'
outfn=open(raw_file,'w')
subprocess.check_call([RSCRIPT,detect_mods_definite,last_tmp_file,args.seq_err,args.hypothesis,args.max_p,args.max_fdr,args.refpercent],stdout=outfn)
outfn.close()
print "predicting modification identity..."
retOut=subprocess.check_output(['grep', '-c','TRUE',raw_file])
true_mods = int(retOut)
prediction_file=output_folder+'/'+args.out_prefix+'.mods.txt'
if (true_mods > 0):
outfn=open(prediction_file,'w')
subprocess.check_call([RSCRIPT,classify_mods,raw_file,args.prediction_training_set],stdout=outfn)
outfn.close()
else:
sys.exit("No HAMR modifications predicted, output will contain raw table only\nHAMR analysis complete\n\n------------------------------\n")
print "converting output to bed format..."
# Emit one BED line per predicted modification site; FNR > 1 skips the header.
bed_file=output_folder+'/'+args.out_prefix+".mods.bed"
outfn=open(bed_file,'w')
subprocess.check_call(['awk', 'FNR > 1 {print $1"\t"$2"\t"(1+$2)"\t"$1";"$2"\t"$16"\t"$3}', prediction_file],stdout=outfn)
outfn.close()
threshold = int(args.min_cov)
print "calculating number of HAMR-accessible bases..."
# this is readily available from the filtered by min_cov pileup file
filt_pileup_file=filteredpileupcov
# Line count of the coverage-filtered pileup = number of sites analyzed.
retOut=subprocess.check_output(['awk', 'END{print NR}',filt_pileup_file])
HAMR_accessible_bases = int(retOut)
print "Sites analyzed (read depth>=%d): %d" % (threshold, HAMR_accessible_bases)
print "Modification sites found: " + str(true_mods)
# Write a one-row summary: modifications per million HAMR-accessible bases.
mods_per_acc_bases_file=output_folder+'/'+args.out_prefix+".hamr_acc_bases.txt"
outfn=open(mods_per_acc_bases_file,'w')
mods_per_acc_bases = float(true_mods)/float(HAMR_accessible_bases)*1000000
outfn.write('sample\tmods\thamr_accessible_bases\tmods_per_million_accessible_bases\n')
outfn.write(args.out_prefix+'\t'+str(true_mods)+'\t'+str(HAMR_accessible_bases)+'\t'+str(mods_per_acc_bases)+'\n')
outfn.close()
#conclusion message
print "HAMR analysis complete\n\n------------------------------\n"
# final_freq_table contains sites with ref / non-ref nucleotide mixtures
print "Sites used for analysis: %s" % final_freq_table
print "Statistical testing results: %s" % raw_file
print "Modification sites + predicted types saved to: %s" % prediction_file
|
import sys
import os
import time
import imp
import re
import subprocess
from util.hook import *
from util import output
def reload_all_modules(code):
    """Reset the bot's cached variables/commands and rerun its setup hook."""
    for attr in ('variables', 'commands'):
        setattr(code, attr, None)
    code.setup()
    output.info('Reloaded all modules')
def reload_module(code, name):
    """Reload a single module by name and re-register its hooks.

    Returns 1 when the module is not currently loaded; otherwise a dict with
    the module's 'name', 'location' (source path) and last-'time' modified.
    """
    name = name.replace('.py', '')
    if name not in sys.modules:
        return 1
    path = sys.modules[name].__file__
    # Reload from the .py source even if the cached path is compiled.
    if path.endswith('.pyc') or path.endswith('.pyo'):
        path = path[:-1]
    module = imp.load_source(name, path)
    sys.modules[name] = module
    if hasattr(module, 'setup'):
        module.setup(code)
    code.register(vars(module))
    code.bind()
    mtime = os.path.getmtime(module.__file__)
    modified = time.strftime('%H:%M:%S', time.gmtime(mtime))
    # Read the identity directly from the module object. The previous code
    # parsed str(module) with split()/strip(), which left a stray trailing
    # quote on the location and broke on paths containing spaces.
    module_name = module.__name__
    module_location = module.__file__
    output.info('Reloaded %s' % module)
    return {
        'name': module_name,
        'location': module_location,
        'time': modified
    }
@hook(cmds=['unload', 'unloadmodule', 'unloadmod'], args=True, priority='high', admin=True)
def unload_module(code, input):
    """Mark a module as unloaded and rebuild all module hooks (admin only)."""
    name = input.group(2)
    home = os.getcwd()
    # Use replace() like load_module does: str.strip('.py') removes any of
    # the characters '.', 'p', 'y' from BOTH ends, mangling names such as
    # "happy" -> "ha".
    name = name.replace('.py', '')
    # Get all files in modules directory
    tmp = os.listdir(os.path.join(home, 'modules'))
    modules = []
    for module in tmp:
        if module.endswith('.pyc'):
            continue
        module = module.replace('.py', '')
        modules.append(module)
    if name not in modules:
        return code.say('That module doesn\'t exist!')
    if name in code.unload:
        return code.say('It seems that module has already been set to say unloaded!')
    if name in code.load:
        # Cancel a pending load for this module before queueing the unload.
        code.load.remove(name)
    # filename = os.path.join(home, 'modules', name + '.py')
    code.unload.append(name)
    code.say('{b}Unloaded %s!' % name)
    reload_all_modules(code)
    code.say('{b}Reloaded all modules')
@hook(cmds=['load', 'loadmodule', 'loadmod'], args=True, priority='high', admin=True)
def load_module(code, input):
    """Load a module from the modules directory (admin only)."""
    name = input.group(2)
    home = os.getcwd()
    name = name.replace('.py', '')
    # Collect candidate module names from the modules directory,
    # ignoring compiled bytecode files.
    listing = os.listdir(os.path.join(home, 'modules'))
    modules = [entry.replace('.py', '') for entry in listing
               if not entry.endswith('.pyc')]
    if name not in modules:
        return code.say('{b}That module doesn\'t exist!')
    if name in code.modules:
        return code.say('{b}That module seems to be already loaded!')
    if name in code.load:
        return code.say('{b}It seems that module has already been set to load!')
    if name in code.unload:
        # The module was queued for unloading; cancel that first.
        code.unload.remove(name)
    # Try the actual load; on failure report the error without recording
    # the module as loaded.
    filename = os.path.join(home, 'modules', name + '.py')
    try:
        code.setup_module(name, filename, is_startup=False)
    except Exception as e:
        return code.say('{red}Error{c}: %s' % str(e))
    code.load.append(name)
    return code.say('{b}Loaded %s!' % name)
@hook(cmds=['reload', 'rld'], priority='high', thread=False, admin=True)
def reload(code, input):
    """Reloads a module, for use by admins only."""
    target = input.group(2)
    # No argument (or '*') means reload everything.
    if not target or target == '*':
        reload_all_modules(code)
        return code.reply('{b}Reloaded all modules.')
    try:
        info = reload_module(code, target)
    except Exception as e:
        return code.say('Error reloading %s: %s' % (target, str(e)))
    # reload_module returns 1 when the module was never loaded.
    if info == 1:
        return code.reply('The module {b}%s{b} isn\'t loaded! use %sload <module>' % (target, code.prefix))
    code.say(
        '{b}Reloaded {blue}%s{c} (from {blue}%s{c}) (version: {blue}%s{c}){b}' %
        (info['name'], info['location'], info['time']))
@hook(cmds=['update'], rate=20, admin=True)
def update(code, input):
    """Pulls the latest versions of all modules from Git"""
    if not sys.platform.startswith('linux'):
        output.warning('Warning: {b}Using a non-unix OS, might fail to work!')
    try:
        proc = subprocess.Popen(
            'git pull', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        git_data = proc.communicate()[0]
    except:
        return code.say('Either Git isn\'t installed or there was an error! (Using Windows?)')
    if git_data.strip('\n') == 'Already up-to-date.':
        return code.say('{b}No updates found.')
    data = git_data
    # NOTE(review): this only matches when the pull output is EXACTLY the
    # single "Updating <sha>..<sha>" line with nothing after it; confirm
    # that is really the conflict signature intended here.
    if re.match(r'^Updating [a-z0-9]{7}\.\.[a-z0-9]{7}$', data.strip('\n')):
        # Pretty sure files are conflicting...
        return code.say('{b}Files are conflicting with the update. Please refork the bot!')
    # The substitutions below progressively rewrite git's multi-line output
    # into a single colorized IRC summary line; their order matters.
    # per-file additions/subtractions that spam stuff
    data = re.sub(r'[0-9]+ [\+\-]+', '', data).replace('\n', ' ')
    # mode changes, as those are unimportant
    data = re.sub(r'create mode [0-9]+ [a-zA-Z0-9\/\\]+\.py', '', data)
    # commit hashes, different color
    data = re.sub(
        r'(?P<first>[a-z0-9]{7})\.\.(?P<second>[a-z0-9]{7})', '{purple}\g<first>..\g<second>{c}', data)
    # make different files depending on the importance
    data = re.sub(
        r'core/modules/(?P<name>[a-zA-Z0-9]+)\.py', '\g<name>.py ({red}core{c})', data)
    data = re.sub(
        r'core/(?P<name>[a-zA-Z0-9]+)\.py', '\g<name>.py ({red}core{c})', data)
    data = re.sub(r'code\.py', 'code.py ({red}base{c})', data)
    data = re.sub(
        r'modules/(?P<name>[a-zA-Z0-9]+)\.py', '\g<name>.py ({blue}module{c})', data)
    data = re.sub(
        r'util/(?P<name>[a-zA-Z0-9]+)\.py', '\g<name>.py ({pink}util{c})', data)
    data = re.sub(r'lib/(?P<dir>[a-zA-Z0-9]+)/(?P<name>[a-zA-Z0-9]+)\.py',
                  '\g<name>.py ({pink}\g<dir> - util{c})', data)
    data = data.replace('Fast-forward', '')
    # Do a little with file changes
    data = re.sub(
        r'(?P<files>[0-9]{1,3}) files? changed', '{green}\g<files>{c} file(s) changed', data)
    data = re.sub(r'(?P<ins>[0-9]{1,6}) insertions\(\+\)\, (?P<dels>[0-9]{1,6}) deletions\(\-\)',
                  '+{green}\g<ins>{c}/-{red}\g<dels>{c}', data)
    data = re.sub(
        r'(?P<chars>[0-9]{1,6}) insertions?\(\+\)', '{green}\g<chars>{c} addition(s)', data)
    data = re.sub(
        r'(?P<chars>[0-9]{1,6}) deletions?\(\+\)', '{green}\g<chars>{c} deletion(s)', data)
    # Collapse runs of spaces left behind by the removals above.
    while '  ' in data:
        data = data.replace('  ', ' ')
    code.say('Github: {b}' + data.strip())
    # If anything under the core paths changed, a full restart is required
    # rather than a module reload.
    core_stuff = ['code.py', 'core/', 'util/', 'lib/']
    for item in core_stuff:
        if item.lower() in git_data.lower().strip('\n'):
            code.say(
                '{b}{red}Core files have been edited, restarting the bot!{c}')
            return code.restart()
    reload_all_modules(code)
    code.say('{b}Reloaded all modules')
|
# updated SQLA schema display to work with pydot 1.0.2
from sqlalchemy.orm.properties import RelationshipProperty
from sqlalchemy.orm import sync
import pydot
import types
__all__ = [
'create_uml_graph', 'create_schema_graph', 'show_uml_graph',
'show_schema_graph'
]
def _mk_label(mapper, show_operations, show_attributes, show_datatypes,
              show_inherited, bordersize, show_table_name, show_schema_name, default_schema_name):
    """Return a Graphviz HTML-like label ('<<TABLE ...>>') for one mapper.

    The label contains the class name (optionally qualified with schema and
    table name), one row per mapped column when *show_attributes* is set, and
    one row per method defined directly on the class when *show_operations*
    is set.
    """
    schema_table_names = []
    if show_schema_name and (mapper.tables[0].schema or default_schema_name):
        schema_name = mapper.tables[0].schema if mapper.tables[0].schema else default_schema_name
        schema_table_names.append(schema_name)
    if show_table_name:
        schema_table_names.append(mapper.tables[0].name)
    mapper_name = '{} [{}]'.format(mapper.class_.__name__, '.'.join(
        schema_table_names)) if schema_table_names else mapper.class_.__name__
    html = '<<TABLE CELLSPACING="0" CELLPADDING="1" BORDER="0" CELLBORDER="%d" ALIGN="LEFT"><TR><TD><FONT POINT-SIZE="10">%s</FONT></TD></TR>' % (
        bordersize, mapper_name)

    def format_col(col):
        # One attribute row: "+name" plus optional ": Type".
        colstr = '+%s' % (col.name)
        if show_datatypes:
            colstr += ' : %s' % (col.type.__class__.__name__)
        return colstr

    if show_attributes:
        if not show_inherited:
            cols = [c for c in mapper.columns if c.table == mapper.tables[0]]
        else:
            cols = mapper.columns
        html += '<TR><TD ALIGN="LEFT">%s</TD></TR>' % '<BR ALIGN="LEFT"/>'.join(
            format_col(col) for col in cols)
    # When show_attributes is false no column rows are emitted. (An earlier
    # version built a sorted, formatted column list here and discarded the
    # result -- dead code with no side effects, removed.)
    if show_operations:
        html += '<TR><TD ALIGN="LEFT">%s</TD></TR>' % '<BR ALIGN="LEFT"/>'.join(
            '%s(%s)' % (name,", ".join(default is _mk_label and ("%s") % arg or ("%s=%s" % (arg,repr(default))) for default,arg in
                zip((func.__kwdefaults__ and len(func.__code__.co_varnames)-1-(len(func.__kwdefaults__) or 0) or func.__code__.co_argcount-1)*[_mk_label]+list(func.__kwdefaults__ or []), func.__code__.co_varnames[1:])
            ))
            for name,func in mapper.class_.__dict__.items() if isinstance(func, types.FunctionType) and func.__module__ == mapper.class_.__module__
        )
    html += '</TABLE>>'
    return html
def escape(name):
    """Wrap *name* in double quotes so it is a safe Graphviz identifier."""
    return '"{0}"'.format(name)
def create_uml_graph(mappers,
                     show_operations=True,
                     show_attributes=True,
                     show_inherited=True,
                     show_multiplicity_one=False,
                     show_datatypes=True,
                     show_table_name=False,
                     show_schema_name=False,
                     default_schema_name=None,
                     linewidth=1.0,
                     font="Bitstream-Vera Sans"):
    """Build a pydot UML class diagram for the given SQLAlchemy mappers.

    Each mapper becomes one plaintext node labelled via _mk_label;
    inheritance is drawn as an empty-arrowhead edge, and relationship
    properties become association edges with multiplicity labels.

    Args:
        mappers: iterable of SQLAlchemy Mapper objects to include.
        show_operations: include methods defined on the mapped class.
        show_attributes: include mapped columns in the node label.
        show_inherited: include columns from inherited tables.
        show_multiplicity_one: label the '1' end of to-one relations.
        show_datatypes: append column type names to attributes.
        show_table_name / show_schema_name / default_schema_name: control
            the bracketed qualifier in the node title (see _mk_label).
        linewidth: edge/border width; also used for arrow size.
        font: font family for node and edge labels.
    Returns:
        pydot.Dot graph configured for neato layout.
    """
    graph = pydot.Dot(
        prog='neato',
        mode="major",
        overlap="0",
        sep="0.01",
        dim="3",
        pack="True",
        ratio=".75")
    relations = set()
    for mapper in mappers:
        graph.add_node(
            pydot.Node(
                escape(mapper.class_.__name__),
                shape="plaintext",
                label=_mk_label(mapper, show_operations, show_attributes,
                                show_datatypes, show_inherited, linewidth,
                                show_table_name, show_schema_name, default_schema_name),
                fontname=font,
                fontsize="8.0",
            ))
        if mapper.inherits:
            # Inheritance: edge from parent class to child class.
            graph.add_edge(
                pydot.Edge(
                    escape(mapper.inherits.class_.__name__),
                    escape(mapper.class_.__name__),
                    arrowhead='none',
                    arrowtail='empty',
                    style="setlinewidth(%s)" % linewidth,
                    arrowsize=str(linewidth)))
        # Collect relationships; a frozenset of (prop, reverse_prop) ensures
        # bidirectional relations are drawn as a single edge.
        for loader in mapper.iterate_properties:
            if isinstance(loader,
                          RelationshipProperty) and loader.mapper in mappers:
                if hasattr(loader, 'reverse_property'):
                    relations.add(frozenset([loader, loader.reverse_property]))
                else:
                    relations.add(frozenset([loader]))
    for relation in relations:
        #if len(loaders) > 2:
        #    raise Exception("Warning: too many loaders for join %s" % join)
        args = {}

        def multiplicity_indicator(prop):
            # UML multiplicity: '*' for collections, '0..1' for nullable
            # to-one, optional '1' for mandatory to-one.
            if prop.uselist:
                return ' *'
            if hasattr(prop, 'local_side'):
                cols = prop.local_side
            else:
                cols = prop.local_columns
            if any(col.nullable for col in cols):
                return ' 0..1'
            if show_multiplicity_one:
                return ' 1'
            return ''

        if len(relation) == 2:
            # Bidirectional relation: one edge labelled at both ends.
            src, dest = relation
            from_name = escape(src.parent.class_.__name__)
            to_name = escape(dest.parent.class_.__name__)

            def calc_label(src, dest):
                return '+' + src.key + multiplicity_indicator(src)
            args['headlabel'] = calc_label(src, dest)
            args['taillabel'] = calc_label(dest, src)
            args['arrowtail'] = 'none'
            args['arrowhead'] = 'none'
            args['constraint'] = False
        else:
            # Unidirectional relation: arrow towards the target mapper.
            prop, = relation
            from_name = escape(prop.parent.class_.__name__)
            to_name = escape(prop.mapper.class_.__name__)
            args['headlabel'] = '+%s%s' % (prop.key,
                                           multiplicity_indicator(prop))
            args['arrowtail'] = 'none'
            args['arrowhead'] = 'vee'
        graph.add_edge(
            pydot.Edge(
                from_name,
                to_name,
                fontname=font,
                fontsize="7.0",
                style="setlinewidth(%s)" % linewidth,
                arrowsize=str(linewidth),
                **args))
    return graph
from sqlalchemy.dialects.postgresql.base import PGDialect
from sqlalchemy import Table, text, ForeignKeyConstraint
def _render_table_html(table, metadata, show_indexes, show_datatypes,
                       show_column_keys, show_schema_name, default_schema_name):
    """Return a Graphviz HTML-like label for one SQLAlchemy Table.

    Renders the (optionally schema-qualified) table name, one row per column
    (with optional type and (PK)/(FK) suffixes) and -- when *metadata* is
    bound to a PostgreSQL engine -- the table's indexes.
    """
    table_name = table.name
    if show_schema_name and (table.schema or default_schema_name):
        schema_name = table.schema if table.schema else default_schema_name
        table_name = '{}.{}'.format(schema_name, table_name)
    # add in (PK) OR (FK) suffixes to column names that are considered to be primary key or foreign key
    use_column_key_attr = hasattr(
        ForeignKeyConstraint, 'column_keys'
    ) # sqlalchemy > 1.0 uses column_keys to return list of strings for foreign keys, previously was columns
    if show_column_keys:
        if (use_column_key_attr):
            # sqlalchemy > 1.0
            fk_col_names = set([
                h for f in table.foreign_key_constraints
                for h in f.columns.keys()
            ])
        else:
            # sqlalchemy pre 1.0?
            fk_col_names = set([
                h.name for f in table.foreign_keys
                for h in f.constraint.columns
            ])
        # fk_col_names = set([h for f in table.foreign_key_constraints for h in f.columns.keys()])
        pk_col_names = set([f for f in table.primary_key.columns.keys()])
    else:
        fk_col_names = set()
        pk_col_names = set()

    def format_col_type(col):
        # Prefer the dialect-specific column spec; fall back to str(type).
        try:
            return col.type.get_col_spec()
        except (AttributeError, NotImplementedError):
            return str(col.type)

    def format_col_str(col):
        # add in (PK) OR (FK) suffixes to column names that are considered to be primary key or foreign key
        suffix = '(FK)' if col.name in fk_col_names else '(PK)' if col.name in pk_col_names else ''
        if show_datatypes:
            return "- %s : %s" % (col.name + suffix, format_col_type(col))
        else:
            return "- %s" % (col.name + suffix)
    html = '<<TABLE BORDER="1" CELLBORDER="0" CELLSPACING="0"><TR><TD ALIGN="CENTER">%s</TD></TR><TR><TD BORDER="1" CELLPADDING="0"></TD></TR>' % table_name
    html += ''.join('<TR><TD ALIGN="LEFT" PORT="%s">%s</TD></TR>' %
                    (col.name, format_col_str(col)) for col in table.columns)
    if metadata.bind and isinstance(metadata.bind.dialect, PGDialect):
        # postgres engine doesn't reflect indexes
        # NOTE(review): the query interpolates table/schema names via
        # str.format -- acceptable only because metadata is trusted.
        indexes = dict((name, defin) for name, defin in metadata.bind.execute(
            text(
                "SELECT indexname, indexdef FROM pg_indexes WHERE tablename = '{}' AND schemaname = '{}'"
                .format(table.name, table.schema))))
        if indexes and show_indexes:
            html += '<TR><TD BORDER="1" CELLPADDING="0"></TD></TR>'
            for index, defin in indexes.items():
                ilabel = 'UNIQUE' in defin and 'UNIQUE ' or 'INDEX '
                ilabel += defin[defin.index('('):]
                html += '<TR><TD ALIGN="LEFT">%s</TD></TR>' % ilabel
    html += '</TABLE>>'
    return html
def create_schema_graph(tables=None,
                        metadata=None,
                        show_indexes=True,
                        show_datatypes=True,
                        font="Bitstream-Vera Sans",
                        concentrate=True,
                        relation_options={},
                        rankdir='TB',
                        show_column_keys=False,
                        restrict_tables=None,
                        show_schema_name=False,
                        default_schema_name=None):
    """Build a pydot entity-relationship diagram for a set of tables.

    Either *tables* or *metadata* must be provided; when only metadata is
    given its tables are used (reflected on demand if empty).

    Args:
        show_column_keys (boolean, default=False): If true then add a PK/FK suffix to columns names that are primary and foreign keys
        restrict_tables (None or list of strings): Restrict the graph to only consider tables whose name are defined restrict_tables

    NOTE(review): the mutable default relation_options={} is only read
    (copied into relation_kwargs), never mutated, so it is harmless here.
    NOTE(review): passing tables=[] with metadata=None falls through to the
    ValueError branch -- confirm that is the intended behavior.
    """
    relation_kwargs = {'fontsize': "7.0"}
    relation_kwargs.update(relation_options)
    if metadata is None and tables is not None and len(tables):
        metadata = tables[0].metadata
    elif tables is None and metadata is not None:
        if not len(metadata.tables):
            metadata.reflect()
        tables = metadata.tables.values()
    else:
        raise ValueError("You need to specify at least tables or metadata")
    graph = pydot.Dot(
        prog="dot",
        mode="ipsep",
        overlap="ipsep",
        sep="0.01",
        concentrate=str(concentrate),
        rankdir=rankdir)
    # Restrict to the requested table names (case-insensitive match).
    if restrict_tables is None:
        restrict_tables = set([t.name.lower() for t in tables])
    else:
        restrict_tables = set([t.lower() for t in restrict_tables])
    tables = [t for t in tables if t.name.lower() in restrict_tables]
    for table in tables:
        graph.add_node(
            pydot.Node(
                str(table.name),
                shape="plaintext",
                label=_render_table_html(table, metadata, show_indexes,
                                         show_datatypes, show_column_keys,
                                         show_schema_name, default_schema_name),
                fontname=font,
                fontsize="7.0"))
    # One edge per foreign key between included tables; when the FK links
    # two primary keys it is treated as inheritance and drawn reversed.
    for table in tables:
        for fk in table.foreign_keys:
            if fk.column.table not in tables:
                continue
            edge = [table.name, fk.column.table.name]
            is_inheritance = fk.parent.primary_key and fk.column.primary_key
            if is_inheritance:
                edge = edge[::-1]
            graph_edge = pydot.Edge(
                dir='both',
                headlabel="+ %s" % fk.column.name,
                taillabel='+ %s' % fk.parent.name,
                arrowhead=is_inheritance and 'none' or 'odot',
                arrowtail=(fk.parent.primary_key or fk.parent.unique)
                and 'empty' or 'crow',
                fontname=font,
                #samehead=fk.column.name, sametail=fk.parent.name,
                *edge,
                **relation_kwargs)
            graph.add_edge(graph_edge)
# not sure what this part is for, doesn't work with pydot 1.0.2
#             graph_edge.parent_graph = graph.parent_graph
#            if table.name not in [e.get_source() for e in graph.get_edge_list()]:
#                graph.edge_src_list.append(table.name)
#            if fk.column.table.name not in graph.edge_dst_list:
#                graph.edge_dst_list.append(fk.column.table.name)
#            graph.sorted_graph_elements.append(graph_edge)
    return graph
def show_uml_graph(*args, **kwargs):
    """Render create_uml_graph(*args, **kwargs) to PNG and display it via PIL.

    NOTE(review): uses cStringIO, so this helper is Python 2 only. A
    'command' kwarg selects the external viewer, but kwargs are also
    forwarded to create_uml_graph, which does not accept it -- verify.
    """
    from cStringIO import StringIO
    from PIL import Image
    iostream = StringIO(create_uml_graph(*args, **kwargs).create_png())
    Image.open(iostream).show(command=kwargs.get('command', 'gwenview'))
def show_schema_graph(*args, **kwargs):
    """Render create_schema_graph(*args, **kwargs) to PNG and display via PIL.

    NOTE(review): uses cStringIO, so this helper is Python 2 only. A
    'command' kwarg selects the external viewer, but kwargs are also
    forwarded to create_schema_graph, which does not accept it -- verify.
    """
    from cStringIO import StringIO
    from PIL import Image
    iostream = StringIO(create_schema_graph(*args, **kwargs).create_png())
    Image.open(iostream).show(command=kwargs.get('command', 'gwenview'))
|
#
# PySNMP MIB module Wellfleet-MODULE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Wellfleet-MODULE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:40:57 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibIdentifier, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, Counter32, Integer32, ModuleIdentity, Bits, Unsigned32, Gauge32, IpAddress, NotificationType, TimeTicks, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "Counter32", "Integer32", "ModuleIdentity", "Bits", "Unsigned32", "Gauge32", "IpAddress", "NotificationType", "TimeTicks", "Counter64")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
wfHwModuleGroup, = mibBuilder.importSymbols("Wellfleet-COMMON-MIB", "wfHwModuleGroup")
# --- wfHwModuleTable: read-only hardware inventory of each installed module.
# Filled in by the module driver; read by SNMP to build driver load records.
wfHwModuleTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 1), )
if mibBuilder.loadTexts: wfHwModuleTable.setStatus('mandatory')
if mibBuilder.loadTexts: wfHwModuleTable.setDescription('Hardware Module Table Filled in by the Module Driver. Read by SNMP to build the driver load records ')
# Table rows are indexed by (slot, module).
wfHwModuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 1, 1), ).setIndexNames((0, "Wellfleet-MODULE-MIB", "wfHwModuleSlot"), (0, "Wellfleet-MODULE-MIB", "wfHwModuleModule"))
if mibBuilder.loadTexts: wfHwModuleEntry.setStatus('mandatory')
if mibBuilder.loadTexts: wfHwModuleEntry.setDescription('Hardware specific information about a slot.')
wfHwModuleSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfHwModuleSlot.setStatus('mandatory')
if mibBuilder.loadTexts: wfHwModuleSlot.setDescription('A unique value for each slot. Its value ranges between 1 and 4.')
wfHwModuleModule = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfHwModuleModule.setStatus('mandatory')
if mibBuilder.loadTexts: wfHwModuleModule.setDescription('This value ranges between 1 and 4')
# Module ID option: enumerates every known net-module hardware ID
# (generated from the ASN.1 source; do not edit the value list by hand).
wfHwModuleModIdOpt = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(512, 768, 769, 1280, 1281, 1408, 1536, 1537, 1538, 1540, 1541, 1542, 1544, 1545, 1546, 1584, 1585, 1586, 1588, 1589, 1590, 1592, 1593, 1594, 1664, 1792, 1793, 1800, 1801, 1808, 1809, 1825, 1833, 1856, 1857, 1864, 1865, 1872, 1873, 1889, 1897, 2048, 2049, 2176, 2304, 2560, 2816, 2944, 3072, 3073, 3328, 3329, 3330, 3584, 8000, 8160, 8161, 8320, 8321, 8500, 8501, 8704, 8720, 8728, 8729, 8744, 8736, 8752, 8768, 8776, 8780, 8784, 8800, 8808, 8816, 8832, 8848, 8864, 8872, 8873, 8890, 8891, 8972, 8880, 8896, 8912, 8928, 8944, 8960, 8976, 16384, 16640, 16896, 16897, 16898, 16899, 17152, 17153, 17154, 17155, 17408, 17664, 17920, 18176, 18432, 18688, 18944, 524288, 524544))).clone(namedValues=NamedValues(("spex", 512), ("spexhss", 768), ("spexhsd", 769), ("denm", 1280), ("denmhwf", 1281), ("iqe", 1408), ("dsnmnn", 1536), ("dsnmn1", 1537), ("dsnmn2", 1538), ("dsnm1n", 1540), ("dsnm11", 1541), ("dsnm12", 1542), ("dsnm2n", 1544), ("dsnm21", 1545), ("dsnm22", 1546), ("dsnmnnisdn", 1584), ("dsnmn1isdn", 1585), ("dsnmn2isdn", 1586), ("dsnm1nisdn", 1588), ("dsnm11isdn", 1589), ("dsnm12isdn", 1590), ("dsnm2nisdn", 1592), ("dsnm21isdn", 1593), ("dsnm22isdn", 1594), ("qsyncnm", 1664), ("mmfsdsas", 1792), ("mmfsddas", 1793), ("smfsdsas", 1800), ("smfsddas", 1801), ("mmscsas", 1808), ("mmscdas", 1809), ("smammbdas", 1825), ("mmasmbdas", 1833), ("mmfsdsashwf", 1856), ("mmfsddashwf", 1857), ("smfsdsashwf", 1864), ("smfsddashwf", 1865), ("mmscsashwf", 1872), ("mmscdashwf", 1873), ("smammbdashwf", 1889), ("mmasmbdashwf", 1897), ("dtnm", 2048), ("cam", 2049), ("iqtok", 2176), ("se100nm", 2304), ("asnqbri", 2560), ("mce1nm", 2816), ("dmct1nm", 2944), ("hwcompnm32", 3072), ("hwcompnm128", 3073), ("ahwcompnm32", 3328), ("ahwcompnm128", 3329), ("ahwcompnm256", 3330), ("shssinm", 3584), ("fbrmbdfen", 8000), ("ds1e1atm", 8160), ("ds3e3atm", 8161), 
("pmcdsync", 8320), ("pmcqsync", 8321), ("fvoippmcc", 8500), ("fvoipt1e1pmc", 8501), ("arnmbstr", 8704), ("arnmbsen", 8720), ("arnmbsfetx", 8728), ("arnmbsfefx", 8729), ("litembsfetx", 8744), ("arnssync", 8736), ("arnv34", 8752), ("arndcsu", 8768), ("arnft1", 8776), ("arnfe1", 8780), ("arnisdns", 8784), ("arnisdnu", 8800), ("arnisdb", 8808), ("arnstkrg", 8816), ("arnsenet", 8832), ("arntsync", 8848), ("arnentsync", 8864), ("arne7sync", 8872), ("arn7sync", 8873), ("arnvoice", 8890), ("arnvoicedsync", 8891), ("arnpbe7sx10", 8972), ("arntrtsync", 8880), ("arnmbenx10", 8896), ("arnmbtrx10", 8912), ("arnpbenx10", 8928), ("arnpbtrx10", 8944), ("arnpbtenx10", 8960), ("arnpbttrx10", 8976), ("snm10t16", 16384), ("snm100t2", 16640), ("snmatmoc31mm", 16896), ("snmatmoc31dmm", 16897), ("snmatmoc31sm", 16898), ("snmatmoc31dsm", 16899), ("snmfddismm", 17152), ("snmfddisms", 17153), ("snmfddissm", 17154), ("snmfddisss", 17155), ("snm10f8", 17408), ("snm100f2", 17664), ("snm10t16p4", 17920), ("snm100t2p4", 18176), ("snm10t14100t1", 18432), ("snm100t16", 18688), ("snm10t14100f1", 18944), ("atm5000ah", 524288), ("atm5000bh", 524544)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfHwModuleModIdOpt.setStatus('mandatory')
if mibBuilder.loadTexts: wfHwModuleModIdOpt.setDescription('Module IDs for the net modules modules')
wfHwModuleModRev = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 1, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfHwModuleModRev.setStatus('mandatory')
if mibBuilder.loadTexts: wfHwModuleModRev.setDescription('The revision level of the module. High byte is in upper 2 bytes.')
wfHwModuleModSerialNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 1, 1, 5), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfHwModuleModSerialNumber.setStatus('mandatory')
if mibBuilder.loadTexts: wfHwModuleModSerialNumber.setDescription('The serial number of the module.')
wfHwModuleArtworkRev = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 1, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfHwModuleArtworkRev.setStatus('mandatory')
if mibBuilder.loadTexts: wfHwModuleArtworkRev.setDescription('The Artwork Revision number of the module')
wfHwModuleMemorySize1 = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 1, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfHwModuleMemorySize1.setStatus('mandatory')
if mibBuilder.loadTexts: wfHwModuleMemorySize1.setDescription('Size (in bytes) of memory #1.')
wfHwModuleMemorySize2 = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 1, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfHwModuleMemorySize2.setStatus('mandatory')
if mibBuilder.loadTexts: wfHwModuleMemorySize2.setDescription('Size (in bytes) of memory #2.')
wfHwModuleDaughterBdIdOpt = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 1, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfHwModuleDaughterBdIdOpt.setStatus('mandatory')
if mibBuilder.loadTexts: wfHwModuleDaughterBdIdOpt.setDescription('Daughterboard IDs that may be attached to net modules')
# LED status/state words: 2 bits per LED, 16 LEDs per 32-bit word
# (three word pairs cover LEDs 1-16, 17-32, 33-48).
wfHwModuleLEDStatus1 = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 1, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfHwModuleLEDStatus1.setStatus('mandatory')
if mibBuilder.loadTexts: wfHwModuleLEDStatus1.setDescription("Bit mask representing the status of the LEDs on the module. Each LED is represented by 2 bits. LED #1 status is indicated by the 2 least significant bits. LED #16 status is indicated by the 2 most significant bits. How this bit mask is interpreted depends on the module ID. For example, some modules use this value to report what color the LED is currently set to ('00' off, '01' yellow, '10' green). This value can be used by a NMS that needs to know this information for whatever reason. Some modules may not support this value in which case it should be set to zero.")
wfHwModuleLEDState1 = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 1, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfHwModuleLEDState1.setStatus('mandatory')
if mibBuilder.loadTexts: wfHwModuleLEDState1.setDescription("Bit mask representing the state of the LEDs on the module. Each LED is represented by 2 bits. LED #1 state is indicated by the 2 least significant bits. LED #16 state is indicated by the 2 most significant bits. How this bit mask is interpreted depends on the module ID. For example, some modules use this value to indicate a LED is flashing (value of '01') or a solid color (value of '00'). This value can be used by a NMS that needs to know this information for whatever reason. Some modules may not support this value in which case it should be set to zero.")
wfHwModuleLEDStatus2 = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 1, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfHwModuleLEDStatus2.setStatus('mandatory')
if mibBuilder.loadTexts: wfHwModuleLEDStatus2.setDescription("Bit mask representing the status of the LEDs on the module. Each LED is represented by 2 bits. LED #17 status is indicated by the 2 least significant bits. LED #32 status is indicated by the 2 most significant bits. How this bit mask is interpreted depends on the module ID. For example, some modules use this value to report what color the LED is currently set to ('00' off, '01' yellow, '10' green). This value can be used by a NMS that needs to know this information for whatever reason. Some modules may not support this value in which case it should be set to zero.")
wfHwModuleLEDState2 = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 1, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfHwModuleLEDState2.setStatus('mandatory')
if mibBuilder.loadTexts: wfHwModuleLEDState2.setDescription("Bit mask representing the state of the LEDs on the module. Each LED is represented by 2 bits. LED #17 state is indicated by the 2 least significant bits. LED #32 state is indicated by the 2 most significant bits. How this bit mask is interpreted depends on the module ID. For example, some modules use this value to indicate a LED is flashing (value of '01') or a solid color (value of '00'). This value can be used by a NMS that needs to know this information for whatever reason. Some modules may not support this value in which case it should be set to zero.")
wfHwModuleLEDStatus3 = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 1, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfHwModuleLEDStatus3.setStatus('mandatory')
if mibBuilder.loadTexts: wfHwModuleLEDStatus3.setDescription("Bit mask representing the status of the LEDs on the module. Each LED is represented by 2 bits. LED #33 status is indicated by the 2 least significant bits. LED #48 status is indicated by the 2 most significant bits. How this bit mask is interpreted depends on the module ID. For example, some modules use this value to report what color the LED is currently set to ('00' off, '01' yellow, '10' green). This value can be used by a NMS that needs to know this information for whatever reason. Some modules may not support this value in which case it should be set to zero.")
wfHwModuleLEDState3 = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 1, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfHwModuleLEDState3.setStatus('mandatory')
if mibBuilder.loadTexts: wfHwModuleLEDState3.setDescription("Bit mask representing the state of the LEDs on the module. Each LED is represented by 2 bits. LED #33 state is indicated by the 2 least significant bits. LED #48 state is indicated by the 2 most significant bits. How this bit mask is interpreted depends on the module ID. For example, some modules use this value to indicate a LED is flashing (value of '01') or a solid color (value of '00'). This value can be used by a NMS that needs to know this information for whatever reason. Some modules may not support this value in which case it should be set to zero.")
# --- wfModuleTable: per-slot buffer-balancing configuration (Barracuda).
# The *Weight columns tune how the buffer balance algorithm distributes
# buffers between line types; ranges 1-10, defaults set via clone().
wfModuleTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 2), )
if mibBuilder.loadTexts: wfModuleTable.setStatus('mandatory')
if mibBuilder.loadTexts: wfModuleTable.setDescription('This table is used by the module driver for Barracuda')
wfModuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 2, 1), ).setIndexNames((0, "Wellfleet-MODULE-MIB", "wfModuleSlot"))
if mibBuilder.loadTexts: wfModuleEntry.setStatus('mandatory')
if mibBuilder.loadTexts: wfModuleEntry.setDescription('Hardware specific information about a slot.')
wfModuleDelete = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("created", 1), ("deleted", 2))).clone('created')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfModuleDelete.setStatus('mandatory')
if mibBuilder.loadTexts: wfModuleDelete.setDescription('create/delete parameter')
wfModuleSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfModuleSlot.setStatus('mandatory')
if mibBuilder.loadTexts: wfModuleSlot.setDescription('A unique value for each slot. Its value ranges between 1 and 14. There are products in this family that contain 1, 5, and 14 slots.')
wfModuleTimerFrequency = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("timerdefault", 1))).clone('timerdefault')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfModuleTimerFrequency.setStatus('mandatory')
if mibBuilder.loadTexts: wfModuleTimerFrequency.setDescription('This value determines the frequency for the buffer balance algorithm to run')
wfModuleBufferBalance = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("txrx", 1), ("none", 2), ("rx", 3), ("tx", 4))).clone('txrx')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfModuleBufferBalance.setStatus('mandatory')
if mibBuilder.loadTexts: wfModuleBufferBalance.setDescription('Enable/Disable buffer balancing algorithm selectively')
wfModuleFddiWeight = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10)).clone(6)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfModuleFddiWeight.setStatus('mandatory')
if mibBuilder.loadTexts: wfModuleFddiWeight.setDescription('This value determines the weight of the fddi line for the buffer balancing algorithm')
wfModuleTokenRingWeight = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10)).clone(4)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfModuleTokenRingWeight.setStatus('mandatory')
if mibBuilder.loadTexts: wfModuleTokenRingWeight.setDescription('This value determines the weight of the token-ring for the buffer balancing algorithm')
wfModuleCsmacdWeight = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 2, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10)).clone(3)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfModuleCsmacdWeight.setStatus('mandatory')
if mibBuilder.loadTexts: wfModuleCsmacdWeight.setDescription('This value determines the weight of the csmacd line for the buffer balancing algorithm')
wfModuleSyncWeight = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 2, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10)).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfModuleSyncWeight.setStatus('mandatory')
if mibBuilder.loadTexts: wfModuleSyncWeight.setDescription('This value determines the weight of the sync line for the buffer balancing algorithm')
wfModuleFreeBufferCredits = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 2, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfModuleFreeBufferCredits.setStatus('mandatory')
if mibBuilder.loadTexts: wfModuleFreeBufferCredits.setDescription('This attribute indicates the number of buffers available to line drivers but not used by them')
wfModuleTotalBufferCredits = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 2, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfModuleTotalBufferCredits.setStatus('mandatory')
if mibBuilder.loadTexts: wfModuleTotalBufferCredits.setDescription('This attribute indicates the total number of buffers available to line drivers')
wfModuleRestart = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 2, 1, 11), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfModuleRestart.setStatus('mandatory')
if mibBuilder.loadTexts: wfModuleRestart.setDescription('This attribute should be touched after the queue lengths are configured in the line-records')
wfModuleCsmacd100Weight = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 2, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10)).clone(6)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfModuleCsmacd100Weight.setStatus('mandatory')
if mibBuilder.loadTexts: wfModuleCsmacd100Weight.setDescription('This value determines the weight of the csmacd 100MB line for the buffer balancing algorithm')
wfModuleBisyncWeight = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 2, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10)).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfModuleBisyncWeight.setStatus('mandatory')
if mibBuilder.loadTexts: wfModuleBisyncWeight.setDescription('This value determines the weight of the bisync line for the buffer balancing algorithm')
wfModuleHssiWeight = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 1, 4, 2, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10)).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfModuleHssiWeight.setStatus('mandatory')
if mibBuilder.loadTexts: wfModuleHssiWeight.setDescription('This value determines the weight of the hssi line for the buffer balancing algorithm')
# Export all symbols so other MIB modules can import them by name.
mibBuilder.exportSymbols("Wellfleet-MODULE-MIB", wfHwModuleModRev=wfHwModuleModRev, wfHwModuleMemorySize1=wfHwModuleMemorySize1, wfHwModuleEntry=wfHwModuleEntry, wfHwModuleModIdOpt=wfHwModuleModIdOpt, wfModuleSyncWeight=wfModuleSyncWeight, wfHwModuleLEDStatus1=wfHwModuleLEDStatus1, wfModuleHssiWeight=wfModuleHssiWeight, wfModuleEntry=wfModuleEntry, wfHwModuleTable=wfHwModuleTable, wfHwModuleLEDState2=wfHwModuleLEDState2, wfModuleBufferBalance=wfModuleBufferBalance, wfHwModuleLEDState3=wfHwModuleLEDState3, wfHwModuleLEDState1=wfHwModuleLEDState1, wfModuleTokenRingWeight=wfModuleTokenRingWeight, wfModuleCsmacdWeight=wfModuleCsmacdWeight, wfModuleFddiWeight=wfModuleFddiWeight, wfHwModuleArtworkRev=wfHwModuleArtworkRev, wfModuleFreeBufferCredits=wfModuleFreeBufferCredits, wfModuleTable=wfModuleTable, wfHwModuleLEDStatus3=wfHwModuleLEDStatus3, wfModuleSlot=wfModuleSlot, wfHwModuleDaughterBdIdOpt=wfHwModuleDaughterBdIdOpt, wfHwModuleMemorySize2=wfHwModuleMemorySize2, wfHwModuleModule=wfHwModuleModule, wfModuleDelete=wfModuleDelete, wfModuleRestart=wfModuleRestart, wfModuleTimerFrequency=wfModuleTimerFrequency, wfModuleTotalBufferCredits=wfModuleTotalBufferCredits, wfModuleBisyncWeight=wfModuleBisyncWeight, wfModuleCsmacd100Weight=wfModuleCsmacd100Weight, wfHwModuleSlot=wfHwModuleSlot, wfHwModuleLEDStatus2=wfHwModuleLEDStatus2, wfHwModuleModSerialNumber=wfHwModuleModSerialNumber)
|
# ------------------------------------------------------------------------------
# Copyright 2020 Forschungszentrum Jülich GmbH and Aix-Marseille Université
# "Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements; and to You under the Apache License,
# Version 2.0. "
#
# Forschungszentrum Jülich
# Institute: Institute for Advanced Simulation (IAS)
# Section: Jülich Supercomputing Centre (JSC)
# Division: High Performance Computing in Neuroscience
# Laboratory: Simulation Laboratory Neuroscience
# Team: Multi-scale Simulation and Design
# ------------------------------------------------------------------------------
from EBRAINS_InterscaleHUB.refactored_modular.elephant_delegator import ElephantDelegator
from EBRAINS_ConfigManager.global_configurations_manager.xml_parsers.default_directories_enum import DefaultDirectories
class Analyzer:
    """Facade for analysing simulation data through the Elephant delegator."""

    def __init__(self, param, configurations_manager, log_settings):
        """Create the component logger and the Elephant delegator.

        Parameters
        ----------
        param : Any
            Parameters forwarded unchanged to ``ElephantDelegator``.
        configurations_manager : Any
            Provides ``load_log_configurations`` for creating the logger.
        log_settings : Any
            Logging configuration forwarded to the configurations manager.
        """
        self._log_settings = log_settings
        self._configurations_manager = configurations_manager
        # Component-specific logger writing into the simulation results dir.
        self.__logger = configurations_manager.load_log_configurations(
            name="Analyzer",
            log_configurations=log_settings,
            target_directory=DefaultDirectories.SIMULATION_RESULTS)
        self.__elephant_delegator = ElephantDelegator(
            param, configurations_manager, log_settings)
        self.__logger.info("Initialised")

    def spiketrains_to_rate(self, count, spiketrains):
        """Convert spiketrains to rates via the Elephant delegator.

        Parameters
        ----------
        count : int
            Forwarded to the delegator; presumably the index of the current
            analysis interval — TODO confirm against ElephantDelegator.
        spiketrains : Any
            The spiketrain data to be converted.

        Returns
        -------
        Whatever ``ElephantDelegator.spiketrains_to_rate`` produces.
        """
        # TODO Discuss how to handle and call the available Analysis wrappers
        # TODO Validate the data before analysis, otherwise return an ERROR response
        # TODO First usecase functions are rate-to-spike and spike-to-rate
        return self.__elephant_delegator.spiketrains_to_rate(count, spiketrains)
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
These pipelines are developed by the Poldrack lab at Stanford University
(https://poldracklab.stanford.edu/) for use at
the Center for Reproducible Neuroscience (http://reproducibility.stanford.edu/),
as well as for open-source software distribution.
"""
from __future__ import absolute_import, division, print_function
import datetime
from os import path as op
import runpy
# Load the vendored nipype metadata (REQUIRES etc.) without importing it.
nipype_info = runpy.run_path(op.join(op.abspath(op.dirname(__file__)),
                                     'nipype', 'info.py'))

# Package metadata consumed by setup.py.
__version__ = '0.3.5-dev'
__packagename__ = 'niworkflows'
__author__ = 'The CRN developers'
__copyright__ = 'Copyright {}, Center for Reproducible Neuroscience, Stanford University'.format(
    datetime.datetime.now().year)
__credits__ = ['Oscar Esteban', 'Ross Blair', 'Shoshana L. Berleant', 'Chris Gorgolewski',
               'Russell A. Poldrack']
__license__ = '3-clause BSD'
__maintainer__ = 'Oscar Esteban'
__email__ = 'crn.poldracklab@gmail.com'
__status__ = 'Prototype'
__description__ = "NeuroImaging Workflows provides processing tools for magnetic resonance images of the brain."
__longdesc__ = "NeuroImaging Workflows (NIWorkflows) is a selection of image processing workflows for magnetic resonance images of the brain. It is designed to provide an easily accessible, state-of-the-art interface that is robust to differences in scan acquisition protocols and that requires minimal user input. This open-source neuroimaging data processing tool is being developed as a part of the MRI image analysis and reproducibility platform offered by the CRN."

DOWNLOAD_URL = (
    'https://pypi.python.org/packages/source/{name[0]}/{name}/{name}-{ver}.tar.gz'.format(
        name=__packagename__, ver=__version__))
URL = 'https://github.com/poldracklab/{}'.format(__packagename__)

CLASSIFIERS = [
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Science/Research',
    'Topic :: Scientific/Engineering :: Image Recognition',
    'License :: OSI Approved :: BSD License',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
]

REQUIRES = nipype_info['REQUIRES'] + [
    'nilearn>=0.2.6',
    'sklearn',
    'pandas',
    'matplotlib',
    'jinja2',
    'svgutils',
    'seaborn',
]
SETUP_REQUIRES = []
REQUIRES += SETUP_REQUIRES
LINKS_REQUIRES = []
TESTS_REQUIRES = ['mock', 'codecov', 'pytest-xdist', 'pytest']
EXTRA_REQUIRES = {
    'doc': ['sphinx'],
    'tests': TESTS_REQUIRES,
    'duecredit': ['duecredit']
}
# Enable a handle to install all extra dependencies at once.
# BUG FIX: the original comprehension produced a list of lists
# ([['sphinx'], [...], ...]); setuptools' extras_require expects a flat
# list of requirement strings, so flatten the per-extra lists.
EXTRA_REQUIRES['all'] = [dep for deps in EXTRA_REQUIRES.values() for dep in deps]
|
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.result as result
from benchexec.tools.template import BaseTool2
class Tool(BaseTool2):
    """
    Tool info module for GWIT. GWIT (as in 'guess what I am thinking' or as in
    'GDart witness validator') is a witness validator for SVCOMP witnesses for
    Java programs, based on the *GDart* tool ensemble.
    https://github.com/tudo-aqua/gwit
    """

    def executable(self, tool_locator):
        """Locate the run-gwit.sh launcher script."""
        return tool_locator.find_executable("run-gwit.sh")

    def version(self, executable):
        """Query the tool's version via its '-v' flag."""
        return self._version_from_tool(executable, arg="-v")

    def name(self):
        """Human-readable tool name."""
        return "GWIT"

    def cmdline(self, executable, options, task, rlimits):
        """Assemble the command line: executable, options, optional property
        file, then all task input files."""
        cmd = [executable]
        cmd.extend(options)
        if task.property_file:
            cmd.append(task.property_file)
        cmd.extend(task.input_files)
        return cmd

    def determine_result(self, run):
        """Map the tool's '== ...' verdict markers in the output to BenchExec
        result constants; anything else counts as an error."""
        verdicts = (
            ("== ERROR", result.RESULT_FALSE_PROP),
            ("== OK", result.RESULT_TRUE_PROP),
            ("== DONT-KNOW", result.RESULT_UNKNOWN),
        )
        for marker, verdict in verdicts:
            if run.output.any_line_contains(marker):
                return verdict
        return result.RESULT_ERROR
|
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Converting MySQL and Python types
"""
import struct
import datetime
import time
from decimal import Decimal
from . import errors
from .constants import FieldType, FieldFlag
class ConverterBase(object):
    """Base converter: identity conversions plus charset/unicode settings."""

    def __init__(self, charset='utf8', use_unicode=True):
        # Conversion tables; populated by subclasses when needed.
        self.python_types = None
        self.mysql_types = None
        self.set_charset(charset)
        self.set_unicode(use_unicode)

    def set_charset(self, charset):
        """Set the character set, falling back to 'utf8' when None."""
        self.charset = charset if charset is not None else 'utf8'

    def set_unicode(self, value=True):
        """Toggle whether results should be returned as unicode strings."""
        self.use_unicode = value

    def to_mysql(self, value):
        """Identity conversion towards MySQL."""
        return value

    def to_python(self, vtype, value):
        """Identity conversion towards Python."""
        return value

    def escape(self, buf):
        """No-op escaping; subclasses override."""
        return buf

    def quote(self, buf):
        """Quote by plain string conversion; subclasses override."""
        return str(buf)
class MySQLConverter(ConverterBase):
"""
A converted class grouping:
o escape method: for escpaing values send to MySQL
o quoting method: for quoting values send to MySQL in statements
o conversion mapping: maps Python and MySQL data types to
function for converting them.
This class should be overloaded whenever one needs differences
in how values are to be converted. Each MySQLConnection object
has a default_converter property, which can be set like
MySQL.converter(CustomMySQLConverter)
"""
def __init__(self, charset=None, use_unicode=True):
    # Delegate charset/unicode setup to the base converter.
    ConverterBase.__init__(self, charset, use_unicode)
def escape(self, value):
    """
    Escapes special characters as MySQL expects to receive them.

    As found in MySQL source mysys/charset.c

    Returns the value unchanged when it is None or numeric, otherwise
    the escaped string/bytes.
    """
    if value is None:
        return value
    elif isinstance(value, (int, float, Decimal)):
        return value
    # Replacement order matters: backslashes must be doubled first.
    if isinstance(value, bytes):
        pairs = (
            (b'\\', b'\\\\'),
            (b'\n', b'\\n'),
            (b'\r', b'\\r'),
            (b'\047', b'\134\047'),  # single quotes
            (b'\042', b'\134\042'),  # double quotes
            (b'\032', b'\134\032'),  # for Win32
        )
    else:
        pairs = (
            ('\\', '\\\\'),
            ('\n', '\\n'),
            ('\r', '\\r'),
            ('\047', '\134\047'),  # single quotes
            ('\042', '\134\042'),  # double quotes
            ('\032', '\134\032'),  # for Win32
        )
    for needle, replacement in pairs:
        value = value.replace(needle, replacement)
    return value
def quote(self, buf):
    """
    Quote the parameters for commands. General rules:
     o numbers are returned as bytes using the ascii codec
     o None is returned as bytes('NULL')
     o everything else is single quoted '<bytes>'

    Returns a bytes object.
    """
    if buf is None:
        return b"NULL"
    if isinstance(buf, (int, float, Decimal)):
        return str(buf).encode('ascii')
    return b"'" + buf + b"'"
def to_mysql(self, value):
    """Dispatch *value* to the ``_<typename>_to_mysql`` converter method."""
    method_name = '_{}_to_mysql'.format(type(value).__name__.lower())
    return getattr(self, method_name)(value)
def _int_to_mysql(self, value):
    """Pass integers through as Python int."""
    return int(value)
def _long_to_mysql(self, value):
    """Convert to int (long and int are equal in Python 3)."""
    return int(value) #long and int are equals
def _float_to_mysql(self, value):
    """Pass floats through as Python float."""
    return float(value)
def _str_to_mysql(self, value):
    """Encode a text string using the configured character set."""
    return value.encode(self.charset)
def _bytes_to_mysql(self, value):
    """Bytes are sent as-is."""
    return value
def _bool_to_mysql(self, value):
    """Represent a boolean as MySQL's 1/0 integers."""
    return 1 if value else 0
def _nonetype_to_mysql(self, value):
    """
    Return None untouched; the actual conversion from None to NULL
    happens later in the quoting functionality.
    """
    return None
def _datetime_to_mysql(self, value):
    """
    Converts a datetime instance to an ascii bytes string for MySQL,
    formatted as %Y-%m-%d %H:%M:%S[.%f] (fraction only when non-zero).
    """
    fields = [value.year, value.month, value.day,
              value.hour, value.minute, value.second]
    template = '{:d}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}'
    if value.microsecond:
        template += '.{:06d}'
        fields.append(value.microsecond)
    return template.format(*fields).encode('ascii')
def _date_to_mysql(self, value):
    """
    Converts a date instance to an ascii bytes string for MySQL,
    formatted as %Y-%m-%d.
    """
    formatted = '{0.year:d}-{0.month:02d}-{0.day:02d}'.format(value)
    return formatted.encode('ascii')
def _time_to_mysql(self, value):
    """
    Converts a time instance to an ascii bytes string for MySQL,
    formatted as %H:%M:%S[.%f] (fraction only when non-zero).
    """
    fmt = '%H:%M:%S.%f' if value.microsecond else '%H:%M:%S'
    return value.strftime(fmt).encode('ascii')
def _struct_time_to_mysql(self, value):
"""
Converts a time.struct_time sequence to a string suitable
for MySQL.
The returned string has format: %Y-%m-%d %H:%M:%S
Returns a bytes or None when not valid.
"""
return time.strftime('%Y-%m-%d %H:%M:%S', value).encode('ascii')
def _timedelta_to_mysql(self, value):
"""
Converts a timedelta instance to a string suitable for MySQL.
The returned string has format: %H:%M:%S
Returns a bytes.
"""
(hours, r) = divmod(value.seconds, 3600)
(mins, secs) = divmod(r, 60)
hours = hours + (value.days * 24)
if value.microseconds:
return '{:02d}:{:02d}:{:02d}.{:06d}'.format(
hours, mins, secs, value.microseconds).encode('ascii')
return '{:02d}:{:02d}:{:02d}'.format(
hours, mins, secs).encode('ascii')
def _decimal_to_mysql(self, value):
"""
Converts a decimal.Decimal instance to a string suitable for
MySQL.
Returns a bytes or None when not valid.
"""
if isinstance(value, Decimal):
return str(value).encode('ascii')
return None
def to_python(self, flddsc, value):
    """
    Converts a given value coming from MySQL to a certain type in Python.
    The flddsc contains additional information for the field in the
    table. It's an element from MySQLCursor.description.
    Returns a mixed value.
    """
    res = value  # NOTE(review): never read again — looks vestigial
    # NOTE(review): anything comparing equal to 0 (the raw \x00 case) is
    # treated as NULL for every field type except BIT; confirm against the
    # protocol layer that this is the intended NULL marker.
    if value == 0 and flddsc[1] != FieldType.BIT:  # \x00
        # Don't go further when we hit a NULL value
        return None
    if value is None:
        return None
    # Dispatch on the column's type code, e.g. '_DATETIME_to_python'.
    func_name = '_{}_to_python'.format(FieldType.get_info(flddsc[1]))
    try:
        return getattr(self, func_name)(value, flddsc)
    except KeyError:
        # If one type is not defined, we just return the value as str
        return value.decode('utf-8')
    except ValueError as e:
        # Re-raise with the field name appended for easier debugging.
        raise ValueError("%s (field %s)" % (e, flddsc[0]))
    except TypeError as e:
        raise TypeError("%s (field %s)" % (e, flddsc[0]))
    except:
        raise
def _FLOAT_to_python(self, v, desc=None):
    """
    Returns v as float type.
    """
    return float(v)

# DOUBLE columns share the FLOAT conversion.
_DOUBLE_to_python = _FLOAT_to_python

def _INT_to_python(self, v, desc=None):
    """
    Returns v as int type.
    """
    return int(v)

# All small integer column types convert the same way.
_TINY_to_python = _INT_to_python
_SHORT_to_python = _INT_to_python
_INT24_to_python = _INT_to_python

def _LONG_to_python(self, v, desc=None):
    """
    Returns v as long type.
    """
    # Python 3 has a single int type, so this mirrors _INT_to_python.
    return int(v)

_LONGLONG_to_python = _LONG_to_python
def _DECIMAL_to_python(self, v, desc=None):
"""
Returns v as a decimal.Decimal.
"""
s = v.decode(self.charset)
return Decimal(s)
_NEWDECIMAL_to_python = _DECIMAL_to_python
def _str(self, v, desc=None):
"""
Returns v as str type.
"""
return str(v)
def _BIT_to_python(self, v, dsc=None):
"""Returns BIT columntype as integer"""
s = v
if len(s) < 8:
s = b'\x00'*(8-len(s)) + s
return struct.unpack('>Q', s)[0]
def _DATE_to_python(self, v, dsc=None):
"""
Returns DATE column type as datetime.date type.
"""
pv = None
try:
pv = datetime.date(*[ int(s) for s in v.split(b'-')])
except ValueError:
return None
else:
return pv
_NEWDATE_to_python = _DATE_to_python
def _TIME_to_python(self, v, dsc=None):
"""
Returns TIME column type as datetime.time type.
"""
pv = None
try:
(hms, fs) = v.split(b'.')
fs = int(fs.ljust(6, b'0'))
except ValueError:
hms = v
fs = 0
try:
(h, m, s) = [ int(s) for s in hms.split(b':')]
pv = datetime.timedelta(hours=h, minutes=m, seconds=s,
microseconds=fs)
except ValueError:
raise ValueError(
"Could not convert {} to python datetime.timedelta".format(v))
else:
return pv
def _DATETIME_to_python(self, v, dsc=None):
"""
Returns DATETIME column type as datetime.datetime type.
"""
pv = None
try:
(sd, st) = v.split(b' ')
if len(st) > 8:
(hms, fs) = st.split(b'.')
fs = int(fs.ljust(6, b'0'))
else:
hms = st
fs = 0
dt = [ int(v) for v in sd.split(b'-') ] +\
[ int(v) for v in hms.split(b':') ] + [fs,]
pv = datetime.datetime(*dt)
except ValueError:
pv = None
return pv
_TIMESTAMP_to_python = _DATETIME_to_python
def _YEAR_to_python(self, v, desc=None):
"""Returns YEAR column type as integer"""
try:
year = int(v)
except ValueError:
raise ValueError("Failed converting YEAR to int (%s)" % v)
return year
def _SET_to_python(self, v, dsc=None):
"""Returns SET column typs as set
Actually, MySQL protocol sees a SET as a string type field. So this
code isn't called directly, but used by STRING_to_python() method.
Returns SET column type as a set.
"""
pv = None
s = v.decode(self.charset)
try:
pv = set(s.split(','))
except ValueError:
raise ValueError("Could not convert set %s to a sequence." % v)
return pv
def _STRING_to_python(self, v, dsc=None):
"""
Note that a SET is a string too, but using the FieldFlag we can see
whether we have to split it.
Returns string typed columns as string type.
"""
if dsc is not None:
# Check if we deal with a SET
if dsc[7] & FieldFlag.SET:
return self._SET_to_python(v, dsc)
if dsc[7] & FieldFlag.BINARY:
return v
if isinstance(v, bytes) and self.use_unicode:
return v.decode(self.charset)
return v
_VAR_STRING_to_python = _STRING_to_python
def _BLOB_to_python(self, v, dsc=None):
if dsc is not None:
if dsc[7] & FieldFlag.BINARY:
return bytes(v)
return self._STRING_to_python(v, dsc)
_LONG_BLOB_to_python = _BLOB_to_python
_MEDIUM_BLOB_to_python = _BLOB_to_python
_TINY_BLOB_to_python = _BLOB_to_python
|
import os
import shutil
import numpy as np
# Flatten each <layer>/<model> weight blob into a "<layer>-<model>" layout and
# emit a matching zero-filled float32 momentum buffer alongside it.
old_model_path = '/home/guoran/git-repo/yolo_test_1218/yolov3_model'
new_model_path = "yolov3_model_python/"
# os.makedirs(exist_ok=True) instead of os.mkdir: survives re-runs and
# creates missing parent directories.
os.makedirs(new_model_path, exist_ok=True)
layers = os.listdir(old_model_path)
print(layers)
for layer in layers:
    models = os.listdir(os.path.join(old_model_path, layer))
    for model in models:
        src_path = os.path.join(old_model_path, layer, model)
        dst_dir = os.path.join(new_model_path, layer + "-" + model)
        momentum_dir = dst_dir + "-momentum"
        os.makedirs(dst_dir, exist_ok=True)
        os.makedirs(momentum_dir, exist_ok=True)
        # Copy the raw weights verbatim.
        shutil.copyfile(src_path, os.path.join(dst_dir, "out"))
        # Momentum starts at zero but must match the weight blob's element count.
        momentum = np.fromfile(src_path, dtype=np.float32)
        momentum[:] = 0
        momentum.tofile(os.path.join(momentum_dir, "out"))
        print("cp", src_path, os.path.join(dst_dir, "out"))
|
r"""
Utils to play with PyTorch.
"""
import torch.distributed as dist
# pylint: disable=broad-except
# pylint: disable=protected-access
def get_torch_default_comm():
    r"""
    Dig PyTorch's default process group out of its private c10d state.

    Fast MoE needs the NCCL communicator for its custom C-side collectives,
    but PyTorch does not expose it publicly, so the known private access
    paths are attempted in order across PyTorch versions.

    Raises RuntimeError when no known access path yields a process group
    (unsupported PyTorch version, or distributed not initialised in a
    compatible way).
    """
    # Newer PyTorch: private accessor on distributed_c10d.
    try:
        return dist.distributed_c10d._get_default_group()
    except Exception:
        pass
    # Older PyTorch: module-level _default_pg attribute.
    try:
        pg = dist.distributed_c10d._default_pg
    except Exception:
        pg = None
    if pg is not None:
        return pg
    raise RuntimeError("Unsupported PyTorch version")
|
import os
import sys
import glob
import yaml
import simplejson
def makedirs(output_folder):
    """Create *output_folder* (including missing parents) if absent.

    Uses the atomic ``exist_ok`` form instead of the original
    isdir-then-makedirs check, which was racy: another process could
    create the directory between the check and the call, raising
    FileExistsError.
    """
    os.makedirs(output_folder, exist_ok=True)
def read_yaml_file(file_path, is_convert_dict_to_class=True):
    """Load a YAML file, optionally wrapping the result for attribute access.

    With is_convert_dict_to_class=True (the default) the parsed mapping is
    passed through dict2class so its entries read as attributes.
    """
    with open(file_path, 'r') as stream:
        parsed = yaml.safe_load(stream)
    if is_convert_dict_to_class:
        return dict2class(parsed)
    return parsed
def read_json_file(file_path):
    """Load and return the JSON document stored at *file_path*."""
    with open(file_path, 'r') as f:
        return simplejson.load(f)
def get_filenames(folder, is_base_name=False):
    ''' Get all filenames under the specific folder.
    e.g.:
        full name: data/rgb/000001.png
        base name: 000001.png
    Args:
        folder: path of the folder whose entries are listed.
        is_base_name: if True, strip the directory part from each name.
    Returns:
        Sorted list of full paths (or base names).
    '''
    full_names = sorted(glob.glob(folder + "/*"))
    if is_base_name:
        # os.path.basename is portable; the original name.split("/")[-1]
        # broke on Windows-style path separators.
        return [os.path.basename(name) for name in full_names]
    return full_names
class SimpleNamespace:
    """Attribute bag: stores arbitrary keyword arguments as attributes.

    Stand-in for types.SimpleNamespace with deterministic repr ordering.
    """

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def __repr__(self):
        # Sorted keys keep the repr stable regardless of insertion order.
        keys = sorted(self.__dict__)
        items = ("{}={!r}".format(k, self.__dict__[k]) for k in keys)
        return "{}({})".format(type(self).__name__, ", ".join(items))

    def __eq__(self, other):
        # Bug fixed: the original accessed other.__dict__ unconditionally,
        # so comparing with e.g. an int raised AttributeError instead of
        # evaluating to False. Returning NotImplemented lets Python fall
        # back to the reflected comparison.
        other_dict = getattr(other, '__dict__', None)
        if other_dict is None:
            return NotImplemented
        return self.__dict__ == other_dict
def dict2class(args_dict):
    """Wrap a plain dict into a SimpleNamespace for attribute access."""
    return SimpleNamespace(**args_dict)
|
"""Test accessing and manipulating attributes of a little endian image."""
import binascii
import os
import textwrap
import pytest
from baseline import Baseline
from exif import Image
from .little_endian_baselines import LITTLE_ENDIAN_MODIFY_BASELINE
from ._utils import check_value
# pylint: disable=pointless-statement, protected-access
def test_modify():
    """Verify that modifying tags updates the tag values as expected."""
    image_path = os.path.join(os.path.dirname(__file__), "little_endian.jpg")
    with open(image_path, "rb") as image_file:
        image = Image(image_file)

    # Same-length overwrite.
    image.model = "Modified"
    assert image.model == "Modified"

    # Longer-than-original overwrite forces the tag payload to move.
    image.make = "Value for Make Tag that is Longer than Before"
    assert image.make == "Value for Make Tag that is Longer than Before"

    image.gps_longitude = (12.0, 34.0, 56.789)
    assert str(image.gps_longitude) == Baseline("""(12.0, 34.0, 56.789)""")

    # Compare the whole rewritten APP1 segment against the baseline hex dump.
    app1_hex = (
        binascii.hexlify(image._segments["APP1"].get_segment_bytes())
        .decode("utf-8")
        .upper()
    )
    assert "\n".join(textwrap.wrap(app1_hex, 90)) == LITTLE_ENDIAN_MODIFY_BASELINE
# (attribute name, render function, expected rendered value) triples used to
# parametrise test_read below; the expected values are the known-good
# baselines for the bundled little_endian.jpg fixture.
read_attributes = [
    ("color_space", repr, "<ColorSpace.SRGB: 1>"),
    ("datetime_original", str, "2019:02:08 21:44:35"),
    ("gps_latitude", str, "(79.0, 36.0, 54.804590935844615)"),
    ("gps_longitude", str, "(47.0, 25.0, 34.489798675854615)"),
    ("make", str, "EXIF Package"),
    ("model", str, "Little Endian"),
    ("resolution_unit", repr, "<ResolutionUnit.INCHES: 2>"),
    ("saturation", repr, "<Saturation.LOW: 1>"),
    ("sharpness", repr, "<Sharpness.SOFT: 1>"),
    ("x_resolution", str, "200.0"),
    ("y_resolution", str, "200.0"),
]
@pytest.mark.parametrize(
    "attribute, func, value",
    read_attributes,
    ids=[params[0] for params in read_attributes],
)
def test_read(attribute, func, value):
    """Test reading tags and compare to known baseline values."""
    image_path = os.path.join(os.path.dirname(__file__), "little_endian.jpg")
    with open(image_path, "rb") as image_file:
        image = Image(image_file)
    assert check_value(func(getattr(image, attribute)), value)
|
import json
import logging
import random
import smtplib
import string
import threading
import time
from email.mime.text import MIMEText
from email.utils import formatdate
import requests
import websocket
from Crypto.Hash import SHA256
# Telemetry/CVP wire-protocol constants.
API_VERSION_1 = '1.0.0'
AUTH_PATH = 'cvpservice/login/authenticate.do'  # CVP REST login endpoint
DATE_FORMAT = '%Y-%m-%d %H:%M:%S %Z'  # timestamp format used in notifications
GET = 'get'
SUBSCRIBE = 'subscribe'
NOTIFY_METHOD = 'syslog'  # 'syslog' or 'smtp'
# NOTE(review): placeholder hostname — confirm the real syslog target before deploying.
syslogserver = 'testing'
class TelemetryWs(object):
    """
    Class to handle connection methods required to get
    and subscribe to streaming data.
    """

    def __init__(self, cmd_args, passwords):
        """Open the websocket, plain or TLS with CVP session authentication.

        :param cmd_args: parsed CLI options (telemetryUrl, credentials, SSL flags)
        :param passwords: dict holding at least 'telemetryPassword'
        """
        super(TelemetryWs, self).__init__()

        if cmd_args.noTelemetrySsl:
            # Unencrypted websocket: no authentication handshake required.
            telemetry_ws = 'ws://{}/aeris/v1/wrpc/'.format(cmd_args.telemetryUrl)
            self.socket = websocket.WebSocketApp(
                telemetry_ws,
                on_message=self.on_message,
                on_error=self.on_error,
                on_close=self.on_close,
            )
        else:  # login and setup wss
            credentials = {
                'userId': cmd_args.telemetryUsername,
                'password': passwords['telemetryPassword'],
            }
            headers = {
                'Accept': 'application/json',
                'Content-Type': 'application/json',
            }
            request = requests.post(
                'https://{}/{}'.format(cmd_args.telemetryUrl, AUTH_PATH),
                data=json.dumps(credentials), headers=headers,
                verify=not cmd_args.noSslValidation,
            )

            if request.status_code == 200:
                logging.info('Successfully logged in to Telemetry.')
                # Reuse the REST session cookie for the websocket upgrade.
                headers = [
                    'Cookie: session_id={}'.format(request.json()['sessionId']),
                    'Cache-Control: no-cache',
                    'Pragma: no-cache',
                ]
                telemetry_ws = 'wss://{}/aeris/v1/wrpc/'.format(cmd_args.telemetryUrl)
                self.socket = websocket.WebSocketApp(
                    telemetry_ws,
                    on_message=self.on_message,
                    on_error=self.on_error,
                    on_close=self.on_close,
                    header=headers,
                )
            else:
                logging.error('Telemetry credentials invalid. Could not log in.')
                exit()

        self.config = cmd_args
        self.passwords = passwords
        self.devices = {}  # serial number -> hostname
        self.devices_get_token = None
        self.devices_sub_token = None
        self.events_token = None
        self.socket.on_open = self.on_run

    def on_run(self):
        """
        Methods to run when the ws connects
        """
        logging.info('Websocket connected.')
        self.get_and_subscribe_devices()
        self.get_events()

    def send_message(self, command, token, args):
        """
        Formats a message to be send to Telemetry WS server
        """
        data = {
            'token': token,
            'command': command,
            'params': args,
            'version': API_VERSION_1,
        }
        json_data = json.dumps(data)
        logging.debug('Sending request: {}'.format(json_data))
        self.socket.send(json_data)

    @staticmethod
    def on_close(_):
        """
        Run when ws closes.
        """
        logging.info('Websocket connection closed.')

    @staticmethod
    def on_error(_, error):
        """
        Print websocket error
        """
        # A Ctrl-C during shutdown is expected; don't log it as an error.
        # (isinstance replaces the original exact `type(...) is` check.)
        if isinstance(error, KeyboardInterrupt):
            return
        logging.error('Websocket connection error: {}'.format(error))

    @staticmethod
    def make_token():
        """
        Generate request token
        """
        seed = ''.join(random.choice(string.ascii_uppercase + string.digits)
                       for _ in range(20))
        # NOTE(review): SHA256.new() requires bytes on Python 3; this works
        # as-is only under Python 2, which the rest of this module targets.
        token = SHA256.new(seed).hexdigest()[0:38]
        return token

    def on_message(self, message):
        """
        Print message received from websocket
        """
        logging.debug('Received message: {}'.format(message))
        data = json.loads(message)

        if 'result' not in data:
            return

        if data['token'] == self.events_token:
            # Event stream: flatten every update value and forward it.
            event_updates = []
            for result in data['result']:
                for notification in result['Notifications']:
                    if 'updates' not in notification:
                        continue
                    for key, update in notification['updates'].items():
                        event_updates.append(update['value'])
            if len(event_updates) != 0:
                for event in event_updates:
                    self.send_log(event, syslogserver)
        elif (
            data['token'] == self.devices_get_token
            or data['token'] == self.devices_sub_token
        ):
            # Device inventory: initial get or subscription delta.
            device_notifications = data['result'][0]['Notifications']
            device_updates = {}
            for notification in device_notifications:
                if 'updates' not in notification:
                    continue
                for key, value in notification['updates'].items():
                    device_updates[key] = value
            self.process_devices(device_updates)

    def get_events(self):
        """
        Subscribes to Telemetry events
        """
        logging.info('Subscribing to Telemetry events.')
        self.events_token = self.make_token()
        args = {'query': {'analytics': {'/events/activeEvents': True}}}
        subscribe = threading.Thread(
            target=self.send_message,
            args=(SUBSCRIBE, self.events_token, args)
        )
        subscribe.start()

    def get_and_subscribe_devices(self):
        """
        Subscribes to the list of devices that are streaming data to CVP.
        We'll use this list of devices keyed by the serial number to add more
        info to the email.
        """
        logging.info('Subscribing to Telemetry devices.')
        self.devices_get_token = self.make_token()
        self.devices_sub_token = self.make_token()

        # Get the current object
        get_args = {
            'query': {'analytics': {'/DatasetInfo/EosSwitches': True}},
            'count': False,
        }
        get_devices = threading.Thread(
            target=self.send_message,
            args=(GET, self.devices_get_token, get_args),
        )
        get_devices.start()

        # subscribe for future changes
        args = {'query': {'analytics': {'/DatasetInfo/EosSwitches': True}}}
        subscribe = threading.Thread(
            target=self.send_message,
            args=(SUBSCRIBE, self.devices_sub_token, args),
        )
        subscribe.start()

    def process_devices(self, device_updates):
        """
        Iterate through the list of devices and store the mapping of
        serial number to hostname
        """
        for key, value in device_updates.items():
            self.devices[key] = value['value']['hostname']
        logging.info('Received devices. Total device count is {}.'.format(len(self.devices)))

    def send_log(self, event, syslogserver):
        """
        Send a syslog message using variables above
        """
        logging.debug('Preparing log notification.')
        data = event['data']

        # Try to lookup the hostname, if not found return the serialnum
        device_id = data.get('deviceId')
        device_name = self.devices.get(device_id, device_id)
        # If there is no device name/ID, the event likely occurred due to a CVP process.
        event_location = device_name if device_name else 'backend analytics process'
        key = event['key']
        severity = event['severity']
        title = event['title']
        desc = event['description']
        timestamp = event['timestamp'] / 1000  # ms to sec
        formated_timestamp = time.strftime(DATE_FORMAT, time.localtime(timestamp))
        body = '\n'.join([
            '{} event on {} at {}'.format(severity, event_location, formated_timestamp),
            'Description: {}'.format(desc),
            'View Event at {}/telemetry/events/{}'.format(self.config.telemetryUrl, key),
        ])
        # print() with a single argument behaves identically under Python 2
        # and keeps the module importable under Python 3 (the original used
        # Python-2-only print statements here).
        print('-------------')
        print(body)
|
import pandas as pd
from ..constants import PART_B_STUB, PART_B_STUB_SUM
from ..download.medicare import (list_part_b_files, list_part_d_files,
list_part_d_opi_files)
from ..utils.utils import isid
from . import PARTB_COLNAMES
def part_d_files(summary=False, usecols=None, years=range(2013, 2018)):
    """
    summary=False -> Drug=True gives the larger/longer/more detailed files
    summary=True -> Drug=False gives the summary file

    Reads every matching tab-separated file, tags each frame with its Year,
    and concatenates them into one DataFrame.
    """
    Drug = not summary  # idiomatic form of `True if not summary else False`
    return pd.concat([pd.read_csv(x, usecols=usecols, sep='\t').assign(Year=y)
                      for (x, y) in list_part_d_files(Drug=Drug)
                      if y in years])
def part_d_opi_files(usecols=None, years=range(2013, 2018)):
    """Load and stack the Part D opioid files for the requested years."""
    frames = [pd.read_csv(path, usecols=usecols).assign(Year=year)
              for (path, year) in list_part_d_opi_files()
              if year in years]
    return pd.concat(frames)
def part_b_files(summary=False,
                 years=range(2012, 2018),
                 coldict=PARTB_COLNAMES,
                 columns=None):
    """Load and stack the Part B files (summary or detailed) for *years*."""
    # Columns takes a list of destination column names, and searches
    # through the rename dicts to find the original column name
    filestub = PART_B_STUB_SUM if summary else PART_B_STUB
    params = search_column_rename_dict_for_colnames(columns, coldict)
    # Rename twice on purpose: once against the raw headers, then again
    # after stripping stray whitespace from the header names, so both
    # spellings get mapped to the destination names.
    return pd.concat([pd.read_csv(x, **params)
                      .assign(Year=y)
                      .rename(columns=coldict)
                      .rename(str.strip, axis='columns')
                      .rename(columns=coldict)
                      for (x, y) in list_part_b_files(filestub) if y in years])
def search_column_rename_dict_for_colnames(columns, coldict):
    """Build read_csv kwargs selecting the raw columns that map to *columns*.

    Given destination names in *columns*, look up the original (raw) names
    via the rename mapping *coldict* and return a ``usecols`` predicate
    accepting a raw header with or without surrounding whitespace. Returns
    an empty dict (read everything) when *columns* is falsy.
    """
    if not columns:
        return {}
    wanted = [raw for raw, renamed in coldict.items() if renamed in columns]
    return {'usecols': lambda col: col in wanted or col.strip() in wanted}
def part_d_info():
    """Print diagnostics comparing the short, long, and opioid Part D files.

    Exploratory/documentation routine: loads each variant, checks ID
    uniqueness with isid, tabulates censoring around the 11-claim cutoff,
    and merges the variants to show how they overlap. Output goes to stdout.
    """
    print('Note: both the Part D files and the Part B files have a long file '
          'and a short (summary) file. The long file is at the physician-drug'
          ' or physician-procedure level, whereas the short file is a summary '
          'at the physician level. An observation is censored if it has less '
          'than 11 drug claims or less than 11 beneficiaries comprising it,'
          ' so the long files will have fewer physicians than the short files')
    drug = part_d_files(summary=True, usecols=['npi', 'total_claim_count'])
    print('Short Part D files:')
    isid(drug, ['npi', 'Year'], noisily=True)
    print('Missing total claim count:\t%s'
          % drug.total_claim_count.isnull().sum())
    print('0 total claims:\t\t\t%s' % (drug.total_claim_count == 0).sum())
    print('10 total claims:\t\t%s' % (drug.total_claim_count == 10).sum())
    print('11 total claims:\t\t%s' % (drug.total_claim_count == 11).sum())
    print('12 total claims:\t\t%s' % (drug.total_claim_count == 12).sum())
    drug_long = part_d_files(usecols=['npi', 'drug_name', 'generic_name',
                                      'total_claim_count'])
    print('Long Part D files:')
    isid(drug_long, ['npi', 'Year', 'drug_name', 'generic_name'], noisily=True)
    print('Missing total claim count:\t%s'
          % drug_long.total_claim_count.isnull().sum())
    print('0 total claims:\t\t\t%s' % (drug_long.total_claim_count == 0).sum())
    print('10 total claims:\t\t%s' % (drug_long.total_claim_count == 10).sum())
    print('11 total claims:\t\t%s' % (drug_long.total_claim_count == 11).sum())
    print('12 total claims:\t\t%s' % (drug_long.total_claim_count == 12).sum())
    print("Merging shows that all enrollment information is present"
          " in the short Part D dataframe. About 22% of physicians who "
          "prescribe in Part D do not do enough prescribing to show up in the "
          "detailed files")
    # Outer merge with indicator to see which npi-Years appear in which file.
    partd = drug.merge(
        drug_long.groupby(['npi', 'Year'], as_index=False).sum(),
        on=['npi', 'Year'],
        how='outer',
        indicator=True)
    print(partd._merge.value_counts())
    partd = (partd.sort_values(['npi', 'Year'])
             .reset_index(drop=True)
             .drop(columns='_merge')
             .rename(columns={'total_claim_count_x':
                              'total_claim_count',
                              'total_claim_count_y':
                              'total_claim_count_drug_detail'}))
    print('Opioid files:')
    opi = part_d_opi_files()
    print('Note: opioid files are at the same level of observation'
          ' as the short files')
    # The opioid file should index exactly the same npi-Year pairs.
    assert all(
        drug.sort_values(['npi', 'Year']).set_index(['npi', 'Year']).index
        == (opi.rename(columns={'NPI': 'npi'})
            .sort_values(['npi', 'Year'])
            .set_index(['npi', 'Year']).index)
    )
    print('Opioid files do have zeros and nulls')
    opi = opi[['NPI', 'Year', 'Total Claim Count', 'Opioid Claim Count']]
    print('Missing total claim count:\t%s'
          % opi['Opioid Claim Count'].isnull().sum())
    print('0 total claims:\t\t\t%s' % (opi['Opioid Claim Count'] == 0).sum())
    print('10 total claims:\t\t%s' % (opi['Opioid Claim Count'] == 10).sum())
    print('11 total claims:\t\t%s' % (opi['Opioid Claim Count'] == 11).sum())
    print('12 total claims:\t\t%s' % (opi['Opioid Claim Count'] == 12).sum())
    print('of the 5,518,978 person-years in the Part D data, 1,430,428 are '
          'listed with no opioids and 1,599,355 with null (meaning 1-10)')
    # NOTE(review): the next three bare expressions are no-ops in a script
    # context — apparently leftovers from interactive/notebook use.
    opi.shape
    opi[opi['Opioid Claim Count'] == 0].shape
    opi[opi['Opioid Claim Count'].isnull()].shape
    print('of the 1430428 with a 0 value, about 62% show up in both the '
          'short and long file, whereas 38% show up in only the short file')
    print(
        opi[opi['Opioid Claim Count'] == 0]
        .rename(columns={'NPI': 'npi'})
        .merge(drug_long, how='left', indicator=True)
        [['npi', 'Year', '_merge']]
        .drop_duplicates()
        ._merge.value_counts())
    print('of the 1599355 with a null value, about 2/3 show up in both the '
          'short and long file, whereas 1/3 show up in only the short file')
    print(
        opi[opi['Opioid Claim Count'].isnull()]
        .rename(columns={'NPI': 'npi'})
        .merge(drug_long, how='left', indicator=True)
        [['npi', 'Year', '_merge']]
        .drop_duplicates()
        ._merge.value_counts())
    print('Conclusion: there are real zeros in the opioid files! If '
          'someone in general prescribes enough overall drugs to show up in '
          'the Part D data (10 claims over all drugs total), then a zero '
          'listed for them for opioids is a true zero. Confirmed in the '
          'methodology documents: "opioid_claim_count – Total claims of opioid'
          ' drugs, including refills. The opioid_claim_count is suppressed '
          'when opioid_claim_count is between 1 and 10."')
    print('Finally, the opi files appear to be just a convenience cut of the '
          'Part D summary file (would need to check all columns to verify):')
    drug2 = part_d_files(summary=True, usecols=['npi', 'total_claim_count',
                                                'opioid_claim_count',
                                                'la_opioid_claim_count'])
    opi = part_d_opi_files()
    opi = opi[['NPI', 'Year', 'Total Claim Count', 'Opioid Claim Count',
               'Long-Acting Opioid Claim Count']]
    print(opi.shape)
    print(drug2.shape)
    print(opi.rename(columns={'NPI': 'npi',
                              'Total Claim Count': 'total_claim_count',
                              'Opioid Claim Count': 'opioid_claim_count',
                              'Long-Acting Opioid Claim Count':
                              'la_opioid_claim_count'})
          .merge(drug2).shape)
def part_b_info():
    """Print diagnostics comparing the short and long Part B files.

    Mirrors part_d_info: loads both variants, checks ID uniqueness with
    isid, tabulates censoring around the 11-beneficiary cutoff, and merges
    the two to show their overlap. Output goes to stdout.
    """
    print('Note: both the Part D files and the Part B files have a long file '
          'and a short (summary) file. The long file is at the physician-drug'
          ' or physician-procedure level, whereas the short file is a summary '
          'at the physician level. An observation is censored if it has less '
          'than 11 drug claims or less than 11 beneficiaries comprising it,'
          ' so the long files will have fewer physicians than the short files')
    df_sum = part_b_files(summary=True,
                          columns=['National Provider Identifier',
                                   'Number of Medicare Beneficiaries'])
    print('Short Part B files:')
    isid(df_sum, ['National Provider Identifier', 'Year'], noisily=True)
    print('Missing total claim count:\t%s'
          % df_sum['Number of Medicare Beneficiaries'].isnull().sum())
    print('0 total claims:\t\t\t%s'
          % (df_sum['Number of Medicare Beneficiaries'] == 0).sum())
    print('10 total claims:\t\t%s'
          % (df_sum['Number of Medicare Beneficiaries'] == 10).sum())
    print('11 total claims:\t\t%s'
          % (df_sum['Number of Medicare Beneficiaries'] == 11).sum())
    print('12 total claims:\t\t%s'
          % (df_sum['Number of Medicare Beneficiaries'] == 12).sum())
    df = part_b_files(columns=['National Provider Identifier', 'HCPCS Code',
                               'Place of Service',
                               'Number of Medicare Beneficiaries'])
    print('Long Part B files:')
    idcols = ['National Provider Identifier', 'HCPCS Code',
              'Place of Service', 'Year']
    isid(df, idcols, noisily=True)
    print('Missing total claim count:\t%s'
          % df['Number of Medicare Beneficiaries'].isnull().sum())
    print('0 total claims:\t\t\t%s'
          % (df['Number of Medicare Beneficiaries'] == 0).sum())
    print('10 total claims:\t\t%s'
          % (df['Number of Medicare Beneficiaries'] == 10).sum())
    print('11 total claims:\t\t%s'
          % (df['Number of Medicare Beneficiaries'] == 11).sum())
    print('12 total claims:\t\t%s'
          % (df['Number of Medicare Beneficiaries'] == 12).sum())
    print('The three nulls are mistakes:')
    print(df[df['Number of Medicare Beneficiaries'].isnull()])
    # Drop the known-bad null rows before merging.
    df = df[~df['Number of Medicare Beneficiaries'].isnull()]
    print("Merging shows that all enrollment information is present"
          " in the short Part B dataframe. About 5% of physicians who "
          "show up in Part B do not do enough procedures to show up in the "
          "detailed procedure files")
    partb = df_sum.merge(
        df.groupby(['National Provider Identifier', 'Year'], as_index=False)
        .sum(),
        on=['National Provider Identifier', 'Year'],
        how='outer',
        indicator=True)
    print(partb._merge.value_counts())
    print('fraction:', 299738/(299738+5730647))
|
from os.path import realpath, join, dirname
class Config:
    # Base configuration shared by every environment.
    DEBUG = False
    TESTING = False
    SQLALCHEMY_DATABASE_URI = 'sqlite:////tmp/test.db'
    SQLALCHEMY_TRACK_MODIFICATIONS = False  # avoid SQLAlchemy's event-tracking overhead

    @staticmethod
    def init_app(app):
        # Hook for environment-specific app setup; the base config needs none.
        pass
class ProductionConfig(Config):
    # Production runs against the MySQL instance instead of SQLite.
    SQLALCHEMY_DATABASE_URI = 'mysql://user@localhost/foo'
class DevelopmentConfig(Config):
    # Local development: verbose errors / debug behaviour enabled.
    DEBUG = True
class TestingConfig(Config):
    # Tests run against a throwaway SQLite database kept under tests/.
    here = realpath(dirname(__file__))
    TESTING = True
    DATABASE_PATH = join(here, 'tests', 'test.db')
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + DATABASE_PATH
# Name -> config class lookup used to select an environment at app creation;
# 'default' is what you get when no environment is specified.
config = {
    'production': ProductionConfig,
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'default': DevelopmentConfig,
}
|
"""
Given an array of n elements and an integer m.
The task is to find the maximum value of the
sum of its subarray modulo m i.e find the
sum of each subarray mod m and print
the maximum value of this modulo operation.
"""
from icecream import ic
def MaxSumMod(a, n, m):
    """Return the maximum over all subarrays of (subarray sum) % m.

    Brute force O(n^2) over prefix sums: *a* is converted in place to its
    prefix-sum array; ``a[i] % m`` covers the prefix subarrays, and
    ``(a[j] - a[j-i-1]) % m`` covers the subarray of length i+1 ending at j.

    Bug fixed: the outer loop previously stopped at i == 1, so length-1
    subarrays (and the prefix [0..0]) were never examined — e.g.
    MaxSumMod([1, 6], 2, 7) returned 0 instead of 6. Extending the loop to
    i == 0 makes the coverage complete.

    Note: mutates *a* (it becomes the prefix-sum array).
    """
    maxi = 0
    for i in range(1, n):
        a[i] += a[i-1]
    ic(a)
    # Include i == 0 so single-element subarrays are covered too.
    for i in range(n-1, -1, -1):
        maxi = max(maxi, a[i] % m)
        ic(maxi)
        for j in range(i+1, n):
            ic(j)
            maxi = max(maxi, (a[j]-a[j-i-1]) % m)
            ic(maxi)
    return maxi
print(MaxSumMod([3, 3, 9, 9, 5], 5, 7))
|
import uuid
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser, PermissionsMixin
)
from django.db import models
from django.utils.translation import gettext_lazy as _
class UserManager(BaseUserManager):
    """Manager that knows how to create regular users and superusers."""

    def create_user(self, username, password, email=None, contact_number='', location=None):
        """
        Creates and saves a User with the given username and password.

        New users start inactive (is_active=False) and must be activated
        separately; create_superuser overrides this.
        """
        if not username:
            raise ValueError('User must have an username')
        # A vestigial no-op branch (`if location == 'super': username =
        # username`) was removed here; `location` is retained only for
        # signature compatibility with existing callers.
        user = self.model(username=username, email=email, phone_number=contact_number)
        user.is_active = False
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, username, password):
        """
        Creates and saves a superuser with the given username and password.
        """
        user = self.create_user(
            username=username,
            password=password,
            location='super'
        )
        # Superusers are fully activated and granted every role flag.
        user.is_admin = True
        user.is_superuser = True
        user.is_dashboard_user = True
        user.is_pending = False
        user.is_active = True
        user.save(using=self._db)
        return user
class User(AbstractBaseUser, PermissionsMixin):
    """Custom authentication user keyed by username.

    Exposes a random UUID for external references instead of the integer
    primary key; staff status is derived from the is_admin flag.
    """

    # Stable identifier safe to expose outside the system.
    uuid = models.UUIDField(
        unique=True,
        default=uuid.uuid4,
        editable=False,
        verbose_name=_("UUID"),
        help_text=_("This will be exposed to the outside world."),
    )
    username = models.CharField(
        verbose_name='Username',
        unique=True,
        max_length=255,
    )
    first_name = models.CharField(
        max_length=255,
        blank=True,
        help_text=_("User first name")
    )
    last_name = models.CharField(
        max_length=255,
        blank=True,
        help_text=_("User last/nick name")
    )
    mid_name = models.CharField(
        max_length=255,
        blank=True,
        help_text=_("User middle name")
    )
    # Optional contact details; email is not required for account creation.
    email = models.EmailField(
        verbose_name='email address',
        max_length=255,
        null=True,
        blank=True
    )
    phone_number = models.CharField(
        max_length=64,
        verbose_name='Contact Number',
        blank=True,
        help_text=_('User Contact Number')
    )
    # user boolean field
    is_active = models.BooleanField(default=True, help_text=_('Inactive user can do nothing in the system.'))
    is_pending = models.BooleanField(default=True, help_text=_('User activity is pending for some reasons.'))
    is_admin = models.BooleanField(default=False, help_text=_('User is a admin user.'))
    is_dashboard_user = models.BooleanField(default=False, help_text=_('User is a dashboard user.'))
    # blocking related fields
    is_blocked = models.BooleanField(default=False, help_text=_('User is blocked by authority for some reasons.'))
    # common fields
    join_date = models.DateTimeField(auto_now_add=True)
    # NOTE(review): updated_at is not auto_now — presumably maintained
    # manually on save elsewhere; confirm callers actually set it.
    updated_at = models.DateTimeField(null=True, blank=True)

    objects = UserManager()

    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = []

    class Meta:
        ordering = ('-id',)
        # TODO: Define index

    def __str__(self):
        return self.username

    @property
    def is_staff(self):
        "Is the user a member of staff?"
        # Simplest possible answer: All admins are staff
        return self.is_admin
|
import pandas as pd
from tqdm import tqdm
from question_recommend import QuestionRecommendation, TfIdfSearch, MinHashSearch
from semantic_sim import SimServer
# Tab-separated "question<TAB>duplicate-index" lines used as benchmark queries.
TEST_QUESTIONS = 'data/test_questions.txt'
# One candidate question per line; the corpus that gets indexed.
TEST_DATASET = 'data/test_dataset.txt'
def save_test_questions():
    """Split the Quora pairs into a query set and a searchable corpus.

    For up to 1000 duplicate pairs, question1 becomes a test query whose
    expected match is the corpus index of its question2; every row's
    question2 (plus the question1 of non-selected rows) goes into the
    corpus, capped at roughly 400k entries.
    """
    df = pd.read_csv('data/quora-question-pairs/train.csv', index_col=0)
    test_questions = []
    test_dataset = []
    num_test_questions = 1000
    num_dataset_questions = 400_000
    for i, row in df.iterrows():
        if row['is_duplicate'] == 1 and num_test_questions != 0:
            # len(test_dataset) is the index question2 will occupy below.
            test_questions.append((row['question1'],
                                   len(test_dataset)))
            num_test_questions -= 1
        else:
            test_dataset.append(row['question1'])
            num_dataset_questions -= 1
        test_dataset.append(row['question2'])
        num_dataset_questions -= 1
        if num_dataset_questions <= 0:
            break
    with open(TEST_QUESTIONS, 'w') as f:
        for q in test_questions:
            print(f"{q[0]}\t{q[1]}", file=f)
    with open(TEST_DATASET, 'w') as f:
        for q in test_dataset:
            print(f"{q}", file=f)
def save_bert_questions():
    """Build a small (100-query / ~10k-corpus) split for the BERT benchmark.

    Same construction as save_test_questions, only with smaller caps and
    different output files.
    """
    df = pd.read_csv('data/quora-question-pairs/train.csv', index_col=0)
    test_questions = []
    test_dataset = []
    num_test_questions = 100
    num_dataset_questions = 10_000
    for i, row in df.iterrows():
        if row['is_duplicate'] == 1 and num_test_questions != 0:
            # len(test_dataset) is the index question2 will occupy below.
            test_questions.append((row['question1'],
                                   len(test_dataset)))
            num_test_questions -= 1
        else:
            test_dataset.append(row['question1'])
            num_dataset_questions -= 1
        test_dataset.append(row['question2'])
        num_dataset_questions -= 1
        if num_dataset_questions <= 0:
            break
    with open('data/bert_questions.txt', 'w') as f:
        for q in test_questions:
            print(f"{q[0]}\t{q[1]}", file=f)
    with open('data/bert_dataset.txt', 'w') as f:
        for q in test_dataset:
            print(f"{q}", file=f)
def evaluate(engine, k, limit=None):
    """Measure detection@{1,3,5,10} of *engine* on the test questions.

    Each line of TEST_QUESTIONS holds "query\\tindex-of-duplicate"; the
    engine's search() must return corpus indices ranked by relevance.

    Bug fixed: the @10 counter previously checked ``retrieved[:1000]``, so
    the line printed as "Detection @10" was really detection@1000.

    :param engine: object exposing search(query, k=...) -> ranked indices
    :param k: number of candidates to request per query
    :param limit: optional cap on the number of queries evaluated
    """
    num_questions = 0
    num_correct_3 = 0
    num_correct_5 = 0
    num_correct_10 = 0
    num_correct = 0
    with open(TEST_QUESTIONS) as f:
        for line in tqdm(f):
            q, dupl = line.strip().split('\t')
            retrieved = engine.search(q, k=k)
            if int(dupl) in retrieved[:10]:
                num_correct_10 += 1
            if int(dupl) in retrieved[:5]:
                num_correct_5 += 1
            if int(dupl) in retrieved[:3]:
                num_correct_3 += 1
            if int(dupl) in retrieved[:1]:
                num_correct += 1
            num_questions += 1
            if num_questions == limit:
                break
    print(f"\nDetection @1: {100 * num_correct/num_questions} %")
    print(f"Detection @3: {100 * num_correct_3/num_questions} %")
    print(f"Detection @5: {100 * num_correct_5/num_questions} %")
    print(f"Detection @10: {100 * num_correct_10/num_questions} %")
if __name__ == '__main__':
    # Build one search index per backend over the same corpus, then run the
    # detection benchmark on each.
    se = QuestionRecommendation(TEST_DATASET, SimServer.UNIV_SENT_ENCODER)
    se1 = QuestionRecommendation(TEST_DATASET, SimServer.USE_QA)
    se2 = QuestionRecommendation(TEST_DATASET, SimServer.USE_MULTILINGUAL)
    se3 = QuestionRecommendation(TEST_DATASET, SimServer.USE_WITH_DAN)
    tf = TfIdfSearch(TEST_DATASET)
    lsh = MinHashSearch(TEST_DATASET)
    print("Loaded indices", flush=True)
    print("Standard USE: ")
    evaluate(se, 20)
    print("USE per Question Answering: ")
    evaluate(se1, 20)
    print("USE advanced multilingual: ")
    evaluate(se2, 20)
    print("Standard USE with DAN network: ")
    evaluate(se3, 20)
    print("TF-IDF based search : ")
    # limit=500 caps the TF-IDF query count — presumably for runtime; confirm.
    evaluate(tf, 20, limit=500)
    print("LSH based search : ")
    # MinHash is asked for a much deeper candidate list (k=1000) than the
    # embedding engines — presumably deliberate; confirm.
    evaluate(lsh, 1000)
    print("BERT model based search: ")
    # evaluate_bert_qqp(TEST_DATASET)
|
# Performs the G-test to estimate the goodness-of-fit of the GSD and QNormal models to real data.
#
# This script requires two parameters:
# (i) the number of chunks to cut the input data into and
# (ii) a zero-based chunk index of a chunk you want to process
#
# Author: Jakub Nawała <jnawala@agh.edu.pl>
# Date: March, 18 2020
import logging
from _logger import setup_console_and_file_logger
from probability_grid_estimation import preprocess_real_data, get_answer_counts, estimate_parameters
import csv
import pandas as pd
import bootstrap
import gsd
import qnormal
import numpy as np
from sys import argv
from pathlib import Path
logger = None
def read_input_data_subsection(grouped_scores: pd.core.groupby.GroupBy, n_subsection, subsection_idx):
    """
    Split the group keys of *grouped_scores* into *n_subsection* round-robin
    chunks and return the keys belonging to chunk *subsection_idx*.

    :param grouped_scores: scores grouped by a feature defining data granularity
    :param n_subsection: number of subsections to divide the input data into
    :param subsection_idx: a zero-based index specifying which subsection of the input to return
    :return: a list of DataFrameGroupBy keys of the selected subsection. Use these to read data
        relevant for the chunk of interest from grouped_scores.
    """
    all_keys = list(grouped_scores.groups)
    # Round-robin chunking: key i belongs to chunk i % n_subsection, so chunk
    # subsection_idx is simply the stride slice below.
    # See https://stackoverflow.com/a/2136090/3978083 for the original idea.
    return all_keys[subsection_idx::n_subsection]
def get_each_answer_probability(psi_sigma_row, prob_generator):
    """
    Translate a (psi, sigma-or-rho) parameter pair into per-answer probabilities.

    :param psi_sigma_row: a 2-element vector; element 0 is psi, element 1 is
        sigma (QNormal) or rho (GSD)
    :param prob_generator: either gsd.prob or qnormal.prob
    :return: a vector of probabilities of each answer
    """
    psi, spread = psi_sigma_row[0], psi_sigma_row[1]
    return prob_generator(psi, spread)
def main(_argv):
    """
    Estimate GSD and QNormal parameters and run the bootstrap G-test for one
    chunk of the input subjective-score data, writing one CSV row per
    (PVS, experiment) pair.

    Expected CLI vector (*_argv*): [script, n_chunks, chunk_idx, input_csv_path].
    """
    assert len(_argv) == 4, "This script requires 3 parameters: the number of chunks, a zero-based chunk index and " \
                            "path of a CSV file you wish to process"
    # Pre-computed answer-probability grids used by the MLE parameter search
    prob_grid_gsd_df = pd.read_pickle("gsd_prob_grid.pkl")
    prob_grid_qnormal_df = pd.read_pickle("qnormal_prob_grid.pkl")
    filepath_cli_idx = 3
    in_csv_filepath = Path(_argv[filepath_cli_idx])
    assert in_csv_filepath.exists() and in_csv_filepath.is_file(), f"Make sure the {_argv[filepath_cli_idx]} file " \
                                                                   f"exists"
    n_chunks_argv_idx = 1
    chunk_idx_argv_idx = 2
    n_chunks = int(_argv[n_chunks_argv_idx])
    chunk_idx = int(_argv[chunk_idx_argv_idx])
    assert n_chunks > 0 and 0 <= chunk_idx < n_chunks
    # Create a logger here to make sure each log has a unique filename (according to a chunk being processed)
    global logger
    logger = setup_console_and_file_logger(name=__name__,
                                           log_file_name="G_test_on_real_data" +
                                                         "_chunk{:03d}_of{:03d}".format(chunk_idx, n_chunks)
                                                         + ".log",
                                           level=logging.DEBUG)
    logger.info("Reading chunk {} (from {} chunks)".format(chunk_idx, n_chunks))
    # coi - chunk of interest
    pvs_id_exp_grouped_scores = preprocess_real_data(in_csv_filepath, should_also_group_by_exp=True)
    keys_for_coi = read_input_data_subsection(pvs_id_exp_grouped_scores, n_chunks, chunk_idx)
    in_csv_filename_wo_ext = in_csv_filepath.stem # wo - without, ex - extension
    csv_results_filename = "G_test_on_" + in_csv_filename_wo_ext + "_chunk{:03d}_".format(chunk_idx) + \
                           "of_{:03d}".format(n_chunks) + ".csv"
    logger.info("Storing the results in the {} file".format(csv_results_filename))
    # buffering=1 => line-buffered, so partial results survive a crash
    with open(csv_results_filename, 'w', newline='', buffering=1) as csvfile:
        fieldnames = ["PVS_id", "count1", "count2", "count3", "count4", "count5", "MOS", "Exp", "psi_hat_gsd",
                      "rho_hat", "psi_hat_qnormal", "sigma_hat", "T_gsd", "T_qnormal", "p-value_gsd", "p-value_qnormal"]
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        # iteration number to make it easier to assess the progress
        it_num = 1
        for pvs_id_exp_tuple in keys_for_coi:
            pvs_id = pvs_id_exp_tuple[0]
            exp_id = pvs_id_exp_tuple[1]
            pvs_data = pvs_id_exp_grouped_scores.get_group(pvs_id_exp_tuple)
            row_to_store = {"PVS_id": pvs_id, "Exp": exp_id}
            logger.info("Iteration {}".format(it_num))
            logger.info("Processing PVS {} from experiment {}".format(pvs_id, exp_id))
            sample_scores = pvs_data["Score"]
            mos = sample_scores.mean()
            logger.info("MOS of the PVS {} in experiment {}: {:.3f}".format(pvs_id, exp_id, mos))
            row_to_store["MOS"] = mos
            # Histogram of answers 1..5 for this PVS/experiment pair
            score_counts = np.array(get_answer_counts(sample_scores))
            row_to_store["count1"] = score_counts[0]
            row_to_store["count2"] = score_counts[1]
            row_to_store["count3"] = score_counts[2]
            row_to_store["count4"] = score_counts[3]
            row_to_store["count5"] = score_counts[4]
            logger.info("Estimating both models parameters using MLE on the probability grid")
            # est = esimated
            psi_hat_gsd, rho_hat = estimate_parameters(sample_scores, prob_grid_gsd_df)
            psi_hat_qnormal, sigma_hat = estimate_parameters(sample_scores, prob_grid_qnormal_df)
            row_to_store["psi_hat_gsd"] = psi_hat_gsd
            row_to_store["rho_hat"] = rho_hat
            row_to_store["psi_hat_qnormal"] = psi_hat_qnormal
            row_to_store["sigma_hat"] = sigma_hat
            logger.info("Calculating T statistic for both models")
            # exp_prob = expected probability
            exp_prob_gsd = gsd.prob(psi_hat_gsd, rho_hat)
            exp_prob_qnormal = qnormal.prob(psi_hat_qnormal, sigma_hat)
            T_statistic_gsd = bootstrap.T_statistic(score_counts, exp_prob_gsd)
            T_statistic_qnormal = bootstrap.T_statistic(score_counts, exp_prob_qnormal)
            row_to_store["T_gsd"] = T_statistic_gsd
            row_to_store["T_qnormal"] = T_statistic_qnormal
            logger.info("Generating 10k bootstrap samples for both models")
            n_total_scores = np.sum(score_counts)
            n_bootstrap_samples = 10000
            bootstrap_samples_gsd = gsd.sample(psi_hat_gsd, rho_hat, n_total_scores, n_bootstrap_samples)
            bootstrap_samples_qnormal = qnormal.sample(psi_hat_qnormal, sigma_hat, n_total_scores, n_bootstrap_samples)
            # Estimate GSD and QNormal parameters for each bootstrapped sample
            logger.info("Estimating GSD and QNormal parameters for each bootstrapped sample")
            psi_hat_rho_hat_gsd_bootstrap = np.apply_along_axis(estimate_parameters, axis=1, arr=bootstrap_samples_gsd,
                                                                prob_grid_df=prob_grid_gsd_df, sample_as_counts=True)
            psi_hat_sigma_hat_qnormal_bootstrap = np.apply_along_axis(estimate_parameters, axis=1,
                                                                      arr=bootstrap_samples_qnormal,
                                                                      prob_grid_df=prob_grid_qnormal_df,
                                                                      sample_as_counts=True)
            # Translate the estimated bootstrap parameters into probabilities of each answer
            logger.info("Translating the estimated parameters into probabilities of each answer")
            bootstrap_exp_prob_gsd = np.apply_along_axis(get_each_answer_probability, axis=1,
                                                         arr=psi_hat_rho_hat_gsd_bootstrap, prob_generator=gsd.prob)
            bootstrap_exp_prob_qnormal = np.apply_along_axis(get_each_answer_probability, axis=1,
                                                             arr=psi_hat_sigma_hat_qnormal_bootstrap,
                                                             prob_generator=qnormal.prob)
            # Perform the G-test
            logger.info("Performing the G-test")
            p_value_g_test_gsd = bootstrap.G_test(score_counts, exp_prob_gsd, bootstrap_samples_gsd,
                                                  bootstrap_exp_prob_gsd)
            p_value_g_test_qnormal = bootstrap.G_test(score_counts, exp_prob_qnormal, bootstrap_samples_qnormal,
                                                      bootstrap_exp_prob_qnormal)
            row_to_store["p-value_gsd"] = p_value_g_test_gsd
            row_to_store["p-value_qnormal"] = p_value_g_test_qnormal
            logger.info("p-value (G-test) for GSD: {}".format(p_value_g_test_gsd))
            logger.info("p-value (G-test) for QNormal: {}".format(p_value_g_test_qnormal))
            writer.writerow(row_to_store)
            it_num += 1
if __name__ == '__main__':
    main(argv)
    # logger is assigned inside main() (via `global logger`), so it is
    # guaranteed to be non-None by this point.
    logger.info("Everything done!")
    exit(0)
|
"""
Sieve of Eratosthenes
implementation
"""
def sieve(number: int) -> list:
    """
    Sieve of Eratosthenes: return all primes up to *number* (inclusive).

    Only odd candidates are tracked; 2 is seeded into the result directly.
    Candidate at index ``i`` is ``2*i + 3``.

    :param number: inclusive upper bound for the primes
    :return: ascending list of primes <= number (empty list for number < 2)
    """
    if number < 2:
        # No primes below 2 (the original wrongly returned [2] here).
        return []
    n_odd: int = (number - 1) // 2      # number of odd candidates 3, 5, 7, ...
    is_prime: list = [True] * n_odd     # is_prime[i] <-> candidate 2*i + 3
    result: list = [2]
    idx: int = 0
    candidate: int = 3
    # Mark multiples while candidate**2 <= number.  The original used '<',
    # which skipped marking when the limit is a perfect square of a prime
    # (e.g. sieve(9) returned 9, sieve(25) returned 25).
    while candidate * candidate <= number:
        if is_prime[idx]:
            result.append(candidate)
            # First composite to cross out is candidate**2, whose index is
            # 2*idx*idx + 6*idx + 3; the step is candidate = 2*idx + 3.
            multiple = 2 * idx * idx + 6 * idx + 3
            while multiple < n_odd:
                is_prime[multiple] = False
                multiple += 2 * idx + 3
        idx += 1
        candidate += 2
    # Every remaining unmarked candidate is prime.
    while idx < n_odd:
        if is_prime[idx]:
            result.append(candidate)
        idx += 1
        candidate += 2
    return result
|
from flask import Blueprint, render_template, url_for, redirect
from flask_login import login_user, logout_user, login_required, current_user
from app.form import SearchForm, LoginForm, RegisterForm, WatchForm, UnWatchForm, DelPackageForm
from app.model import Express, Package, User
from config import INTERNAL_CODE
route = Blueprint("view", __name__)
@route.route("/", methods=["GET", "POST"])
def index():
form = SearchForm()
form.express_code.choices = [(e.code, e.name) for e in Express.query.all()]
if form.validate_on_submit():
if current_user and current_user.is_authenticated:
pkg = Package.get_package(current_user.user_id, form.express_code.data, form.package_number.data)
else:
pkg = None
form.errors['package_number'] = ['查询前请先登录!']
if pkg:
return redirect(url_for("view.package_info", package_id=pkg.package_id))
else:
if not form.errors:
form.errors['package_number'] = ['未找到相关信息, 请核实单号和物流公司']
return render_template("index.html", form=form)
@route.route("/package/<package_id>", methods=['GET', 'POST'])
@login_required
def package_info(package_id):
pkg = Package.get_package_by_id(package_id)
if not pkg:
return redirect(url_for("view.index"))
if pkg.user_id != current_user.user_id:
return redirect(url_for("view.user_package"))
express = Express.query.filter_by(express_id=pkg.express_id).first()
watch_form = WatchForm()
unwatch_form = UnWatchForm()
if watch_form.validate_on_submit() and pkg.package_id == watch_form.watch_package_id.data:
pkg.watching(watch_form.watch_nicename.data)
return redirect(url_for("view.package_info", package_id=pkg.package_id))
if unwatch_form.validate_on_submit() and pkg.package_id == unwatch_form.unwatch_package_id.data:
pkg.unwatching()
return redirect(url_for("view.package_info", package_id=pkg.package_id))
return render_template("package.html", package=pkg, express=express, watch_form=watch_form,
unwatch_form=unwatch_form)
@route.route("/user/package", methods=['GET', 'POST'])
@login_required
def user_package():
packages = Package.query.filter_by(user_id=current_user.user_id).all()
delete_form = DelPackageForm()
if delete_form.validate_on_submit():
pkg = Package.query.filter_by(package_id=delete_form.delete_package_id.data).first()
if pkg and pkg.user_id == current_user.user_id:
pkg.delete()
return redirect(url_for("view.user_package"))
return render_template("user/packages.html", packages=packages, delete_form=delete_form)
@route.route("/user/watching", methods=['GET', 'POST'])
@login_required
def user_watching():
packages = Package.get_watching_package_by_user_id(current_user.user_id)
unwatch_form = UnWatchForm()
if unwatch_form.validate_on_submit():
pkg = Package.query.filter_by(package_id=unwatch_form.unwatch_package_id.data).first()
if pkg and pkg.user_id == current_user.user_id:
pkg.unwatching()
return redirect(url_for("view.user_watching"))
return render_template("user/watching.html", packages=packages, unwatch_form=unwatch_form)
@route.route("/user/open-api")
@login_required
def user_token():
token = current_user.get_token()
return render_template("user/token.html", token=token)
@route.route("/login", methods=["GET", "POST"])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user and user.check_password(form.password.data):
login_user(user, remember=form.remember.data)
return redirect(url_for("view.user_package"))
form.errors['username'] = ['用户名或密码错误']
return render_template("login.html", form=form)
@route.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for("view.index"))
@route.route("/register", methods=['GET', 'POST'])
def register():
form = RegisterForm()
if form.validate_on_submit():
if form.internal_code.data != INTERNAL_CODE:
form.errors['internal_code'] = ["内测码错误, 请联系作者。"]
if not form.errors:
user = User.query.filter_by(username=form.username.data).first()
if user:
form.errors['username'] = ['用户名 {} 已被占用'.format(form.username.data)]
else:
user = User.query.filter_by(email=form.email.data).first()
if user:
form.errors['email'] = ['邮箱 {} 已被占用'.format(form.email.data)]
if not form.errors:
user = User(form.username.data, form.email.data, form.password.data)
if user:
return redirect(url_for("view.login"))
return render_template("register.html", form=form)
@route.route("/about")
def about():
return render_template("about.html")
|
import matplotlib
matplotlib.use('Agg')
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.insert(0, '../')
import utils.trajectory_utils
reload(utils.trajectory_utils)
from utils.trajectory_utils import TrajectoryLoader
from nets.net import *
from envs.env_transform import *
from envs.gym_env import *
from env_model.model import *
import shutil
import os
# Scatter colors; plot_log() advances `counter`, so each call to it picks the
# next pair of colors from this list.
colors = ['blue', 'red', 'green', 'brown', 'gray', 'yellow', 'cyan', 'purple']
counter = 0
# Environment + model setup via project-specific helpers (CartPole with a
# manually specified environment model and a DQN Q-network).
env = get_env("CartPole-v1", False, ["Penalizer"])
envOps = EnvOps(env.observation_space.shape, env.action_space.n, 0)
env_model = EnvModelCartPoleManual(envOps)
init_nn_library(True, "1")
modelOps = DqnOps(2)
modelOps.INPUT_SIZE = (4,)  # CartPole observation: 4-dimensional state vector
q_model = CartPoleModel(modelOps)
# Recreate the output directory from scratch on every run.
basedir = 'dqn_v_change_cartpole'
if os.path.exists(basedir):
    shutil.rmtree(basedir)
os.makedirs(basedir)
def plot_log(fname):
    """
    Render per-step scatter plots of Q-values over uniformly sampled CartPole
    states, for every saved DQN checkpoint, into per-dimension-pair folders.

    :param fname: trajectory file path — currently unused; states are sampled
        uniformly instead (see the commented-out TrajectoryLoader code).
    """
    global counter
    N = 100000
    #tl = TrajectoryLoader(fname)
    #s = tl.sample(N)
    #print(s['current'])
    # Sampling bounds per state dimension — presumably (position, velocity,
    # angle, angular velocity); TODO confirm against the CartPole spec.
    smin = [-2.4, -1, -0.20943, -1]
    smax = [2.4, 1, 0.20943, 1]
    samples = np.random.uniform(smin, smax, size=(N,len(smin)))
    s = {}
    s['current'] = samples
    # Predict the successor state and done flag for both actions, then pick
    # per-sample according to a random action a.
    res = env_model.predict_next(samples)
    a = np.random.randint(0, 2, (N,))
    s['next'] = res[0]
    s['next'][a==1] = res[1][a==1]
    s['done'] = res[3]
    s['done'][a==1] = res[4][a==1]
    print(s['current'].shape)
    print(s['next'].shape)
    print(s['done'].shape)
    # One plot per checkpoint (every 100 training steps) and state-dim pair.
    for step,ID in zip(range(100, 50001, 100), range(0, 10000)):
        #q_model.model.load_weights('../test_cartpole2/dqn-15/weights_{}.h5'.format(step))
        q_model.model.load_weights('algo_convergence_cartpole/dqn/train-0/weights_{}.h5'.format(step))
        s['qvalue'] = q_model.q_value(s['current']).max(axis=1)#[:,1]#
        print('STEP', step)
        for I in range(s['next'].shape[1]):
            for J in range(s['next'].shape[1]):
                if I<J: #and J<K:# and I==0 and J==1: # and I==0 and J==2
                    fig = plt.figure(figsize=(8, 8), facecolor='w', edgecolor='k') #num=I*16+J*4, #dpi=80,
                    i1 = s['done'].flatten() == False
                    i2 = s['done'].flatten() == True
                    # Current states colored by Q-value; terminal successors overlaid.
                    plt.scatter(s['current'][:,I], s['current'][:,J], s=20, c=s['qvalue'][:], alpha=0.4, edgecolors='none', cmap=plt.get_cmap('viridis'))
                    plt.scatter(s['next'][i2,I], s['next'][i2,J], s=1, c=colors[counter*2+1], alpha=1)
                    plt.title('{} x {}'.format(I, J))
                    plt.suptitle('Step {}'.format(step))
                    ax = plt.gca()
                    ax.set_facecolor((0.0, 0.0, 0.0))
                    plot_dir = '{}/{}-{}'.format(basedir, I, J)
                    if not os.path.exists(plot_dir):
                        os.makedirs(plot_dir)
                    plt.savefig('{}/{}.png'.format(plot_dir, ID))
                    plt.close()
    counter += 1
plot_log('../test_tensorboard/traj-9_mix.h5')
|
#!/bin/env python2.7
##
## Copyright (c) 2010-2017 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
cores = [[0,20], [1,21], [2,22], [3,23], [4,24], [5,25], [6,26], [7,27], [8,28], [9,29]]
|
"""
Node is defined as
self.left (the left child of the node)
self.right (the right child of the node)
self.data (the value of the node)"""
def _topLeft(root):
    # Print the nodes on the leftmost path, deepest (outermost) first, so the
    # top view reads left-to-right.
    if root is not None:
        _topLeft(root.left)
        print root.data,
def _topRight(root):
    # Print the nodes on the rightmost path, shallowest first (pre-order),
    # continuing the left-to-right top view.
    if root is not None:
        print root.data,
        _topRight(root.right)
def topView(root):
_topLeft(root.left)
print root.data,
_topRight(root.right)
|
import torch
import torch.nn as nn
import math
from kobert.pytorch_kobert import get_pytorch_kobert_model
def _gen_attention_mask(token_ids, valid_length):
attention_mask = torch.zeros_like(token_ids)
for i, v in enumerate(valid_length):
attention_mask[i][:v] = 1
return attention_mask.float()
class CDMMB(nn.Module):
    """
    Conversation classifier combining a KoBERT utterance encoder, a GRU over
    utterance embeddings, a GRU over speaker (user) embeddings with attention,
    and a linear classification head over the concatenated features.
    """
    def __init__(self, config):
        # config is expected to provide: hidden_size, rnn_hidden_size,
        # embedding_size, num_classes, dr_rate, user_size.
        super(CDMMB, self).__init__()
        self.config = config
        bertmodel, vocab = get_pytorch_kobert_model()
        self.bert = bertmodel
        self.vocab = vocab
        # GRU over per-utterance BERT pooler outputs (one step per utterance).
        self.top_rnn = nn.GRU(input_size=config.hidden_size, hidden_size=config.rnn_hidden_size,
                              dropout=0, bidirectional=False, batch_first=True)
        # GRU over speaker-id embeddings of the same conversation.
        self.user_rnn = nn.GRU(input_size=config.embedding_size, hidden_size=config.rnn_hidden_size,
                               dropout=0, bidirectional=False, batch_first=True)
        # 4 concatenated feature vectors feed the classifier (see forward()).
        self.classifier = nn.Linear(config.rnn_hidden_size * 4, config.num_classes)
        # NOTE(review): self.dropout is defined but not applied in forward() —
        # confirm whether it was meant to wrap merged_output.
        self.dropout = nn.Dropout(p=config.dr_rate)
        # index 0 reserved as padding for unknown/absent speakers
        self.user_embedding = nn.Embedding(config.user_size+1, config.embedding_size, padding_idx=0)
        self.user_embedding.weight.requires_grad = True
    def _attention_net(self, rnn_output, final_hidden_state):
        # Scaled dot-product attention of the final hidden state (query) over
        # all RNN time steps (keys/values); returns one context vector per batch.
        scale = 1. / math.sqrt(self.config.rnn_hidden_size)
        query = final_hidden_state.unsqueeze(1)  # [BxQ] -> [Bx1xQ]
        keys = rnn_output.permute(0, 2, 1)  # [BxTxK] -> [BxKxT]
        energy = torch.bmm(query, keys)  # [Bx1xQ]x[BxKxT] -> [Bx1xT]
        energy = nn.functional.softmax(energy.mul_(scale), dim=2)  # scale, normalize
        values = rnn_output  # [BxTxV]
        linear_combination = torch.bmm(energy, values).squeeze(1)  # [Bx1xT]x[BxTxV] -> [BxV]
        return linear_combination
    def _user(self, users, conv_length):
        # Encode the speaker sequence with a GRU and attend over its outputs.
        embedded_users = self.user_embedding(users)
        users_input = nn.utils.rnn.pack_padded_sequence(embedded_users, conv_length,
                                                        batch_first=True, enforce_sorted=False)
        packed_output, hidden = self.user_rnn(users_input)
        rnn_output, _ = nn.utils.rnn.pad_packed_sequence(packed_output, batch_first=True)
        attn_output = self._attention_net(rnn_output, hidden[-1])
        return attn_output
    def _conv(self, token_ids, valid_length, segment_ids, conv_length, batch_size):
        # Encode each utterance with BERT, then run a GRU over the per-utterance
        # pooler vectors; return the final hidden state plus avg/max pools.
        attention_mask = _gen_attention_mask(token_ids, valid_length)
        _, pooler = self.bert(input_ids=token_ids, token_type_ids=segment_ids.long(),
                              attention_mask=attention_mask.float().to(token_ids.device))
        # Regroup the flat utterance batch back into conversations.
        output_pooler = pooler.view(batch_size, -1, self.config.hidden_size)
        convs_input = nn.utils.rnn.pack_padded_sequence(output_pooler, conv_length,
                                                        batch_first=True, enforce_sorted=False)
        packed_output, hidden = self.top_rnn(convs_input)
        rnn_output, _ = nn.utils.rnn.pad_packed_sequence(packed_output, batch_first=True)
        avg_pool = nn.functional.adaptive_avg_pool1d(rnn_output.permute(0, 2, 1), 1).view(batch_size, -1)
        max_pool = nn.functional.adaptive_max_pool1d(rnn_output.permute(0, 2, 1), 1).view(batch_size, -1)
        return hidden[-1], avg_pool, max_pool
    def forward(self, token_ids, valid_length, segment_ids, users, conv_length):
        # Concatenate user-attention, conversation hidden state, and the two
        # pooled conversation features, then classify.
        batch_size = len(conv_length)
        user_hidden = self._user(users, conv_length)
        conv_hidden, conv_avg_pool, conv_max_pool = self._conv(token_ids, valid_length, segment_ids,
                                                               conv_length, batch_size)
        merged_output = [user_hidden, conv_hidden, conv_avg_pool, conv_max_pool]
        merged_output = torch.cat(merged_output, dim=1)
        out = self.classifier(merged_output)
        return out
|
# -*- coding: utf-8 -*-
import asyncio
from aiorpc.log import rootLogger
from aiorpc.constants import SOCKET_RECV_SIZE
__all__ = ['Connection']
_logger = rootLogger.getChild(__name__)
class Connection:
    """
    Thin wrapper around an asyncio (reader, writer) pair that sends raw
    request bytes and receives msgpack-style messages via a streaming unpacker.
    """
    def __init__(self, reader, writer, unpacker):
        # unpacker is a streaming deserializer: feed() bytes, then iterate to
        # pull out complete messages (StopIteration = need more data).
        self.reader = reader
        self.writer = writer
        self.unpacker = unpacker
        self._is_closed = False
        self.peer = self.writer.get_extra_info('peername')
    async def sendall(self, raw_req, timeout):
        """Write *raw_req* and wait (up to *timeout* seconds) for the drain."""
        _logger.debug('sending raw_req {} to {}'.format(
            str(raw_req), self.peer))
        self.writer.write(raw_req)
        await asyncio.wait_for(self.writer.drain(), timeout)
        _logger.debug('sending {} completed'.format(str(raw_req)))
    async def recvall(self, timeout):
        """Read until the unpacker yields one complete message; return it.

        Raises IOError if the peer closes the connection mid-message;
        asyncio.TimeoutError if a single read exceeds *timeout*.
        """
        _logger.debug('entered recvall from {}'.format(self.peer))
        # buffer, line = bytearray(), b''
        # while not line.endswith(b'\r\n'):
        #     _logger.debug('receiving data, timeout: {}'.format(timeout))
        #     line = await asyncio.wait_for(self.reader.readline(), timeout)
        #     if not line:
        #         break
        #     _logger.debug('received data {}'.format(line))
        #     buffer.extend(line)
        # _logger.debug('buffer: {}'.format(buffer))
        req = None
        while True:
            data = await asyncio.wait_for(self.reader.read(SOCKET_RECV_SIZE), timeout)
            _logger.debug('receiving data {} from {}'.format(data, self.peer))
            if not data:
                raise IOError('Connection to {} closed'.format(self.peer))
            self.unpacker.feed(data)
            try:
                # StopIteration means the message is still incomplete: keep reading.
                req = next(self.unpacker)
                break
            except StopIteration:
                continue
        _logger.debug('received req from {} : {}'.format(self.peer, req))
        _logger.debug('exiting recvall from {}'.format(self.peer))
        return req
    def close(self):
        """Close both stream ends and mark the connection closed."""
        self.reader.feed_eof()
        self.writer.close()
        self._is_closed = True
    def is_closed(self):
        """Return True once close() has been called."""
        return self._is_closed
|
from geosquizzy.validation.messages import MESSAGES
class FeatureSyntaxError(SyntaxError):
    """Raised for malformed feature input; message taken from MESSAGES['1']."""
    def __init__(self):
        super(FeatureSyntaxError, self).__init__(MESSAGES['1'])
class FeatureStructureError(Exception):
    """Raised for a structurally invalid feature; message taken from MESSAGES['2']."""
    def __init__(self):
        super(FeatureStructureError, self).__init__(MESSAGES['2'])
class FeatureCoordinatesError(Exception):
    """Raised for invalid feature coordinates; message taken from MESSAGES['3']."""
    def __init__(self):
        super(FeatureCoordinatesError, self).__init__(MESSAGES['3'])
def mymethod(self):
    """Return True when this instance's ``x`` attribute exceeds 100."""
    return self.x > 100


# Build a class dynamically with the three-argument form of type(name, bases, dict).
class_name = "MyClass"
base_classes = tuple()
params = {"x": 10, "check_greater": mymethod}
# Use the class_name variable (the original duplicated the "MyClass" literal
# here, leaving class_name defined but unused).
MyClass = type(class_name, base_classes, params)
obj = MyClass()
print(obj.check_greater())  # x defaults to the class attribute 10 -> False
import asyncio
import functools
import pathlib
import threading
from http import server
import pytest
from digslash import sites
def get_dir(dirname):
    """Return the path of *dirname* located next to this test module, as a str.

    The original reached ``os.path`` through ``pathlib.os`` — an accidental,
    non-public re-export of the os module; use pathlib's own API instead.
    """
    return str(pathlib.Path(__file__).parent / dirname)
@pytest.fixture
def website1():
    """Serve the static 'website-1' fixture on 127.0.0.1:8000 and yield a Site."""
    web_dir = get_dir('website-1')
    httpd = server.HTTPServer(
        ('127.0.0.1', 8000),
        functools.partial(server.SimpleHTTPRequestHandler, directory=web_dir)
    )
    # Daemon thread so a failing test cannot hang the interpreter on exit.
    httpd_thread = threading.Thread(target=httpd.serve_forever)
    httpd_thread.daemon = True
    httpd_thread.start()
    site = sites.Site('http://127.0.0.1:8000/')
    yield site
    # Teardown: stop serving and wait for the thread to finish.
    httpd.server_close()
    httpd.shutdown()
    httpd_thread.join()
@pytest.fixture
def website1_with_duplicates():
    """Like website1, but the Site keeps duplicate pages (deduplicate=False)."""
    web_dir = get_dir('website-1')
    httpd = server.HTTPServer(
        ('127.0.0.1', 8000),
        functools.partial(server.SimpleHTTPRequestHandler, directory=web_dir)
    )
    # Daemon thread so a failing test cannot hang the interpreter on exit.
    httpd_thread = threading.Thread(target=httpd.serve_forever)
    httpd_thread.daemon = True
    httpd_thread.start()
    site = sites.Site('http://127.0.0.1:8000/', deduplicate=False)
    yield site
    # Teardown: stop serving and wait for the thread to finish.
    httpd.server_close()
    httpd.shutdown()
    httpd_thread.join()
@pytest.fixture
def website2():
    """Serve the static 'website-2' fixture on 127.0.0.1:8000 and yield a Site."""
    web_dir = get_dir('website-2')
    httpd = server.HTTPServer(
        ('127.0.0.1', 8000),
        functools.partial(server.SimpleHTTPRequestHandler, directory=web_dir)
    )
    # Daemon thread so a failing test cannot hang the interpreter on exit.
    httpd_thread = threading.Thread(target=httpd.serve_forever)
    httpd_thread.daemon = True
    httpd_thread.start()
    site = sites.Site('http://127.0.0.1:8000/')
    yield site
    # Teardown: stop serving and wait for the thread to finish.
    httpd.server_close()
    httpd.shutdown()
    httpd_thread.join()
def test_handle_duplicates(website1):
    """With deduplication on, duplicate pages (e.g. index.html) are collapsed."""
    asyncio.run(website1.crawl())
    assert set(website1.results.keys()) == {
        'http://127.0.0.1:8000/',
        'http://127.0.0.1:8000/pages/contact.html',
        'http://127.0.0.1:8000/pages/about.html',
        'http://127.0.0.1:8000/pages/feedback.html',
        'http://127.0.0.1:8000/js/script.js',
        'http://127.0.0.1:8000/scripts/feedback.html',
    }
def test_keep_duplicates(website1_with_duplicates):
    """With deduplication off, index.html appears alongside '/'."""
    asyncio.run(website1_with_duplicates.crawl())
    assert set(website1_with_duplicates.results.keys()) == {
        'http://127.0.0.1:8000/',
        'http://127.0.0.1:8000/pages/contact.html',
        'http://127.0.0.1:8000/pages/about.html',
        'http://127.0.0.1:8000/pages/feedback.html',
        'http://127.0.0.1:8000/js/script.js',
        'http://127.0.0.1:8000/scripts/feedback.html',
        'http://127.0.0.1:8000/index.html',
    }
def test_site_response_content_type(website2):
    """Crawl results carry checksum, content type, encoding and referrer source."""
    asyncio.run(website2.crawl())
    assert website2.results == {
        'http://127.0.0.1:8000/': {
            'checksum': '4d651f294542b8829a46d8dc191838bd',
            'content_type': 'text/html',
            'encoding': 'utf-8',
            'source': '',
        },
        'http://127.0.0.1:8000/code.js': {
            'checksum': 'b4577eafb339aab8076a1e069e62d2c5',
            'content_type': 'application/javascript',
            'encoding': 'ascii',
            'source': 'http://127.0.0.1:8000/page.html',
        },
        'http://127.0.0.1:8000/page.html': {
            'checksum': '091ee4d646a8e62a6bb4092b439b07a1',
            'content_type': 'text/html',
            'encoding': 'latin_1',
            'source': 'http://127.0.0.1:8000/',
        }
    }
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-06-05 14:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: relaxes answer_one/answer_two to optional
    # CharFields (blank=True, null=True, max_length=128).
    dependencies = [
        ('user_migration', '0003_auto_20180531_1052'),
    ]
    operations = [
        migrations.AlterField(
            model_name='temporarymigrationuserstore',
            name='answer_one',
            field=models.CharField(blank=True, max_length=128, null=True),
        ),
        migrations.AlterField(
            model_name='temporarymigrationuserstore',
            name='answer_two',
            field=models.CharField(blank=True, max_length=128, null=True),
        ),
    ]
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import *
import json
import requests
import logging
import urllib.request, urllib.error, urllib.parse
import uuid
import random
# Our imports
import emission.core.get_database as edb
try:
    # Use a context manager so the config file handle is closed promptly
    # (the original left key_file open for the life of the process).
    with open('conf/net/ext_service/habitica.json') as key_file:
        key_data = json.load(key_file)
    url = key_data["url"]
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt still
    # propagate; any config problem leaves `url` undefined and is logged.
    logging.exception("habitica not configured, game functions not supported")
def habiticaRegister(username, email, password, our_uuid):
    """
    Ensure the e-mission user *our_uuid* has a matching Habitica account and
    return the Habitica user document.  Depending on local-db state this
    fetches, logs into, or newly registers the Habitica account, and keeps the
    local habitica collection in sync.
    """
    user_dict = {}
    #if user is already in e-mission db, try to load user data
    if edb.get_habitica_db().find({'user_id': our_uuid}).count() == 1:
        try:
            result = habiticaProxy(our_uuid, 'GET', '/api/v3/user', None)
            user_dict = result.json()
            logging.debug("parsed json from GET habitica user = %s" % user_dict)
        #if it fails, then user is in db but not in Habitica, so needs to create new account
        #FIX! Still need to test if this will throw an error correctly
        except urllib.error.HTTPError:
            user_dict = newHabiticaUser(username, email, password, our_uuid)
        edb.get_habitica_db().update({"user_id": our_uuid},{"$set":
            initUserDoc(our_uuid, username, password, user_dict)
           },upsert=True)
        #if user_dict['data']['party']['_id']:
          #edb.get_habitica_db().update({"user_id": our_uuid},{"$set": {'habitica_group_id': user_dict['data']['party']['_id']}},upsert=True)
        #now we have the user data in user_dict, so check if db is correct
        #Fix! should prob check here if our db is right
    #if user is not in db, try to log in using email and password
    else:
        try:
            login_url = url + '/api/v3/user/auth/local/login'
            user_request = {'username': username,'email': email,'password': password}
            logging.debug("About to login %s"% user_request)
            login_response = requests.post(login_url, json=user_request)
            logging.debug("response = %s" % login_response)
            #if 401 error, then user is not in Habitica, so create new account and pass user to user_dict
            if login_response.status_code == 401:
                user_dict = newHabiticaUser(username, email, password, our_uuid)
            else:
                logging.debug("habitica http response from login = %s" % login_response)
                user_auth = json.loads(login_response.text)
                logging.debug("parsed json from habitica has keys = %s" % user_auth)
                #login only returns user auth headers, so now get authenticated user and put it in user_dict
                auth_headers = {'x-api-user': user_auth['data']['id'], 'x-api-key': user_auth['data']['apiToken']}
                get_user_url = url + '/api/v3/user'
                result = requests.request('GET', get_user_url, headers=auth_headers, json={})
                logging.debug("result = %s" % result)
                result.raise_for_status()
                user_dict = result.json()
                user_dict['data']['apiToken'] = user_auth['data']['apiToken']
                logging.debug("parsed json from GET habitica user = %s" % user_dict)
        #If if fails to login AND to create new user, throw exception
        except:
            logging.exception("Exception while trying to login/signup!")
        logging.debug("habitica user to be created in our db = %s" % user_dict['data'])
        #Now save new user (user_dict) to our db
        #Since we are randomly generating the password, we store it in case users
        #want to access their Habitica account from the browser
        #Need to create a way from them to retrieve username/password
        #metrics_data is used to calculate points based on km biked/walked
        #last_timestamp is the last time the user got points, and bike/walk_count are the leftover km
        habitica_user_table = edb.get_habitica_db()
        insert_doc = initUserDoc(our_uuid, username, password, user_dict)
        insert_doc.update({'user_id': our_uuid})
        habitica_user_table.insert(insert_doc)
        #Since we have a new user in our db, create its default habits (walk, bike)
        setup_default_habits(our_uuid)
    return user_dict
def initUserDoc(user_id, username, password, user_dict):
    """Build the local-db document for a Habitica-linked user from the
    Habitica API response *user_dict* (expects user_dict['data']).

    Note: *user_id* is accepted for interface consistency but not stored here;
    callers add it separately.
    """
    habitica_data = user_dict['data']
    return {
        'task_state': {},
        'habitica_username': username,
        'habitica_password': password,
        'habitica_id': habitica_data['_id'],
        'habitica_token': habitica_data['apiToken'],
    }
def newHabiticaUser(username, email, password, our_uuid):
    """Register a brand-new Habitica account and return the parsed response.

    Raises requests.HTTPError if registration fails.  *our_uuid* is currently
    unused here; kept for call-site symmetry with habiticaRegister.
    """
    register_url = url + '/api/v3/user/auth/local/register'
    user_request = {'username': username,'email': email,'password': password,'confirmPassword': password}
    logging.debug("About to register %s"% user_request)
    u = requests.post(register_url, json=user_request)
    # Bail out if we get an error
    u.raise_for_status()
    user_dict = json.loads(u.text)
    logging.debug("parsed json from habitica has keys = %s" % list(user_dict.keys()))
    return user_dict
def habiticaProxy(user_uuid, method, method_url, method_args):
    """Forward an HTTP call to Habitica on behalf of *user_uuid*, using the
    credentials stored in the local db, and return the Response with the
    user's auth info injected into its JSON body under 'auth'.

    Raises requests.HTTPError on a non-2xx Habitica response.
    """
    logging.debug("For user %s, about to proxy %s method %s with args %s" %
                  (user_uuid, method, method_url, method_args))
    stored_cfg = get_user_entry(user_uuid)
    auth_headers = {'x-api-user': stored_cfg['habitica_id'],
                    'x-api-key': stored_cfg['habitica_token']}
    logging.debug("auth_headers = %s" % auth_headers)
    habitica_url = url + method_url
    result = requests.request(method, habitica_url,
                              headers=auth_headers,
                              json=method_args)
    logging.debug("result = %s" % result)
    result.raise_for_status()
    # result['testing'] = 'test'
    # Rewrite the response body in place so callers also receive the auth pair.
    temp = result.json()
    temp['auth'] = {'apiId': stored_cfg['habitica_id'],
                    'apiToken': stored_cfg['habitica_token']}
    result.encoding, result._content = 'utf8', json.dumps(temp).encode()
    return result
def setup_party(user_id, group_id_from_url, inviterId):
    """Invite *user_id* into the Habitica party *group_id_from_url* (on behalf
    of the inviter) and have them join it; return the group id.

    Raises RuntimeError if the user is already in a party, and
    requests.HTTPError if the invite or join call fails.
    """
    #check if user is already in a party
    method_url = "/api/v3/user"
    result = habiticaProxy(user_id, 'GET', method_url, None)
    data = result.json()
    if '_id' in data['data']['party']:
        group_id = data['data']['party']['_id']
        logging.info("User %s is already part of group %s" % (user_id, group_id))
        raise RuntimeError("User %s is already a part of group %s" % (user_id, group_id))
    #if the user is not already in a party, then add them to the party to which they were invited
    else:
        group_id = group_id_from_url
        invite_uri = "/api/v3/groups/"+group_id+"/invite"
        logging.debug("invite user to party api url = %s" % invite_uri)
        user_val = list(edb.get_habitica_db().find({"user_id": user_id}))[0]
        method_args = {'uuids': [user_val['habitica_id']], 'inviter': group_id, 'emails': []}
        # Map the inviter's Habitica id back to their e-mission uuid, since the
        # invite must be issued with the inviter's credentials.
        emInviterId = edb.get_habitica_db().find_one({"habitica_id": inviterId})["user_id"]
        response = habiticaProxy(emInviterId, 'POST', invite_uri, method_args)
        logging.debug("invite user to party response = %s" % response)
        join_url = "/api/v3/groups/"+group_id+"/join"
        response2 = habiticaProxy(user_id, 'POST', join_url, {})
        response.raise_for_status()
        response2.raise_for_status()
        return group_id
def setup_default_habits(user_id):
    """Create the default habits for a newly-linked user.

    create_habit() is idempotent (it returns the existing habit id when one
    with the same text already exists), so this is safe to call repeatedly.
    The returned ids were previously bound to unused locals; they are
    deliberately discarded.
    """
    bike_walk_habit = {'type': "habit", 'text': "Bike and Walk", 'notes': "Automatically get points for every 1 km walked or biked. ***=== DO NOT EDIT BELOW THIS POINT ===*** AUTOCHECK: {\"mapper\": \"active_distance\", \"args\": {\"walk_scale\": 1000, \"bike_scale\": 1000}}", 'up': True, 'down': False, 'priority': 2}
    create_habit(user_id, bike_walk_habit)
    invite_friends = {'type': "habit", 'text': "Spread the word", 'notes': "Get points for inviting your friends! We're better together.", 'up': True, 'down': False, 'priority': 2}
    create_habit(user_id, invite_friends)
def create_habit(user_id, new_habit):
    """Return the id of the user's habit matching new_habit's text, creating it if absent."""
    method_uri = "/api/v3/tasks/user"
    get_habits_uri = method_uri + "?type=habits"
    # Look for an existing habit with the same text before creating one.
    existing = habiticaProxy(user_id, 'GET', get_habits_uri, None).json()
    for habit in existing['data']:
        if habit['text'] == new_habit['text']:
            return habit['_id']
    # No match found: create the habit and return its id.
    created = habiticaProxy(user_id, 'POST', method_uri, new_habit).json()
    return created['data']['_id']
# Should we have an accessor class for this?
# Part of the integration, not part of the standard timeseries
# Should we have an accessor class for this?
# Part of the integration, not part of the standard timeseries
def get_user_entry(user_id):
    """Return the single habitica config entry for user_id.

    :raises ValueError: if there is not exactly one entry for this user.
        (Replaces the old bare assert, which raised an opaque AssertionError
        and silently disappears under ``python -O``.)
    """
    user_query = {'user_id': user_id}
    matches = edb.get_habitica_db().find(user_query).count()
    if matches != 1:
        raise ValueError(
            "found %d habitica entries for user %s, expected exactly 1"
            % (matches, user_id))
    stored_cfg = edb.get_habitica_db().find_one(user_query)
    return stored_cfg
def save_user_entry(user_id, user_entry):
    """Persist user_entry to the habitica db.

    The assert guards against accidentally writing an entry under the wrong
    user's key; user_entry["user_id"] must match user_id.
    """
    assert(user_entry["user_id"] == user_id)
    return edb.save(edb.get_habitica_db(), user_entry)
|
import json
from django.contrib import messages
from crispy_forms import layout
from django import forms
from django.template import defaultfilters
from django.http import Http404
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy
from django_cradmin import crapp
from django_cradmin.crispylayouts import PrimarySubmit
from django_cradmin.viewhelpers import formbase
from django_cradmin.viewhelpers import listbuilder
from django_cradmin.viewhelpers import listbuilderview
from devilry.apps.core.models import Assignment
from devilry.apps.core.models import Candidate
from devilry.apps.core.models import Period
from devilry.utils.passed_in_previous_period import PassedInPreviousPeriod, SomeCandidatesDoesNotQualifyToPass, \
NoCandidatesPassed
class SelectPeriodForm(forms.Form):
    """Form for picking the earliest semester to approve results from.

    The queryset is injected via the ``period_queryset`` kwarg at
    construction time (the class-level queryset is just a placeholder).
    """
    semester = forms.ModelChoiceField(
        widget=forms.RadioSelect(),
        queryset=Period.objects.none(),
        empty_label=None,
    )

    def __init__(self, *args, **kwargs):
        period_queryset = kwargs.pop('period_queryset')
        super(SelectPeriodForm, self).__init__(*args, **kwargs)
        semester_field = self.fields['semester']
        semester_field.queryset = period_queryset
        semester_field.label_from_instance = self.label_from_instance

    @staticmethod
    def label_from_instance(obj):
        """Render a period as 'short - long (start - end)' for the radio labels."""
        start = defaultfilters.date(obj.start_time, 'SHORT_DATETIME_FORMAT')
        end = defaultfilters.date(obj.end_time, 'SHORT_DATETIME_FORMAT')
        return "{} - {} ({} - {})".format(obj.short_name, obj.long_name, start, end)
class SelectPeriodView(formbase.FormView):
    """Wizard step 1: select the earliest semester to approve results from."""
    form_class = SelectPeriodForm
    template_name = 'devilry_admin/assignment/passed_previous_period/select-period-view.django.html'

    def __init__(self, **kwargs):
        super(SelectPeriodView, self).__init__(**kwargs)
        # Set to True by get_form_kwargs() when no earlier semester matches;
        # the template uses it to render a notice instead of the form.
        self.no_past_period = False

    def dispatch(self, request, *args, **kwargs):
        self.assignment = self.request.cradmin_role
        self.devilryrole = self.request.cradmin_instance.get_devilryrole_for_requestuser()
        # Only department admins may handle fully anonymous assignments.
        if self.assignment.is_fully_anonymous and self.devilryrole != 'departmentadmin':
            raise Http404()
        # Period admins may not handle semi-anonymous assignments.
        if self.assignment.is_semi_anonymous and self.devilryrole == 'periodadmin':
            raise Http404()
        return super(SelectPeriodView, self).dispatch(request, *args, **kwargs)

    def get_pagetitle(self):
        return ugettext_lazy('Select the earliest semester you want to approve for')

    def __get_period_queryset(self):
        # Earlier periods on the same subject that contain an assignment with
        # the same short name as the current one.
        return Period.objects.filter(
            parentnode=self.assignment.parentnode.parentnode,
            assignments__short_name=self.assignment.short_name
        ).prefetch_related('assignments')\
            .exclude(start_time__gte=self.assignment.parentnode.start_time)\
            .order_by('start_time')

    def get_buttons(self):
        return [
            PrimarySubmit('Next', ugettext_lazy('Next'))
        ]

    def get_field_layout(self):
        return [
            layout.Div('semester', css_class='cradmin-globalfields')
        ]

    def form_valid(self, form):
        period = form.cleaned_data['semester']
        return redirect(self.get_redirect_url(period))

    def get_form_kwargs(self):
        kwargs = super(SelectPeriodView, self).get_form_kwargs()
        kwargs['period_queryset'] = self.__get_period_queryset()
        if len(kwargs['period_queryset']) <= 0:
            self.no_past_period = True
        return kwargs

    def get_context_data(self, **kwargs):
        context = super(SelectPeriodView, self).get_context_data(**kwargs)
        if self.no_past_period:
            context['no_past_period'] = True
        context['assignment'] = self.assignment
        return context

    def get_redirect_url(self, period):
        # Next wizard step: list the assignments for the chosen period.
        return self.request.cradmin_app.reverse_appurl(
            'assignments',
            kwargs={'period_id': period.id}
        )
class AssignmentItemValue(listbuilder.itemvalue.TitleDescription):
    """List item showing one assignment together with its semester's data."""
    template_name = 'devilry_admin/assignment/passed_previous_period/assignment-item-value.django.html'

    def __init__(self, **kwargs):
        super(AssignmentItemValue, self).__init__(**kwargs)
        # Expose period and grading data for the template.
        period = self.value.parentnode
        self.period_start = period.start_time
        self.period_end = period.end_time
        self.max_points = self.value.max_points
        self.passing_grade_min_points = self.value.passing_grade_min_points

    def get_title(self):
        """Title is '<assignment long name> - <period long name>'."""
        return '{} - {}'.format(self.value.long_name, self.value.parentnode.long_name)
class PassedPreviousAssignmentView(listbuilderview.View):
    """Wizard step 2: list the matching assignments from earlier semesters."""
    model = Assignment
    template_name = 'devilry_admin/assignment/passed_previous_period/assignment-overview.django.html'
    value_renderer_class = AssignmentItemValue

    def dispatch(self, request, *args, **kwargs):
        # period_id comes from the URL produced by the previous wizard step.
        self.period = Period.objects.get(id=kwargs.pop('period_id'))
        self.assignment = self.request.cradmin_role
        self.devilryrole = self.request.cradmin_instance.get_devilryrole_for_requestuser()
        # Only department admins may handle fully anonymous assignments.
        if self.assignment.is_fully_anonymous and self.devilryrole != 'departmentadmin':
            raise Http404()
        # Period admins may not handle semi-anonymous assignments.
        if self.assignment.is_semi_anonymous and self.devilryrole == 'periodadmin':
            raise Http404()
        return super(PassedPreviousAssignmentView, self).dispatch(request, *args, **kwargs)

    def get_queryset_for_role(self, role):
        # Same-named assignments on the same subject, from the selected
        # period's start up to (excluding) the current assignment's period end.
        return self.model.objects.filter(
            short_name=role.short_name,
            parentnode__start_time__gte=self.period.start_time,
            parentnode__end_time__lt=self.assignment.period.end_time,
            parentnode__parentnode=self.assignment.parentnode.parentnode
        ).select_related('parentnode__parentnode')

    def get_pagetitle(self):
        return ugettext_lazy('Confirm assignments')

    def get_context_data(self, **kwargs):
        context = super(PassedPreviousAssignmentView, self).get_context_data(**kwargs)
        context['period_id'] = self.period.id
        return context
class CandidateItemValue(listbuilder.itemvalue.TitleDescription):
    """List item for one candidate, showing their grade converted to the current scale."""
    template_name = 'devilry_admin/assignment/passed_previous_period/candidate-item-value.django.html'

    def __init__(self, **kwargs):
        # These kwargs are injected by CandidateListbuilder through
        # value_and_frame_renderer_kwargs and must be popped before calling
        # the superclass (which does not expect them).
        self.current_assignment = kwargs.pop('current_assignment')
        self.devilryrole = kwargs.pop('devilryrole')
        self.util_class = kwargs.pop('util_class')
        super(CandidateItemValue, self).__init__(**kwargs)
        self.assignment = Assignment.objects \
            .prefetch_point_to_grade_map() \
            .get(id=self.value.assignment_group.parentnode.id)
        self.period = self.value.assignment_group.parentnode.parentnode
        self.feedback = self.value.assignment_group.cached_data.last_published_feedbackset
        # Points the candidate would get on the current assignment's scale.
        self.calculated_points = self.util_class.convert_points(self.feedback)
class CandidateListbuilder(listbuilder.base.List):
    """Builds the list of candidates that qualify to pass from a previous period."""

    def __init__(self, current_assignment, devilryrole, util_class):
        super(CandidateListbuilder, self).__init__()
        self.current_assignment = current_assignment
        self.devilry_role = devilryrole
        self.util_class = util_class

    def __get_candidate_queryset(self):
        # Delegates the qualification logic to PassedInPreviousPeriod.
        return self.util_class.get_queryset()

    def build(self):
        """Populate the list with one CandidateItemValue per qualifying candidate."""
        renderer_kwargs = {
            'current_assignment': self.current_assignment,
            'devilryrole': self.devilry_role,
            'util_class': self.util_class,
        }
        self.extend_with_values(
            value_iterable=self.__get_candidate_queryset(),
            value_renderer_class=CandidateItemValue,
            frame_renderer_class=listbuilder.itemframe.DefaultSpacingItemFrame,
            value_and_frame_renderer_kwargs=renderer_kwargs)
class ApprovePreviousForm(forms.Form):
    """Carries the candidate ids as a hidden field.

    BUG FIX: the original assigned ``forms.HiddenInput()`` (a *widget*)
    directly as a class attribute, so 'candidates' was never a real form
    field. Use a CharField with a hidden widget instead; required=False
    keeps the form always-valid, matching the original behavior (the view
    reads form.data['candidates'] directly).
    """
    candidates = forms.CharField(widget=forms.HiddenInput(), required=False)
class ApprovePreviousAssignments(formbase.FormView):
    """Final wizard step: confirm and mark the qualified candidates as passed."""
    form_class = ApprovePreviousForm
    template_name = 'devilry_admin/assignment/passed_previous_period/confirm-view.django.html'

    def dispatch(self, request, *args, **kwargs):
        self.period = Period.objects.get(id=kwargs.pop('period_id'))
        self.assignment = self.request.cradmin_role
        self.devilryrole = self.request.cradmin_instance.get_devilryrole_for_requestuser()
        self.util_class = PassedInPreviousPeriod(self.assignment, self.period, self.request.user)
        # Only department admins may handle fully anonymous assignments.
        if self.assignment.is_fully_anonymous and self.devilryrole != 'departmentadmin':
            raise Http404()
        # Period admins may not handle semi-anonymous assignments.
        if self.assignment.is_semi_anonymous and self.devilryrole == 'periodadmin':
            raise Http404()
        return super(ApprovePreviousAssignments, self).dispatch(request, *args, **kwargs)

    def get_pagetitle(self):
        return ugettext_lazy('Approve assignments')

    def __get_candidate_listbuilder(self):
        # Local renamed: the original's `listbuilder` shadowed the imported
        # `listbuilder` module inside this method.
        candidate_listbuilder = CandidateListbuilder(self.assignment, self.devilryrole, self.util_class)
        candidate_listbuilder.build()
        return candidate_listbuilder

    def __get_candidate_ids(self):
        return [candidate.id for candidate in self.util_class.get_queryset()]

    def get_context_data(self, **kwargs):
        context = super(ApprovePreviousAssignments, self).get_context_data(**kwargs)
        context['period_id'] = self.period.id
        context['candidate_list'] = self.__get_candidate_listbuilder()
        return context

    def get_buttons(self):
        return [
            PrimarySubmit('Confirm', ugettext_lazy('Confirm'))
        ]

    def get_field_layout(self):
        # The id list is rendered as a hidden input; it comes back in
        # form.data['candidates'] as a JSON-parsable list literal.
        return [
            layout.Hidden('candidates', self.__get_candidate_ids())
        ]

    def __get_candidates_displayname(self, candidates):
        """Return the candidates' display names as a comma-separated string."""
        candidate_short_name = ""
        for candidate in candidates:
            candidate_short_name += '{}, '.format(candidate.relatedstudent.user.get_displayname())
        return candidate_short_name[:-2]

    def form_valid(self, form):
        try:
            candidates = Candidate.objects.filter(id__in=json.loads(form.data['candidates']))\
                .select_related('relatedstudent__user')
            self.util_class.set_passed_in_current_period(
                candidates,
                self.request.user
            )
        except SomeCandidatesDoesNotQualifyToPass:
            messages.warning(
                self.request,
                ugettext_lazy('Some students does not qualify to pass the assignment.')
            )
        except NoCandidatesPassed:
            messages.warning(
                self.request,
                ugettext_lazy('No students are qualified to get approved '
                              'for this assignment from a previous assignment.')
            )
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed SystemExit
            # and KeyboardInterrupt; catch Exception only.
            messages.warning(
                self.request,
                ugettext_lazy('An error occurred.')
            )
        else:
            messages.success(
                self.request,
                ugettext_lazy(
                    '%(students)s was marked as approved for this assignment.') % {
                    'students': self.__get_candidates_displayname(candidates)
                }
            )
        return redirect(self.get_success_url())

    def get_success_url(self):
        return self.request.cradmin_instance.reverse_url(appname="overview", viewname=crapp.INDEXVIEW_NAME)
|
"""
Find Duplicate Subtrees
Given the root of a binary tree, return all duplicate subtrees.
For each kind of duplicate subtrees, you only need to return the root node of any one of them.
Two trees are duplicate if they have the same structure with the same node values.
Example 1:
Input: root = [1,2,3,4,null,2,4,null,null,4]
Output: [[2,4],[4]]
Example 2:
Input: root = [2,1,1]
Output: [[1]]
Example 3:
Input: root = [2,2,2,3,null,3,null]
Output: [[2,3],[3]]
Constraints:
The number of the nodes in the tree will be in the range [1, 10^4]
-200 <= Node.val <= 200
"""
    def findDuplicateSubtrees(self, root: TreeNode) -> List[TreeNode]:
        """Return one representative root node per duplicated subtree.

        Delegates to self.preorder(), which serializes each subtree and
        counts occurrences in self.mp; duplicates collect in self.rs.
        """
        if root is None:
            return []
        # serialization string -> number of times that subtree has been seen
        self.mp = {}
        # one representative root per duplicated serialization
        self.rs = []
        self.preorder(root)
        return self.rs
def preorder(self, root):
if root:
ls = str(root.val) + "-" + self.preorder(root.left) + "-" + self.preorder(root.right)
count = self.mp.get(ls, 0)
if count == 1:
self.rs.append(root)
self.mp[ls] = count + 1
return ls
else:
return "#" |
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a library of utilies for Swarming bot scripts
import json
import os
import tempfile
import subprocess
import sys
import time
from shutil import which
def log(msg):
    """Print msg, flushing stdout immediately so Swarming captures output in order."""
    print(msg, flush=True)
def runcmd(cmd):
    """Echo and execute cmd, streaming its output to this process's stdout/stderr.

    Returns the subprocess.CompletedProcess (exit status not checked here).
    """
    print('Run command: {}'.format(' '.join(cmd)), flush=True)
    return subprocess.run(cmd, stdout=sys.stdout, stderr=sys.stderr)
def load_params(test_params, params_file='params.json', required_keys=()):
    '''Load the JSON params_file into test_params.

    This overrides the test_params with the values found in params_file. The
    optional required_keys is an iterable of keys that must be found in
    params_file; a UserWarning is raised for the first missing one.

    Fixes: mutable default argument ([] -> ()), `not k in` idiom, and the
    manual key-copy loop (now dict.update, same override semantics).
    '''
    with open(params_file, 'r') as f:
        loaded = json.load(f)
    for key in required_keys:
        if key not in loaded:
            raise UserWarning('Missing required key in params.json: {}'.format(key))
    test_params.update(loaded)
def is_valid_json(filename):
    '''Return True if filename contains valid JSON, False otherwise.'''
    with open(filename, 'r') as f:
        try:
            # Parse for validation only; the result is discarded.
            json.load(f)
        except json.JSONDecodeError as err:
            # BUG FIX: was bare `JSONDecodeError`, an undefined name here
            # (only the json module is imported), so any invalid file raised
            # NameError instead of returning False.
            log('Invalid JSON: {}'.format(err))
            return False
    return True
class BotUtil:
    '''Various utilities that rely on ADB. Since using different ADB commands
    can lead to losing the device connection, this class takes a path to ADB
    in its constructor, and makes sure to use this ADB across all commands.'''

    def __init__(self, adb_path):
        # adb_path must point at an existing adb binary.
        assert(os.path.isfile(adb_path))
        self.adb_path = adb_path
        # Path to the gapit binary; empty until set_gapit_path() is called.
        self.gapit_path = ''

    def adb(self, args, timeout=1):
        '''Log and run an ADB command, returning a subprocess.CompletedProcess
        with output captured as text. Raises on non-zero exit or timeout
        (default timeout is 1 second; pass a larger one for slow commands).'''
        cmd = [self.adb_path] + args
        print('ADB command: ' + ' '.join(cmd), flush=True)
        return subprocess.run(cmd, timeout=timeout, check=True, capture_output=True, text=True)

    def set_gapit_path(self, gapit_path):
        '''Set path to gapit, must be called once before gapit() can be used.'''
        self.gapit_path = gapit_path

    def gapit(self, verb, args, stdout=sys.stdout, stderr=sys.stderr):
        '''Build and run gapit command. Requires gapit path to be set.'''
        assert(self.gapit_path != '')
        cmd = [self.gapit_path, verb]
        # Force gapis to use the same adb binary as the rest of this class.
        cmd += ['-gapis-args=-adb ' + self.adb_path]
        cmd += args
        print('GAPIT command: ' + ' '.join(cmd), flush=True)
        return subprocess.run(cmd, stdout=stdout, stderr=stderr)

    def is_package_installed(self, package):
        '''Check if package is installed on the device.'''
        line_to_match = 'package:' + package
        cmd = [self.adb_path, 'shell', 'pm', 'list', 'packages']
        # Capture to a temp file rather than memory; package lists can be long.
        with tempfile.TemporaryFile(mode='w+') as tmp:
            subprocess.run(cmd, timeout=2, check=True, stdout=tmp)
            tmp.seek(0)
            for line in tmp.readlines():
                line = line.rstrip()
                if line == line_to_match:
                    return True
        return False

    def install_apk(self, test_params):
        '''Install the test APK
        test_params is a dict where:
        {
          "apk": "foobar.apk",             # APK file
          "package": "com.example.foobar", # Package name
          "force_install": true|false,     # (Optional): force APK installation,
                                           # even if the package is already found
                                           # on the device
          "install_flags": ["-g", "-t"],   # (Optional) list of flags to pass
                                           # to adb install
          ...
        }
        '''
        force = False
        if 'force_install' in test_params.keys():
            force = test_params['force_install']
        # -g: grant all needed permissions, -t: accept test APK
        install_flags = ['-g', '-t']
        if 'install_flags' in test_params.keys():
            install_flags = test_params['install_flags']
        if force and self.is_package_installed(test_params['package']):
            cmd = [self.adb_path, 'uninstall', test_params['package']]
            log('Force install, start by uninstalling: ' + ' '.join(cmd))
            subprocess.run(cmd, timeout=20, check=True, stdout=sys.stdout, stderr=sys.stderr)
        if force or not self.is_package_installed(test_params['package']):
            cmd = [self.adb_path, 'install']
            cmd += install_flags
            cmd += [test_params['apk']]
            log('Install APK with command: ' + ' '.join(cmd))
            # Installing big APKs can take more than a minute, but can also
            # get stuck, so give a big timeout to this command.
            subprocess.run(cmd, timeout=120, check=True, stdout=sys.stdout, stderr=sys.stderr)
            # Sleep a bit, as the app may not be listed right after install
            time.sleep(1)
        else:
            log('Skip install of {} because package {} is already installed.'.format(test_params['apk'], test_params['package']))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# task from the https://www.hackerrank.com/challenges/swap-case/problem
def swap_case(s):
    """Return s with the case of every cased character inverted.

    The original hand-rolled this with map/lambda and an `== True`
    comparison; str.swapcase() is the built-in equivalent.
    """
    return s.swapcase()
if __name__ == '__main__':
    # Read one line from stdin, swap its case, and echo the result.
    print(swap_case(input()))
|
from .account_activity import AccountActivity
from .account import Account
from .amount_owing import AmountOwing
from .contact import Contact
from .current_account import CurrentAccount
from .skipthedishes_order import SkipTheDishesOrder
__all__ = [
'AccountActivity',
'Account',
'AmountOwing',
'Contact',
'CurrentAccount',
'SkipTheDishesOrder',
]
|
from keris.callbacks import Callback
from time import time
class ModelCheckpoint(Callback):
    """Save the model at the end of each epoch.

    When save_best_only is True, a checkpoint is written only when the
    monitored metric improves (higher is better for accuracies, lower for
    losses); otherwise every epoch is saved.
    """

    def __init__(self, filepath, monitor='val_loss', save_best_only=False):
        if monitor not in ['train_loss', 'train_acc', 'val_loss', 'val_acc']:
            raise ValueError('metric to monitor is not available')
        self.filepath = filepath
        self.monitor = monitor
        self.save_best_only = save_best_only
        self.best = None  # best metric value seen so far
        # Accuracies improve upward, losses downward.
        self.mode = 'max' if monitor in ['train_acc', 'val_acc'] else 'min'

    def on_epoch_end(self, epoch, logs=None):
        """Save at most one checkpoint for this epoch.

        BUG FIX: when save_best_only was False, the original saved the model
        and then fell through into the best-metric branch, saving a *second*
        checkpoint for the same epoch (always on the first epoch, and again
        on every later improvement).
        """
        if not self.save_best_only:
            self._save_model(epoch)
            return
        metric = logs[self.monitor]
        if self.best is None:
            # First epoch establishes the baseline and is always saved.
            self.best = metric
            self._save_model(epoch)
            return
        improved = metric > self.best if self.mode == 'max' else metric < self.best
        if improved:
            self.best = metric
            self._save_model(epoch)

    def _save_model(self, epoch):
        # Timestamped so successive saves never overwrite each other.
        filepath = '%s-%d-%f' % (self.filepath, epoch, time())
        self.model.save(filepath)
|
from io import BytesIO
from typing import Any, Dict, Type, TypeVar, Union
import attr
from ..types import UNSET, File, Unset
T = TypeVar("T", bound="BodyUploadFileTestsUploadPost")
@attr.s(auto_attribs=True)
class BodyUploadFileTestsUploadPost:
    """Request body model for the tests/upload endpoint (auto-generated
    openapi client code — regenerate rather than hand-edit)."""

    # The uploaded file; required.
    some_file: File
    # Optional string field; UNSET means "omit from the serialized dict".
    some_string: Union[Unset, str] = "some_default_string"

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, omitting UNSET optional fields."""
        some_file = self.some_file.to_tuple()
        some_string = self.some_string
        field_dict: Dict[str, Any] = {}
        field_dict.update(
            {
                "some_file": some_file,
            }
        )
        if some_string is not UNSET:
            field_dict["some_string"] = some_string
        return field_dict

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Deserialize from a dict; 'some_file' is wrapped in a File/BytesIO."""
        d = src_dict.copy()
        some_file = File(payload=BytesIO(d.pop("some_file")))
        some_string = d.pop("some_string", UNSET)
        body_upload_file_tests_upload_post = cls(
            some_file=some_file,
            some_string=some_string,
        )
        return body_upload_file_tests_upload_post
|
import tm1637
from machine import Pin

# Four-digit seven-segment TM1637 display driven over two GPIO pins.
tm = tm1637.TM1637(clk=Pin(0), dio=Pin(1))
# Show "12:59" (two two-digit numbers with the colon).
tm.numbers(12, 59)
# Show a signed number, e.g. "-123".
tm.number(-123)
# tm.temperature(24)  # displaying temperature is not recommended
# all LEDS on "88:88"
tm.write([127, 255, 127, 127])
# all LEDS off
tm.write([0, 0, 0, 0])
# Each list entry is a bitmask driving one digit's LED segments.
# The digit "8" lights 7 segments; 255 additionally lights the colon/dot bit.
from aenum import Enum
from peewee import CharField
class NodeState(Enum):
    """Lifecycle states of a node; values are the strings stored in the DB."""
    ONLINE = "online"
    OFFLINE = "offline"
    SUSPENDED = "suspended"
class NodeStateField(CharField):
    """Peewee field storing a NodeState enum as its string value."""

    def db_value(self, value):
        """Convert NodeState -> str for storage.

        BUG FIX: pass None through so nullable columns don't raise
        AttributeError on `None.value`.
        """
        return None if value is None else value.value

    def python_value(self, value):
        """Convert the stored string back to NodeState; NULL stays None."""
        return None if value is None else NodeState(value)
|
from gym_minigrid.minigrid import Cell, Grid, MiniGridEnv
from gym_minigrid.entities import Ball, Box, Door, Key, COLORS, make
def _reject_next_to(env, pos):
"""
Function to filter out object positions that are right next to
the agent's starting point
"""
si, sj = env.agent.pos
i, j = pos
d = abs(si - i) + abs(sj - j)
return d < 2
class Room(object):
    """Bookkeeping for one rectangular room: geometry, doors, neighbors, contents."""

    def __init__(self, top, size):
        # Top-left corner and size (tuples)
        self.top = top
        self.size = size
        # Per-wall door objects, door positions, and adjacent rooms,
        # keyed by orientation; all initially None.
        sides = ('right', 'down', 'left', 'up')
        self.doors = dict.fromkeys(sides)
        self.door_pos = dict.fromkeys(sides)
        self.neighbors = dict.fromkeys(sides)
        # Indicates if this room is behind a locked door
        self.locked = False
        # List of objects contained
        self.objs = []
class RoomGrid(MiniGridEnv):
    """
    Environment with multiple rooms and random objects.
    This is meant to serve as a base class for other environments.
    """

    # Wall directions in clockwise order; opposite wall is 2 steps away.
    ORIENTATIONS = ['right', 'down', 'left', 'up']

    def __init__(
        self,
        room_size=7,
        num_rows=3,
        num_cols=3,
        max_steps=100,
        **kwargs
    ):
        # room_size includes the walls, so it must be at least 3 to leave
        # interior cells; adjacent rooms share a wall, hence (size - 1) stride.
        assert room_size > 0
        assert room_size >= 3
        assert num_rows > 0
        assert num_cols > 0
        self.room_size = room_size
        self.num_rows = num_rows
        self.num_cols = num_cols
        height = (room_size - 1) * num_rows + 1
        width = (room_size - 1) * num_cols + 1
        # By default, this environment has no mission
        self.mission = ''
        super().__init__(
            width=width,
            height=height,
            max_steps=max_steps,
            **kwargs
        )

    def room_from_pos(self, i, j):
        """Get the room a given position maps to"""
        assert i >= 0
        assert j >= 0
        i //= (self.room_size - 1)
        j //= (self.room_size - 1)
        assert i < self.num_rows
        assert j < self.num_cols
        return self.room_grid[i][j]

    def _gen_grid(self, height, width):
        """Build rooms, walls and door positions; place the agent in the middle."""
        # Create the grid
        self.grid = Grid(height, width)
        self.room_grid = []
        # For each row of rooms
        for i in range(0, self.num_rows):
            row = []
            # For each column of rooms
            for j in range(0, self.num_cols):
                room = Room(
                    (i * (self.room_size - 1), j * (self.room_size - 1)),
                    (self.room_size, self.room_size)
                )
                row.append(room)
                # Generate the walls for this room
                self.wall_rect(*room.top, *room.size)
            self.room_grid.append(row)
        # For each row of rooms
        for i in range(0, self.num_rows):
            # For each column of rooms
            for j in range(0, self.num_cols):
                room = self.room_grid[i][j]
                i_l, j_l = (room.top[0] + 1, room.top[1] + 1)
                i_m, j_m = (room.top[0] + room.size[0] - 1, room.top[1] + room.size[1] - 1)
                # Door positions
                if j < self.num_cols - 1:
                    room.neighbors['right'] = self.room_grid[i][j + 1]
                    room.door_pos['right'] = (self.rng.randint(i_l, i_m), j_m)
                if i < self.num_rows - 1:
                    room.neighbors['down'] = self.room_grid[i + 1][j]
                    room.door_pos['down'] = (i_m, self.rng.randint(j_l, j_m))
                if j > 0:
                    room.neighbors['left'] = self.room_grid[i][j - 1]
                    # Shared wall: reuse the neighbor's already-sampled position.
                    room.door_pos['left'] = room.neighbors['left'].door_pos['right']
                if i > 0:
                    room.neighbors['up'] = self.room_grid[i - 1][j]
                    room.door_pos['up'] = room.neighbors['up'].door_pos['down']
        # The agent starts in the middle, facing right
        self.agent.pos = (
            (self.num_rows // 2) * (self.room_size - 1) + (self.room_size // 2),
            (self.num_cols // 2) * (self.room_size - 1) + (self.room_size // 2)
        )
        self.agent.state = 'right'

    def place_in_room(self, i, j, obj):
        """
        Add an existing object to room (i, j)
        """
        room = self.room_grid[i][j]
        self.place_obj(
            obj,
            room.top,
            room.size,
            reject_fn=_reject_next_to,
            max_tries=1000
        )
        room.objs.append(obj)

    def add_object(self, i, j, kind=None, color=None):
        """
        Add a new object to room (i, j).
        kind and color are sampled at random when not given.
        """
        if kind is None:
            kind = self.rng.choice(['key', 'ball', 'box'])
        if color is None:
            color = self.rng.choice(COLORS)
        obj = make(kind, color)
        self.place_in_room(i, j, obj)
        return obj

    def add_door(self, i, j, door_idx=None, color=None, locked=None):
        """
        Add a door to a room, connecting it to a neighbor
        """
        room = self.room_grid[i][j]
        if door_idx is None:
            # Need to make sure that there is a neighbor along this wall
            # and that there is not already a door
            while True:
                door_idx = self.rng.choice(self.ORIENTATIONS)
                if room.neighbors[door_idx] and room.doors[door_idx] is None:
                    break
        if room.doors[door_idx] is not None:
            raise IndexError(f'door {door_idx} already exists')
        if color is None:
            color = self.rng.choice(COLORS)
        if locked is None:
            locked = self.rng.rand() > .5
        room.locked = locked
        door = Door(color, state='locked' if locked else 'closed')
        pos = room.door_pos[door_idx]
        self[pos] = door
        room.doors[door_idx] = door
        # Register the same door on the opposite wall of the neighbor room.
        room.neighbors[door_idx].doors[self._door_idx(door_idx, 2)] = door
        return door

    def _door_idx(self, door_idx, offset):
        """Return the orientation `offset` quarter-turns clockwise from door_idx."""
        idx = self.ORIENTATIONS.index(door_idx)
        door_idx = self.ORIENTATIONS[(idx + offset) % len(self.ORIENTATIONS)]
        return door_idx

    def remove_wall(self, i, j, wall_idx):
        """
        Remove a wall between two rooms
        """
        room = self.room_grid[i][j]
        if room.doors[wall_idx] is not None:
            raise ValueError('door exists on this wall')
        if not room.neighbors[wall_idx]:
            raise ValueError(f'invalid wall: {wall_idx}')
        neighbor = room.neighbors[wall_idx]
        ti, tj = room.top
        w, h = room.size
        # Ordering of walls is right, down, left, up
        if wall_idx == 'right':
            for i in range(1, h - 1):
                self[ti + i, tj + w - 1].clear()
        elif wall_idx == 'down':
            for j in range(1, w - 1):
                self[ti + h - 1, tj + j].clear()
        elif wall_idx == 'left':
            for i in range(1, h - 1):
                self[ti + i, tj].clear()
        elif wall_idx == 'up':
            for j in range(1, w - 1):
                self[ti, tj + j].clear()
        else:
            raise ValueError(f'invalid wall: {wall_idx}')
        # Mark the rooms as connected
        room.doors[wall_idx] = True
        neighbor.doors[self._door_idx(wall_idx, 2)] = True

    def place_agent(self, i=None, j=None, rand_dir=True):
        """
        Place the agent in a room (random room when i/j are not given).
        """
        if i is None:
            i = self.rng.randint(self.num_rows)
        if j is None:
            j = self.rng.randint(self.num_cols)
        room = self.room_grid[i][j]
        # Find a position that is not right in front of an object
        while True:
            super().place_agent(top=room.top, size=room.size, rand_dir=rand_dir, max_tries=1000)
            front_cell = self[self.agent.front_pos]
            if front_cell.entity is None or front_cell.entity.type == 'wall':
                break
            else:
                self[self.agent.pos].clear()
        return self.agent.pos

    def connect_all(self, door_colors=COLORS, max_itrs=5000):
        """
        Make sure that all rooms are reachable by the agent from its
        starting position
        """
        start_room = self.room_from_pos(*self.agent.pos)
        added_doors = []

        def find_reach():
            # Flood-fill over rooms through existing doors.
            reach = set()
            stack = [start_room]
            while len(stack) > 0:
                room = stack.pop()
                if room in reach:
                    continue
                reach.add(room)
                for ori in self.ORIENTATIONS:
                    if room.doors[ori]:
                        stack.append(room.neighbors[ori])
            return reach

        num_itrs = 0
        while True:
            # This is to handle rare situations where random sampling produces
            # a level that cannot be connected, producing in an infinite loop
            if num_itrs > max_itrs:
                raise RecursionError('connect_all failed')
            num_itrs += 1
            # If all rooms are reachable, stop
            reach = find_reach()
            if len(reach) == self.num_rows * self.num_cols:
                break
            # Pick a random room and door position
            i = self.rng.randint(0, self.num_rows)
            j = self.rng.randint(0, self.num_cols)
            k = self.rng.choice(self.ORIENTATIONS)
            room = self.room_grid[i][j]
            # If there is already a door there, skip
            if not room.door_pos[k] or room.doors[k]:
                continue
            if room.locked or room.neighbors[k].locked:
                continue
            color = self.rng.choice(door_colors)
            door = self.add_door(i, j, k, color, False)
            added_doors.append(door)
        return added_doors

    def add_distractors(self, i=None, j=None, num_distractors=10, all_unique=True):
        """
        Add random objects that can potentially distract/confuse the agent.
        """
        # NOTE(review): everything below this raise is dead code — it looks
        # like a ported implementation kept for reference (add_object here
        # returns a single obj, not the (dist, pos) pair unpacked below).
        raise NotImplementedError
        # Collect a list of existing objects
        objs = []
        for row in self.room_grid:
            for room in row:
                for obj in room.objs:
                    objs.append((obj.type, obj.color))
        # List of distractors added
        dists = []
        while len(dists) < num_distractors:
            color = self.rng.choice(COLORS)
            type = self.rng.choice(['key', 'ball', 'box'])
            obj = (type, color)
            if all_unique and obj in objs:
                continue
            # Add the object to a random room if no room specified
            room_i = i
            room_j = j
            if room_i is None:
                room_i = self.rng.randint(0, self.num_rows)
            if room_j is None:
                room_j = self.rng.randint(0, self.num_cols)
            dist, pos = self.add_object(room_i, room_j, *obj)
            objs.append(obj)
            dists.append(dist)
        return dists
|
from scipy import signal
import os
import cv2
import numpy as np
import math
class Compare():
    """Pixel-level comparison metrics between two equally-sized images."""

    def correlation(self, img1, img2):
        """Full 2-D cross-correlation of the two images."""
        return signal.correlate2d(img1, img2)

    def meanSquareError(self, img1, img2):
        """Mean squared error, averaged over the first two dimensions."""
        diff = img1.astype('float') - img2.astype('float')
        return np.sum(diff ** 2) / float(img1.shape[0] * img1.shape[1])

    def psnr(self, img1, img2):
        """Peak signal-to-noise ratio in dB; 100 when the images are identical."""
        mse = self.meanSquareError(img1, img2)
        if mse == 0:
            return 100
        return 20 * math.log10(255.0 / math.sqrt(mse))
compare = Compare()
original_image_path = input("Enter name of original image with extension: \n")
# (typo fix in prompt: "incoded" -> "encoded")
encoded_image_path = input("Enter name of encoded image with extension: \n")
# BUG FIX: cv2.imread's second argument is an ImreadMode flag, not a color
# conversion code. Passing cv2.COLOR_BGR2RGB (== 4) accidentally selected
# IMREAD_ANYCOLOR. Read with the default mode; the grayscale conversion
# below is what actually matters for the metrics.
orig_image = cv2.imread("Original_image/"+original_image_path)
enc_image = cv2.imread("Encoded_image/"+encoded_image_path)
orig_image = cv2.cvtColor(orig_image, cv2.COLOR_BGR2GRAY)
enc_image = cv2.cvtColor(enc_image, cv2.COLOR_BGR2GRAY)
print("-----------------------------------------------------------------------")
print("Correlation: {}".format(compare.correlation(orig_image, enc_image)))
print("-----------------------------------------------------------------------")
print("Mean Square Error: {}".format(compare.meanSquareError(orig_image, enc_image)))
print("-----------------------------------------------------------------------")
print("Peak Signal to Noise Ratio(PSNR) {}".format(compare.psnr(orig_image, enc_image)))
print("-----------------------------------------------------------------------")
|
from rucio.client.rseclient import RSEClient
import uuid

# NOTE: Python 2 script (uses the print statement).
# Lists every RSE known to Rucio together with its id as bare hex.
c = RSEClient(account="ivm")
for rse in c.list_rses():
    u = uuid.UUID(rse["id"])
    print rse["rse"], u.hex
|
def get_parents(o):
    """Return the chain of objects that `o` directly or indirectly orbits.

    Walks the module-level `orbits` mapping (child -> parent) until the
    root, whose parent is the empty string.
    """
    parent_chain = []
    node = o
    while orbits[node] != "":
        node = orbits[node]
        parent_chain.append(node)
    return parent_chain
def create_orbits(orbit_map):
    """Parse 'A)B' lines into a child -> parent mapping.

    Every object gets an entry; the root(s) map to the empty string.
    """
    pairs = [line.split(")") for line in orbit_map.split("\n")]
    members = {name for pair in pairs for name in pair}
    orbits = dict.fromkeys(members, "")
    for center, satellite in pairs:
        orbits[satellite] = center
    return orbits
# Part 1: total number of direct and indirect orbits.
orbit_map = open("day6-input.txt", "r").read()
orbits = create_orbits(orbit_map)
total = sum(map(lambda o: len(get_parents(o)), orbits.keys()))
print(total)
# Part 2
# Start from the object YOU is orbiting at, to the object SAN is orbiting at
# So there is no need to add 1 to make up for the one common object removed with symmetric difference
print(len(set(get_parents("YOU")).symmetric_difference(get_parents("SAN"))))
|
from multiprocessing import Queue, Value
from access_face_vision import access_logger
from time import sleep
from access_face_vision.face_detector import FaceDetector
from access_face_vision.face_encoder import FaceEncoder
from access_face_vision.source.image_reader import ImageReader
from access_face_vision.embedding_generator import EmbeddingGenerator
from access_face_vision import utils
def train_face_recognition_model(cmd_args, logger, log_que):
    """Assemble and run the face-recognition training pipeline, blocking
    until a stage signals shutdown.

    Stages are connected through queues:
    ImageReader -> FaceDetector -> FaceEncoder -> EmbeddingGenerator.

    NOTE(review): the `logger` argument is accepted but unused here — the
    stages log through log_que instead; confirm before removing it.
    """
    kill_app = Value('i', 0)  # shared stop flag; any stage sets it to 1
    camera_out_que = Queue()     # ImageReader   -> FaceDetector
    detector_out_que = Queue()   # FaceDetector  -> FaceEncoder
    encoder_out_que = Queue()    # FaceEncoder   -> EmbeddingGenerator
    dir_reader = ImageReader(cmd_args, camera_out_que, log_que, 'info', kill_app, True)
    face_detector = FaceDetector(cmd_args, camera_out_que, detector_out_que, log_que, 'info', kill_app, True)
    face_encoder = FaceEncoder(cmd_args, detector_out_que, encoder_out_que, log_que, 'info', kill_app, True)
    embed_gen = EmbeddingGenerator(cmd_args, encoder_out_que, log_que, 'info', kill_app, True)
    # Downstream consumers are started before the image reader.
    face_detector.start()
    face_encoder.start()
    embed_gen.start()
    dir_reader.start()
    # Poll the shared flag until some stage requests shutdown.
    while kill_app.value != 1:
        sleep(0.2)
if __name__ == '__main__':
    # Parse CLI args and install a multiprocess-safe queue logger before
    # starting the training pipeline.
    cmd_args = utils.create_parser()
    logger, log_que, que_listener = access_logger.set_main_process_logger(cmd_args.log,
                                                                          cmd_args.log_screen,
                                                                          cmd_args.log_file)
    que_listener.start()
    train_face_recognition_model(cmd_args, logger, log_que)
    que_listener.stop()
"""Workflow module logic"""
import os
import logging
import contextlib
from abc import ABC, abstractmethod
from pathlib import Path
from haddock.core.defaults import MODULE_PATH_NAME, MODULE_IO_FILE
from haddock.core.exceptions import StepError
from haddock.gear.config_reader import read_config
from haddock.libs.libontology import ModuleIO
logger = logging.getLogger(__name__)

# Folder containing all HADDOCK3 module packages (the folder of this file).
modules_folder = Path(__file__).resolve().parent

# NOTE: despite the name, this is a *glob* pattern, not a regular
# expression — it matches immediate sub-folders whose names start with a
# letter. Kept under the original name for backward compatibility.
_folder_match_regex = '[a-zA-Z]*/'
modules_category = {
    module.name: category.name
    for category in modules_folder.glob(_folder_match_regex)
    for module in category.glob(_folder_match_regex)
    }
"""Indexes each module in its specific category. Keys are the module names,
values are their categories. Categories are the modules parent folders."""

general_parameters_affecting_modules = {'ncores', 'cns_exec'}
"""These parameters are general parameters that may be applicable to modules
specifically. Therefore, they should be considered as part of the "default"
module's parameters. Usually, this set is used to filter parameters during
the run preparation phase. See, `gear.prepare_run`."""
class BaseHaddockModule(ABC):
    """Base class for any HADDOCK module."""

    def __init__(self, order, path, params, cns_script=""):
        """
        Base class for any HADDOCK module

        Parameters
        ----------
        order : int
            Zero-based position of this module in the workflow.

        path : pathlib.Path
            Working folder of this module's step.

        params : dict or path to HADDOCK3 configuration file
            A dictionary or a path to an HADDOCK3 configuration file
            containing the initial module parameters. Usually this is
            defined by the default params.

        cns_script : str or pathlib.Path, optional
            Path to the module's CNS protocol script. Empty for modules
            that are not CNS-based.
        """
        self.order = order
        self.path = path
        self.previous_io = self._load_previous_io()

        if cns_script:
            self.cns_folder_path = cns_script.resolve().parent
            self.cns_protocol_path = cns_script

        self.params = params

        try:
            with open(self.cns_protocol_path) as input_handler:
                self.recipe_str = input_handler.read()
        except FileNotFoundError as err:
            _msg = f"Error while opening workflow {self.cns_protocol_path}"
            # chain the original error so the missing-file cause is kept
            raise StepError(_msg) from err
        except AttributeError:
            # No CNS-like module: cns_protocol_path was never set above.
            pass

    @property
    def params(self):
        """Module parameters as a dictionary."""
        return self._params

    @params.setter
    def params(self, path_or_dict):
        """Accept either a ready dict or a path to a HADDOCK3 config file."""
        if isinstance(path_or_dict, dict):
            self._params = path_or_dict
        else:
            try:
                self._params = read_config(path_or_dict)
            except FileNotFoundError as err:
                _msg = f"Default configuration file not found: {str(path_or_dict)!r}"
                raise FileNotFoundError(_msg) from err
            except TypeError as err:
                _msg = (
                    "Argument does not satisfy condition, must be path or "
                    f"dict. {type(path_or_dict)} given."
                    )
                raise TypeError(_msg) from err

    @abstractmethod
    def run(self, params):
        """Execute the module; subclasses extend this with the real work."""
        self.update_params(**params)
        # ensure the general parameters always exist so subclasses can
        # read them unconditionally
        self.params.setdefault('ncores', None)
        self.params.setdefault('cns_exec', None)

    @classmethod
    @abstractmethod
    def confirm_installation(cls):
        """
        Confirm the third-party software needed for the module is installed.

        HADDOCK3's own modules should just return.
        """
        return

    def finish_with_error(self, message=""):
        """Log *message* (or a generic failure note) and abort the run."""
        if not message:
            message = "Module has failed"
        logger.error(message)
        raise SystemExit

    def _load_previous_io(self):
        """Load the previous step's ModuleIO; empty for the first step."""
        if self.order == 0:
            return ModuleIO()

        io = ModuleIO()
        previous_io = self.previous_path() / MODULE_IO_FILE

        if previous_io.is_file():
            io.load(previous_io)

        return io

    def previous_path(self):
        """Return the folder of the previous step, or this step's own path."""
        previous = sorted(list(self.path.resolve().parent.glob('[0-9][0-9]*/')))
        try:
            return previous[self.order - 1]
        except IndexError:
            # no numbered sibling folders found; fall back to our own path
            return self.path

    def update_params(self, **parameters):
        """Update defaults parameters with run-specific parameters."""
        self._params.update(parameters)
@contextlib.contextmanager
def working_directory(path):
    """Temporarily switch the process working directory to *path*.

    The previous working directory is restored on exit, whether the
    body finished normally or raised.
    """
    original = Path.cwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(original)
|
# Generated by Django 2.0 on 2018-02-14 13:24
from django.db import migrations
def forwards(apps, schema_editor):
    """
    Change Events with kind 'movie' to 'cinema'
    and Events with kind 'play' to 'theatre'.
    Purely for more consistency.
    """
    Event = apps.get_model("spectator_events", "Event")
    # Process 'movie' first, then 'play' (same order as before).
    renames = {"movie": "cinema", "play": "theatre"}
    for old_kind, new_kind in renames.items():
        for event in Event.objects.filter(kind=old_kind):
            event.kind = new_kind
            event.save()
class Migration(migrations.Migration):
    """Data migration renaming Event kinds 'movie'->'cinema', 'play'->'theatre'."""

    dependencies = [
        ("spectator_events", "0034_auto_20180208_1618"),
    ]

    operations = [
        # A no-op reverse makes the migration reversible: rolling back
        # keeps the renamed kinds rather than guessing which 'cinema'
        # rows were originally 'movie'.
        migrations.RunPython(forwards, migrations.RunPython.noop),
    ]
|
from beluga.visualization import BelugaPlot
from beluga.visualization.datasources import Dill
# Post-processing script: loads a saved beluga continuation result from
# './data.dill' and renders downrange/altitude, flight-path-angle and
# angle-of-attack plots with the matplotlib renderer. The quoted strings
# below are expressions evaluated by beluga against the solution data.
# plots = BelugaPlot('./data.dill',default_sol=-1,default_step=-1)
ds = Dill('./data.dill')
# ds2 = Dill('./phu_2k5_eps4.dill')
# Use the last solution of the last continuation step by default.
plots = BelugaPlot(datasource=ds,default_sol=-1,default_step=-1, renderer='matplotlib')
# plots = BelugaPlot('./phu_2k5_eps4.dill',default_sol=-1,default_step=-1, renderer='bokeh')
#
# plots.add_plot().line_series('theta*re/1000','h/1000', step=-1, skip=3) \
# .xlabel('Downrange (km)').ylabel('h (km)') \
# .title('Altitude vs. Downrange')
# Altitude vs. downrange, scaled to kilometres.
plots.add_plot().line('theta*re/1000','h/1000',datasource=ds,label='DS1') \
.xlabel('Downrange (km)').ylabel('h (km)') \
.title('Altitude vs. Downrange')
# .line('theta*re/1000','h/1000',datasource=ds2,label='DS2') \
#
# plots.add_plot().line('v/1000','h/1000') \
# .xlabel('v (km/s)').ylabel('h (km)') \
# .title('Altitude vs. Velocity')
#
# NOTE(review): 180/3.14 is a crude radians->degrees conversion inside the
# plotted expression strings; confirm whether the expression parser exposes
# a proper 'pi' constant before tightening it.
plots.add_plot().line('t','gam*180/3.14') \
.xlabel('t (s)').ylabel('fpa (degrees)') \
.title('FPA vs. Time')
plots.add_plot().line('t','alfa*180/3.14') \
.xlabel('t (s)').ylabel('alfa (degrees)') \
.title('Angle of attack vs. Time')
# plots.add_plot().line_series('v/1000','h/1000', step=-1, skip=9) \
# .xlabel('v (km/s)').ylabel('h (km)') \
# .title('Altitude vs. Velocity')
# plots.add_plot().line_series('t','alfa*180/3.14', step=-1, skip=9) \
# .xlabel('t (s)').ylabel('alfa (degrees)') \
# .title('Angle of attack vs. Time')
# plots.add_plot().line('t','alfa*180/3.14') \
# .xlabel('t (s)').ylabel('alfa (degrees)') \
# .title('Angle of attack vs. Time')
plots.render()
|
#
# PySNMP MIB module CISCO-LWAPP-SI-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-LWAPP-SI-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:06:24 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Symbol imports for this pysmi-generated module. `mibBuilder` is not
# defined here — presumably it is injected into the module namespace by
# the pysnmp MIB loader when this file is executed (standard for
# pysmi-compiled MIBs; confirm against the loader in use).
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion")
cLApDot11IfSlotId, ciscoLwappSpectrum, cLApSysMacAddress, cLApName = mibBuilder.importSymbols("CISCO-LWAPP-AP-MIB", "cLApDot11IfSlotId", "ciscoLwappSpectrum", "cLApSysMacAddress", "cLApName")
CLDot11Band, = mibBuilder.importSymbols("CISCO-LWAPP-TC-MIB", "CLDot11Band")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
Gauge32, Bits, MibIdentifier, Counter32, IpAddress, Counter64, iso, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, TimeTicks, Integer32, Unsigned32, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "Bits", "MibIdentifier", "Counter32", "IpAddress", "Counter64", "iso", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "TimeTicks", "Integer32", "Unsigned32", "ObjectIdentity")
TruthValue, MacAddress, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "MacAddress", "DisplayString", "TextualConvention")
# MODULE-IDENTITY: root node of CISCO-LWAPP-SI-MIB at
# 1.3.6.1.4.1.9.9.513.1.6.1, with revision history and descriptive text.
ciscoLwappSiMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1))
ciscoLwappSiMIB.setRevisions(('2015-05-18 00:00', '2011-10-01 00:00', '2011-05-16 00:00',))
# Revision descriptions are only supported on newer builders (> 4.4.0).
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: ciscoLwappSiMIB.setRevisionsDescriptions(('Added following NOTIFICATION-GROUP - ciscoLwappSiMIBNotifGroupRev1. Added new compliance - ciscoLwappApSiMIBComplianceRev2. Updated the description of following obejcts - cLSiD11AqiTrapThreshold - cLSiD11IdrUnclassifiedTrapEnable - cLSiD11IdrUnclassifiedTrapThreshold - cLSiApIfSensordErrorCode Updated following notifications - ciscoLwappSiAqLow - ciscoLwappSiIdrDevice Deprecated following NOTIFICATION-GROUP - ciscoLwappSiMIBNotifGroup.', "Added following objects to existing table, cLSiDot11BandEntry: cLSiD11IdrPersistentDevicePropagation cLSiD11IdrUnclassifiedTrapEnable cLSiD11IdrUnclassifiedTrapThreshold. Added following object to existing table, cLSiDot11BandEventDrivenRrmEntry: cLSiD11EventDrivenRrmCustomThresVal. Added one more enumeration('custom') for custom configuration for the existing variable, cLSiD11EventDrivenRrmThresLvl. Added the notification variable, cLSiD11IdrUnclassifiedCurrentSevIndex. Added one more notification, ciscoLwappSiAqLowSeverityHigh.", 'Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoLwappSiMIB.setLastUpdated('201505180000Z')
if mibBuilder.loadTexts: ciscoLwappSiMIB.setOrganization('Cisco Systems Inc.')
if mibBuilder.loadTexts: ciscoLwappSiMIB.setContactInfo('Cisco Systems, Customer Service Postal: 170 West Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS Email: cs-snmp@cisco.com')
if mibBuilder.loadTexts: ciscoLwappSiMIB.setDescription("This MIB module defines objects that describes the configuration and status of the spectrum intelligence capabilities of the 802.11 Access points. This MIB is intended to be implemented on all those devices operating as Central Controllers (CC) that terminate the Light Weight Access Point Protocol tunnel from Light-weight LWAPP Access Points. The relationship between CC and the LWAPP APs can be depicted as follows: +......+ +......+ +......+ +......+ + + + + + + + + + CC + + CC + + CC + + CC + + + + + + + + + +......+ +......+ +......+ +......+ .. . . . .. . . . . . . . . . . . . . . . . . . . . . . . +......+ +......+ +......+ +......+ +......+ + + + + + + + + + + + AP + + AP + + AP + + AP + + AP + + + + + + + + + + + +......+ +......+ +......+ +......+ +......+ . . . . . . . . . . . . . . . . . . . . . . . . +......+ +......+ +......+ +......+ +......+ + + + + + + + + + + + MN + + MN + + MN + + MN + + MN + + + + + + + + + + + +......+ +......+ +......+ +......+ +......+ The LWAPP tunnel exists between the controller and the APs. The MNs communicate with the APs through the protocol defined by the 802.11 standard. LWAPP APs, upon bootup, discover and join one of the controllers and the controller pushes the configuration, that includes the WLAN parameters, to the LWAPP APs. The APs then encapsulate all the 802.11 frames from wireless clients inside LWAPP frames and forward the LWAPP frames to the controller. GLOSSARY Access Point ( AP ) An entity that contains an 802.11 medium access control ( MAC ) and physical layer ( PHY ) interface and provides access to the distribution services via the wireless medium for associated clients. LWAPP APs encapsulate all the 802.11 frames in LWAPP frames and sends it to the controller to which it is logically connected. 
Basic Service Set Identifier (BSSID) The identifier for the service set comprising of all the 802.11 stations under the control of one coordinating Access Point. This identifier happens to be the MAC address of the dot11 radio interface of the Access Point. The wireless clients that associate with the Access Point get the wired uplink through this particular dot11 interface. Central Controller ( CC ) The central entity that terminates the LWAPP protocol tunnel from the LWAPP APs. Throughout this MIB, this entity also referred to as 'controller'. Light Weight Access Point Protocol ( LWAPP ) This is a generic protocol that defines the communication between the Access Points and the Central Controller. Mobile Node ( MN ) A roaming 802.11 wireless device in a wireless network associated with an access point. Station Management (SMT) This term refers to the internal management of the 802.11 protocol operations by the AP to work cooperatively with the other APs and 802.11 devices in the network. Spectrum Intelligence (SI) Radio frequency (RF) interference from devices operating in the unlicensed 2.4-GHz and 5-GHz bands used by wireless LANs (WLANs) is a growing concern for organizations deploying indoor and outdoor wireless networks. A variety of RF devices are now available that can cause interference, including cordless phones, Bluetooth devices, cameras, paging systems, unauthorized access points, and clients in ad-hoc mode. Left unaddressed, RF interference can result in low data rates and throughput, lack of sufficient WLAN coverage, WLAN performance degradation, poor voice quality, and low end-user satisfaction. This, in turn, can lead to decreased network capacity, an increase in support calls, network downtime, rising operational costs, and potential security vulnerabilities from malicious interference. 
Spectrum Intelligence, industry-leading solution from Cisco detects, classifies, and locates devices causing RF interference in the unlicensed 2.4-GHz and 5-GHz bands. When the source of the interference is determined, customers can remove, move, shield, adjust, or replace the interference source. This helps organizations troubleshoot the wireless network to determine the root causes of interference problems and optimize network performance. Sensord The Sensord software looks at the timing and frequency of interference bursts, and the discovered attributes of the bursts such as the modulation type and identified sync words. This high-level information is then used to perform the final identification and separation of one device from another. This final classification step provides the powerful features of SI: Identifying the specific source of the interference, where it is located, and how it can be mitigated. Persistent Device Propagation Interference devices usually affect multiple wireless access points. Using persistent device propagation information regarding persistent interference devices can be passed from one access point to another access point effectively improving radio resource management. Radio Resource Management ( RRM ) RRM is the system level control of co-channel interference and other radio transmission characteristics in wireless communication systems. REFERENCE [1] Part 11 Wireless LAN Medium Access Control ( MAC ) and Physical Layer ( PHY ) Specifications. [2] Draft-obara-capwap-lwapp-00.txt, IETF Light Weight Access Point Protocol.")
# OID subtree anchors under ciscoLwappSiMIB: notifications (.0), objects
# (.1), conformance (.2), notification objects (.3), and the four object
# groups (air quality, interference, dot11 band, AP interface).
ciscoLwappSiMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 0))
ciscoLwappSiMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1))
ciscoLwappSiMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 2))
ciscoLwappSiMIBNotifObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 3))
ciscoLwappAirQuality = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 1))
ciscoLwappInterference = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2))
ciscoLwappSiDot11Band = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 3))
ciscoLwappSiApIf = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 4))
# --- cLSiApIfTable: per-radio Spectrum Intelligence configuration and
# sensord status, indexed by AP MAC address and dot11 slot id. -------------
cLSiApIfTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 4, 1), )
if mibBuilder.loadTexts: cLSiApIfTable.setStatus('current')
if mibBuilder.loadTexts: cLSiApIfTable.setDescription('This table represents the information about the air quality parameters corresponding to the dot11 interfaces of the APs that have joined the controller.')
cLSiApIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 4, 1, 1), ).setIndexNames((0, "CISCO-LWAPP-AP-MIB", "cLApSysMacAddress"), (0, "CISCO-LWAPP-AP-MIB", "cLApDot11IfSlotId"))
if mibBuilder.loadTexts: cLSiApIfEntry.setStatus('current')
if mibBuilder.loadTexts: cLSiApIfEntry.setDescription('An entry in this table represents the 802.11 AQ parameters of a channel on a dot11 interface of an AP that has joined the controller.')
# Columns 1-4: read-write SI enable/capability/rapid-update/detail-mode flags.
cLSiApIfSpectrumIntelligenceEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 4, 1, 1, 1), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cLSiApIfSpectrumIntelligenceEnable.setStatus('current')
if mibBuilder.loadTexts: cLSiApIfSpectrumIntelligenceEnable.setDescription("This object indicates whether Spectrum Intelligence (SI) is enabled on this radio. A value of 'true' indicates SI is enabled. A value of 'false' indicates SI is disabled.")
cLSiApIfSpectrumCapable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 4, 1, 1, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiApIfSpectrumCapable.setStatus('current')
if mibBuilder.loadTexts: cLSiApIfSpectrumCapable.setDescription("This object indicates whether Spectrum Intelligence (SI) can be enabled on this radio. A value of 'true' indicates SI can be enabled. A value of 'false' indicates SI cannot be enabled.")
cLSiApIfRapidUpdateEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 4, 1, 1, 3), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cLSiApIfRapidUpdateEnable.setStatus('current')
if mibBuilder.loadTexts: cLSiApIfRapidUpdateEnable.setDescription("This object indicates whether rapid update is enabled on this radio. A value of 'true' indicates Rapid update is enabled. A value of 'false' indicates Rapid Update is disabled.")
cLSiApIfDetailSpectrumModeEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 4, 1, 1, 4), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cLSiApIfDetailSpectrumModeEnable.setStatus('current')
if mibBuilder.loadTexts: cLSiApIfDetailSpectrumModeEnable.setDescription("This object indicates whether detailed spectrum mode is enabled on this radio. A value of 'true' indicates detailed spectrum mode is enabled. A value of 'false' indicates detailed spectrum mode is disabled.")
# Columns 5-7: read-only sensord status, active SE connection count and
# sensord error code.
cLSiApIfSensordOperationalStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 4, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("notApplicable", 3))).clone('notApplicable')).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiApIfSensordOperationalStatus.setStatus('current')
if mibBuilder.loadTexts: cLSiApIfSensordOperationalStatus.setDescription('This object indicates the current operational status of the Sensord')
cLSiApIfNumOfSeActiveConnection = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 4, 1, 1, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiApIfNumOfSeActiveConnection.setStatus('current')
if mibBuilder.loadTexts: cLSiApIfNumOfSeActiveConnection.setDescription('This object indicates the current number of active spectrum expert(SE) connections per slot of AP.')
cLSiApIfSensordErrorCode = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 4, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 129, 130))).clone(namedValues=NamedValues(("configured", 1), ("invalidSIConfig", 2), ("apNonCleanAirMode", 3), ("failedChannelConfig", 4), ("failedResourceAllocation", 5), ("failedConnectionWithSensor", 6), ("radioNotCleanAirCapable", 7), ("failedSIStream", 8), ("radioDisabled", 9), ("recoverableError", 129), ("unrecoverableCrash", 130)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiApIfSensordErrorCode.setStatus('current')
if mibBuilder.loadTexts: cLSiApIfSensordErrorCode.setDescription('This object indicates the error code of the Sensord: configured(1) Configured. invalidSIConfig(2) Invalid SI configuration. apNonCleanAirMode(3) AP not in CleanAir mode. failedChannelConfig(4) Could not get channel configuration. failedResourceAllocation(5) Resource allocation failure. failedConnectionWithSensor(6) Could not establish connection with sensor. radioNotCleanAirCapable(7) Radio is not CleanAir capable. failedSIStream(8) Could not create SI streams. radioDisabled(9) Radio disabled. recoverableError(129) Recoverable error and AP will reset itself. unrecoverableCrash(130) Sensord crashed.')
# --- cLSiDot11BandTable: per-band (2.4/5 GHz) SI configuration, indexed
# by cLSiD11Band: device lists, polling interval and trap settings. --------
cLSiDot11BandTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 3, 1), )
if mibBuilder.loadTexts: cLSiDot11BandTable.setStatus('current')
if mibBuilder.loadTexts: cLSiDot11BandTable.setDescription('This table represents the information about the air quality parameters corresponding to the dot11 band of the APs that have joined the controller.')
cLSiDot11BandEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 3, 1, 1), ).setIndexNames((0, "CISCO-LWAPP-SI-MIB", "cLSiD11Band"))
if mibBuilder.loadTexts: cLSiDot11BandEntry.setStatus('current')
if mibBuilder.loadTexts: cLSiDot11BandEntry.setDescription('An entry in this table represents the AQ parameters on a dot11 band of an AP that has joined the controller.')
# Index column (no max-access): the dot11 band of this row.
cLSiD11Band = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 3, 1, 1, 1), CLDot11Band())
if mibBuilder.loadTexts: cLSiD11Band.setStatus('current')
if mibBuilder.loadTexts: cLSiD11Band.setDescription('This object represents the band for this entry.')
cLSiD11SpectrumIntelligenceEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 3, 1, 1, 2), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cLSiD11SpectrumIntelligenceEnable.setStatus('current')
if mibBuilder.loadTexts: cLSiD11SpectrumIntelligenceEnable.setDescription("This object indicates whether Spectrum Intelligence (SI) is enabled on this band. A value of 'true' indicates SI is enabled. A value of 'false' indicates SI is disabled.")
# BITS bitmap selecting which interferer categories are detected/reported.
cLSiD11InterferenceDeviceList = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 3, 1, 1, 3), Bits().clone(namedValues=NamedValues(("wiMaxFixed", 0), ("wiMaxMobile", 1), ("xbox", 2), ("canopy", 3), ("radar", 4), ("superAg", 5), ("wifiInvalidChannel", 6), ("wifiInverted", 7), ("eightZeroTwoDot15dot4", 8), ("videoCamera", 9), ("dectLikePhone", 10), ("continuousTransmitter", 11), ("jammer", 12), ("tddTransmitter", 13), ("bluetoothDiscovery", 14), ("eightZeroTwoDot11Fh", 15), ("microwaveOven", 16), ("bluetoothLink", 17)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cLSiD11InterferenceDeviceList.setStatus('current')
if mibBuilder.loadTexts: cLSiD11InterferenceDeviceList.setDescription('This object represents the interference device list which would be considered for detection by controller on the corresponding radio interface. If bit corresponding to particular interference device category is set, if existing in radio interface, the same would be detected and reported. If bit corresponding to particular interference device category is cleared, the same would not be considered for detection and reporting, even though existing in particular radio interface.')
# Air-quality polling interval, constrained to 15-60 minutes.
cLSiD11PollingInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 3, 1, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(15, 60))).setUnits('minutes').setMaxAccess("readwrite")
if mibBuilder.loadTexts: cLSiD11PollingInterval.setStatus('current')
if mibBuilder.loadTexts: cLSiD11PollingInterval.setDescription('This object represents the Air Quality polling interval.')
cLSiD11IdrReportingEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 3, 1, 1, 6), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cLSiD11IdrReportingEnable.setStatus('current')
if mibBuilder.loadTexts: cLSiD11IdrReportingEnable.setDescription("This object indicates whether IDR is enabled on this band. A value of 'true' indicates IDR is enabled. A value of 'false' indicates IDR is disabled.")
# Trap enables and thresholds for AQ and IDR notifications.
cLSiD11AqiTrapEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 3, 1, 1, 7), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cLSiD11AqiTrapEnable.setStatus('current')
if mibBuilder.loadTexts: cLSiD11AqiTrapEnable.setDescription("This object indicates whether AQ notification is enabled on this band. A value of 'true' indicates AQ notification is enabled. A value of 'false' indicates AQ notification is disabled.")
cLSiD11AqiTrapThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 3, 1, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cLSiD11AqiTrapThreshold.setStatus('current')
if mibBuilder.loadTexts: cLSiD11AqiTrapThreshold.setDescription('This object represents the threshold value for the trap, ciscoLwappSiAqLowRev1.')
cLSiD11IdrTrapEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 3, 1, 1, 9), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cLSiD11IdrTrapEnable.setStatus('current')
if mibBuilder.loadTexts: cLSiD11IdrTrapEnable.setDescription("This object indicates whether IDR notification is enabled on this band. A value of 'true' indicates IDR notification is enabled. A value of 'false' indicates IDR notification is disabled.")
cLSiD11IdrTrapDeviceList = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 3, 1, 1, 10), Bits().clone(namedValues=NamedValues(("wiMaxFixed", 0), ("wiMaxMobile", 1), ("xbox", 2), ("canopy", 3), ("radar", 4), ("superAg", 5), ("wifiInvalidChannel", 6), ("wifiInverted", 7), ("eightZeroTwoDot15dot4", 8), ("videoCamera", 9), ("dectLikePhone", 10), ("continuousTransmitter", 11), ("jammer", 12), ("tddTransmitter", 13), ("bluetoothDiscovery", 14), ("eightZeroTwoDot11Fh", 15), ("microwaveOven", 16), ("bluetoothLink", 17)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cLSiD11IdrTrapDeviceList.setStatus('current')
if mibBuilder.loadTexts: cLSiD11IdrTrapDeviceList.setDescription('This object represents the interference device list, that are enabled to generate traps. When bit corresponding to particular interference category is set, when detected by controller, trap indicating the detection of this interference device would be sent. And if bit corresponding to particular interference category is cleared, when detected by controller, trap indicating the detection of this interference device would not be sent.')
cLSiD11IdrPersistentDevicePropagation = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 3, 1, 1, 11), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cLSiD11IdrPersistentDevicePropagation.setStatus('current')
if mibBuilder.loadTexts: cLSiD11IdrPersistentDevicePropagation.setDescription("This object specifies whether Persistent Device Propagation to neighboring access points is enabled on this band or not. A value of 'true' indicates that Persistent Device Propagation is enabled. A value of 'false' indicates Persistent Device Propagation is disabled.")
cLSiD11IdrUnclassifiedTrapEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 3, 1, 1, 12), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cLSiD11IdrUnclassifiedTrapEnable.setStatus('current')
if mibBuilder.loadTexts: cLSiD11IdrUnclassifiedTrapEnable.setDescription("This object specifies whether trap, ciscoLwappSiAqLowSeverityHighRev1, would be sent for unclassified interference category on this band. A value of 'true' indicates trap enabled. A value of 'false' indicates trap is disabled.")
cLSiD11IdrUnclassifiedTrapThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 3, 1, 1, 13), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 100))).setUnits('percent').setMaxAccess("readwrite")
if mibBuilder.loadTexts: cLSiD11IdrUnclassifiedTrapThreshold.setStatus('current')
if mibBuilder.loadTexts: cLSiD11IdrUnclassifiedTrapThreshold.setDescription('This object specifies the threshold which would be considered while sending trap, ciscoLwappSiAqLowSeverityHighRev1, for unclassified interference category. When the interference severity index reaches this threshold, trap would be sent out.')
# --- cLSiAqTable: per-channel air-quality report, indexed by AP MAC,
# slot id and channel number; rows refreshed as AQ reports arrive. --------
cLSiAqTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 1, 1), )
if mibBuilder.loadTexts: cLSiAqTable.setStatus('current')
if mibBuilder.loadTexts: cLSiAqTable.setDescription('This table represents the information about the air quality parameters corresponding to the dot11 interfaces of the APs that have joined the controller for a given channel.')
cLSiAqEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 1, 1, 1), ).setIndexNames((0, "CISCO-LWAPP-AP-MIB", "cLApSysMacAddress"), (0, "CISCO-LWAPP-AP-MIB", "cLApDot11IfSlotId"), (0, "CISCO-LWAPP-SI-MIB", "cLSiAqChannelNumber"))
if mibBuilder.loadTexts: cLSiAqEntry.setStatus('current')
if mibBuilder.loadTexts: cLSiAqEntry.setDescription('An entry describes the AQ parameters of a channel on a 802.11 radio interface of an AP. A row in this table will be created when a report is received on controller from an AP for a channel. The reports will be updated in every 15 minutes. When new reports come in, old entries would be deleted and updated with new entries.')
# Index column: channel number (1-300).
cLSiAqChannelNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 1, 1, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 300)))
if mibBuilder.loadTexts: cLSiAqChannelNumber.setStatus('current')
if mibBuilder.loadTexts: cLSiAqChannelNumber.setDescription('This object indicates the channel number for which the report was received by the controller.')
# Read-only AQ metrics: indices (1-100), power levels (dBm) and duty cycles.
cLSiAqMinIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 1, 1, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiAqMinIndex.setStatus('current')
if mibBuilder.loadTexts: cLSiAqMinIndex.setDescription("This object indicates the minimum air quality Index. This value lies between 1 and 100 where value '1' indicates worst air quality and value '100' indicates best air quality.")
cLSiAqIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 1, 1, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiAqIndex.setStatus('current')
if mibBuilder.loadTexts: cLSiAqIndex.setDescription("This object indicates the air quality index. This value lies between 1 and 100 where value '1' indicates worst air quality and value '100' indicates best air quality.")
cLSiAqTotalChannelPower = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-90, -60))).setUnits('dbm').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiAqTotalChannelPower.setStatus('current')
if mibBuilder.loadTexts: cLSiAqTotalChannelPower.setDescription('This object indicates the RSSI value for total channel power.')
cLSiAqTotalChannelDutyCycle = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 1, 1, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setUnits('percent').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiAqTotalChannelDutyCycle.setStatus('current')
if mibBuilder.loadTexts: cLSiAqTotalChannelDutyCycle.setDescription('This object indicates the total channel duty cycle.')
cLSiAqInterferencePower = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-90, -60))).setUnits('dbm').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiAqInterferencePower.setStatus('current')
if mibBuilder.loadTexts: cLSiAqInterferencePower.setDescription('This object indicates the power or RSSI value of the interfering device. Received signal strength indicator (RSSI) is a measurement of the power present in a received radio signal.')
cLSiAqInterferenceDutyCycle = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 1, 1, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setUnits('percent').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiAqInterferenceDutyCycle.setStatus('current')
if mibBuilder.loadTexts: cLSiAqInterferenceDutyCycle.setDescription('This object indicates the duty cycle of interfering device.')
cLSiAqInterferenceDeviceCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 1, 1, 1, 8), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiAqInterferenceDeviceCount.setStatus('current')
if mibBuilder.loadTexts: cLSiAqInterferenceDeviceCount.setDescription('This object indicates the total number of interference devices identified by AP.')
cLSiAqInterfererClassReportCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 1, 1, 1, 9), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiAqInterfererClassReportCount.setStatus('current')
if mibBuilder.loadTexts: cLSiAqInterfererClassReportCount.setDescription('This object indicates the maximum number of worst air quality interference report that will be generated per channel of a dot11 interface of an AP.')
# Report receive time as seconds since the Unix epoch (UTC).
cLSiAqTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 1, 1, 1, 10), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiAqTimeStamp.setStatus('current')
if mibBuilder.loadTexts: cLSiAqTimeStamp.setDescription("This object indicates the time when the Air Quality(AQ) report was received by the controller. This represents number of seconds elapsed since 00:00:00 on January 1, 1970, Coordinated Universal Time (UTC). So a value of '1131362704' means 'Mon Nov 7 16:55:04 2005'.")
# --- cLSiAqInterferenceClassReportTable (...9.9.513.1.6.1.1.1.2) ---
# Worst air-quality interference reports, indexed (per setIndexNames) by
# AP MAC, dot11 slot, channel number and report index.
cLSiAqInterferenceClassReportTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 1, 2), )
if mibBuilder.loadTexts: cLSiAqInterferenceClassReportTable.setStatus('current')
if mibBuilder.loadTexts: cLSiAqInterferenceClassReportTable.setDescription('This table represents the information about the worst air quality interference report on a channel of the dot11 interfaces of the APs that have joined the controller. The total number of entries are represented by cLSiAqInterfererClassReportCount object.')
cLSiAqInterferenceClassReportEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 1, 2, 1), ).setIndexNames((0, "CISCO-LWAPP-AP-MIB", "cLApSysMacAddress"), (0, "CISCO-LWAPP-AP-MIB", "cLApDot11IfSlotId"), (0, "CISCO-LWAPP-SI-MIB", "cLSiAqChannelNumber"), (0, "CISCO-LWAPP-SI-MIB", "cLSiAqInterferenceClassReportIndex"))
if mibBuilder.loadTexts: cLSiAqInterferenceClassReportEntry.setStatus('current')
if mibBuilder.loadTexts: cLSiAqInterferenceClassReportEntry.setDescription('An entry describes the worst interference report generated for a channel on a 802.11 radio interface of an AP. There will be a maximum of cLSiAqInterfererClassReportCount worst air quality reports per channel on a dot11 interface of an AP.')
# Index column: carries no max-access clause (generator omits setMaxAccess
# for not-accessible index objects).
cLSiAqInterferenceClassReportIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 1, 2, 1, 1), Unsigned32())
if mibBuilder.loadTexts: cLSiAqInterferenceClassReportIndex.setStatus('current')
if mibBuilder.loadTexts: cLSiAqInterferenceClassReportIndex.setDescription('This object indicates the category index for this report.')
# Device-class enumeration: 18 named interferer types (values 1-37, sparse).
cLSiAqInterferenceClassReportDeviceClass = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 8, 10, 17, 18, 19, 25, 26, 27, 28, 30, 31, 32, 33, 34, 35, 36, 37))).clone(namedValues=NamedValues(("bluetoothLink", 1), ("microwaveOven", 8), ("eightZeroTwoDot11Fh", 10), ("bluetoothDiscovery", 17), ("tddTransmitter", 18), ("jammer", 19), ("continuousTransmitter", 25), ("dectLikePhone", 26), ("videoCamera", 27), ("eightZeroTwoDot15dot4", 28), ("wifiInverted", 30), ("wifiInvalidChannel", 31), ("superAg", 32), ("radar", 33), ("canopy", 34), ("microsoftDevice", 35), ("wiMaxMobile", 36), ("wiMaxFixed", 37)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiAqInterferenceClassReportDeviceClass.setStatus('current')
if mibBuilder.loadTexts: cLSiAqInterferenceClassReportDeviceClass.setDescription('This object indicates the device type of the identified interference devices mentioned in the report.')
cLSiAqInterferenceClassReportSeverityIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 1, 2, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiAqInterferenceClassReportSeverityIndex.setStatus('current')
if mibBuilder.loadTexts: cLSiAqInterferenceClassReportSeverityIndex.setDescription("This object indicates the severity index for this report. This value lies between 1 and 100 where value '1' indicates low interference and value '100' indicated high interference.")
cLSiAqInterferenceClassReportPower = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-90, -60))).setUnits('dbm').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiAqInterferenceClassReportPower.setStatus('current')
if mibBuilder.loadTexts: cLSiAqInterferenceClassReportPower.setDescription('This object indicates the power of interfering device reported in worst air quality report.')
cLSiAqInterferenceClassReportDutyCycle = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 1, 2, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setUnits('percent').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiAqInterferenceClassReportDutyCycle.setStatus('current')
if mibBuilder.loadTexts: cLSiAqInterferenceClassReportDutyCycle.setDescription('This object indicates the duty cycle of interfering device reported in worst air quality report')
cLSiAqInterferenceClassReportDeviceCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 1, 2, 1, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiAqInterferenceClassReportDeviceCount.setStatus('current')
if mibBuilder.loadTexts: cLSiAqInterferenceClassReportDeviceCount.setDescription('This object indicates the count of total interference devices in the worst air quality report.')
# --- cLSiIdrTable (...9.9.513.1.6.1.1.2.1) ---
# Interference Device Report (IDR) per interfering device, indexed by
# AP MAC, dot11 slot and device id (see setIndexNames).
cLSiIdrTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 1), )
if mibBuilder.loadTexts: cLSiIdrTable.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrTable.setDescription('This table represents the information about the IDR parameters for a given interfering device. These devices are detected and reported per the dot11 interfaces of the APs that have joined the controller.')
cLSiIdrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 1, 1), ).setIndexNames((0, "CISCO-LWAPP-AP-MIB", "cLApSysMacAddress"), (0, "CISCO-LWAPP-AP-MIB", "cLApDot11IfSlotId"), (0, "CISCO-LWAPP-SI-MIB", "cLSiIdrDeviceId"))
if mibBuilder.loadTexts: cLSiIdrEntry.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrEntry.setDescription('An entry describes the IDR(Interference Device Report) parameters of the 802.11 radio interface of an AP that has joined the controller. IDR will be generated for each interfering device which has been detected by the access points. A row in this table will be created when a report is received on controller from an AP for the dot11. The rows will be updated when controller has received new IDR report from an AP.')
# Index column (no setMaxAccess, as for other not-accessible indices).
cLSiIdrDeviceId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 1, 1, 1), Unsigned32())
if mibBuilder.loadTexts: cLSiIdrDeviceId.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrDeviceId.setDescription('This object represents the unique identification number of the interfering device.')
cLSiIdrClusterId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 1, 1, 2), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrClusterId.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrClusterId.setDescription('This object indicates the cluster id. The interference causing device can be detected by several CleanAir APs listening on the same channel. The WLC on receiving the records, merges the records to avoid multiple listing for the same interfering device. The WLC clusters records to create one record, with the AP most affected by the interferer as the cluster center.')
cLSiIdrTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 1, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrTimeStamp.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrTimeStamp.setDescription("This object indicates the time when the interferer was reported to the controller by access point. This represents number of seconds elapsed since 00:00:00 on January 1, 1970, Coordinated Universal Time (UTC). So a value of '1131362704' means 'Mon Nov 7 16:55:04 2005'.")
# Same 18-value interferer-type enumeration as the class-report table.
cLSiIdrDeviceType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 8, 10, 17, 18, 19, 25, 26, 27, 28, 30, 31, 32, 33, 34, 35, 36, 37))).clone(namedValues=NamedValues(("bluetoothLink", 1), ("microwaveOven", 8), ("eightZeroTwoDot11Fh", 10), ("bluetoothDiscovery", 17), ("tddTransmitter", 18), ("jammer", 19), ("continuousTransmitter", 25), ("dectLikePhone", 26), ("videoCamera", 27), ("eightZeroTwoDot15dot4", 28), ("wifiInverted", 30), ("wifiInvalidChannel", 31), ("superAg", 32), ("radar", 33), ("canopy", 34), ("microsoftDevice", 35), ("wiMaxMobile", 36), ("wiMaxFixed", 37)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrDeviceType.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrDeviceType.setDescription('This object indicates the device type and category of interfering device.')
cLSiIdrSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 1, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrSeverity.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrSeverity.setDescription('This object indicates the severity of the interference created by interfering device.')
cLSiIdrDetectingApMac = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 1, 1, 6), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrDetectingApMac.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrDetectingApMac.setDescription('This object indicates the mac address of the AP which detected the interfering device.')
cLSiIdrDutyCycle = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 1, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setUnits('percent').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrDutyCycle.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrDutyCycle.setDescription('This object indicates the duty cycle of the interfering device.')
cLSiIdrAntennaId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 1, 1, 8), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrAntennaId.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrAntennaId.setDescription('This object indicates the antenna information which has detected the interfering device.')
cLSiIdrRssi = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 1, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-90, -60))).setUnits('dbm').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrRssi.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrRssi.setDescription('This object indicates the RSSI value for transmit channel power.')
cLSiIdrRadioBandId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 1, 1, 10), CLDot11Band()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrRadioBandId.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrRadioBandId.setDescription('This object indicates the 802.11 band this entry corresponds to.')
cLSiIdrAffectedChannels = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 1, 1, 11), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrAffectedChannels.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrAffectedChannels.setDescription('This object indicates the channels affected by the interfering devices.')
cLSiIdrDeviceSignatureLen = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 1, 1, 12), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrDeviceSignatureLen.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrDeviceSignatureLen.setDescription('This object indicates the length of the Device Signature Id of the interfering device.')
cLSiIdrDeviceSignature = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 1, 1, 13), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrDeviceSignature.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrDeviceSignature.setDescription('This object indicates the Device Signature Id of the interfering device.')
# --- cLSiIdrClusterTable (...9.9.513.1.6.1.1.2.2) ---
# Merged ("clustered") interferer records, indexed by radio band, cluster
# id (MAC) and device index (see setIndexNames). Unlike cLSiIdrTable, the
# device-type enumerations here extend to values 38-40
# (wifiAci/unclassified/unknown).
cLSiIdrClusterTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 2), )
if mibBuilder.loadTexts: cLSiIdrClusterTable.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrClusterTable.setDescription('This table represents the information about the IDR parameters corresponding to clusters within dot11 radio interfaces. Interferers can be detected by several CleanAir APs listening on the same channel. So WLC attempts to merge together interferer records to avoid multiple listings for the same interferer. The WLC tries to cluster records it thinks are for the same interferer into one object, with the AP most affected by the interferer as the cluster center.')
cLSiIdrClusterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 2, 1), ).setIndexNames((0, "CISCO-LWAPP-SI-MIB", "cLSiIdrClusterRadioBandId"), (0, "CISCO-LWAPP-SI-MIB", "cLSiIdrClusterClusterId"), (0, "CISCO-LWAPP-SI-MIB", "cLSiIdrClusterDeviceIndex"))
if mibBuilder.loadTexts: cLSiIdrClusterEntry.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrClusterEntry.setDescription('An entry in this table represents the 802.11 IDR parameters corresponding to a cluster within dot11 radio interface.')
cLSiIdrClusterRadioBandId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 2, 1, 1), CLDot11Band()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrClusterRadioBandId.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrClusterRadioBandId.setDescription('This object indicates the 802.11 band this entry corresponds to.')
cLSiIdrClusterClusterId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 2, 1, 2), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrClusterClusterId.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrClusterClusterId.setDescription('This object indicates the MAC address of the interfering device for which the cluster is created.')
# Index column; constrained to at most 10 devices per cluster.
cLSiIdrClusterDeviceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 2, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 10)))
if mibBuilder.loadTexts: cLSiIdrClusterDeviceIndex.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrClusterDeviceIndex.setDescription('This object represents the device index inside the Cluster.')
cLSiIdrClusterDeviceId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 2, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrClusterDeviceId.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrClusterDeviceId.setDescription('This object indicates the device which uniquely identifies an entry in this table.')
cLSiIdrClusterTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 2, 1, 5), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrClusterTimeStamp.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrClusterTimeStamp.setDescription("This object indicates the time when the interferer registered with the controller. This represents number of seconds elapsed since 00:00:00 on January 1, 1970, Coordinated Universal Time (UTC). So a value of '1131362704' means 'Mon Nov 7 16:55:04 2005'.")
cLSiIdrClusterDeviceType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 8, 10, 17, 18, 19, 25, 26, 27, 28, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40))).clone(namedValues=NamedValues(("bluetoothLink", 1), ("microwaveOven", 8), ("eightZeroTwoDot11Fh", 10), ("bluetoothDiscovery", 17), ("tddTransmitter", 18), ("jammer", 19), ("continuousTransmitter", 25), ("dectLikePhone", 26), ("videoCamera", 27), ("eightZeroTwoDot15dot4", 28), ("wifiInverted", 30), ("wifiInvalidChannel", 31), ("superAg", 32), ("radar", 33), ("canopy", 34), ("microsoftDevice", 35), ("wiMaxMobile", 36), ("wiMaxFixed", 37), ("wifiAci", 38), ("unclassified", 39), ("unknown", 40)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrClusterDeviceType.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrClusterDeviceType.setDescription('This object indicates the device type and category.')
cLSiIdrClusterSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 2, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrClusterSeverity.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrClusterSeverity.setDescription('This object indicates the severity.')
cLSiIdrClusterDetectingApMac = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 2, 1, 8), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrClusterDetectingApMac.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrClusterDetectingApMac.setDescription('This object indicates the mac address of the AP which detected the interfering device.')
cLSiIdrClusterDutyCycle = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 2, 1, 9), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setUnits('percent').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrClusterDutyCycle.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrClusterDutyCycle.setDescription('This object indicates the duty cycle.')
cLSiIdrClusterAntennaId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 2, 1, 10), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrClusterAntennaId.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrClusterAntennaId.setDescription('This object indicates the antenna information for the cluster.')
cLSiIdrClusterRssi = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 2, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-90, -60))).setUnits('dbm').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrClusterRssi.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrClusterRssi.setDescription('This object indicates the RSSI value for transmit channel power.')
cLSiIdrClusterAffectedChannels = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 2, 1, 12), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrClusterAffectedChannels.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrClusterAffectedChannels.setDescription('This object indicates the affected channels.')
cLSiIdrClusterDeviceSignatureLen = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 2, 1, 13), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrClusterDeviceSignatureLen.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrClusterDeviceSignatureLen.setDescription('This object indicates the length of Device Signature.')
cLSiIdrClusterDeviceSignature = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 2, 1, 14), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrClusterDeviceSignature.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrClusterDeviceSignature.setDescription('This object indicates the Device Signature Id.')
cLSiIdrClusterCenterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 2, 1, 15), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrClusterCenterIndex.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrClusterCenterIndex.setDescription('This object indicates the cluster center index. With respect to clustering in CleanAir, the cluster center is the access point which is most affected by the interferer.')
cLSiIdrClusterType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 2, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 8, 10, 17, 18, 19, 25, 26, 27, 28, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40))).clone(namedValues=NamedValues(("bluetoothLink", 1), ("microwaveOven", 8), ("eightZeroTwoDot11Fh", 10), ("bluetoothDiscovery", 17), ("tddTransmitter", 18), ("jammer", 19), ("continuousTransmitter", 25), ("dectLikePhone", 26), ("videoCamera", 27), ("eightZeroTwoDot15dot4", 28), ("wifiInverted", 30), ("wifiInvalidChannel", 31), ("superAg", 32), ("radar", 33), ("canopy", 34), ("microsoftDevice", 35), ("wiMaxMobile", 36), ("wiMaxFixed", 37), ("wifiAci", 38), ("unclassified", 39), ("unknown", 40)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiIdrClusterType.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrClusterType.setDescription('This object indicates the cluster type.')
# --- cLSiDot11BandEventDrivenRrmTable (...9.9.513.1.6.1.1.3.2) ---
# Event-driven RRM configuration per dot11 band; the only read-write
# columns in this section.
cLSiDot11BandEventDrivenRrmTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 3, 2), )
if mibBuilder.loadTexts: cLSiDot11BandEventDrivenRrmTable.setStatus('current')
if mibBuilder.loadTexts: cLSiDot11BandEventDrivenRrmTable.setDescription('This table represents the information about the event driven RRM corresponding to the dot11 band of the APs that have joined the controller.')
cLSiDot11BandEventDrivenRrmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 3, 2, 1), ).setIndexNames((0, "CISCO-LWAPP-SI-MIB", "cLSiD11Band"))
if mibBuilder.loadTexts: cLSiDot11BandEventDrivenRrmEntry.setStatus('current')
if mibBuilder.loadTexts: cLSiDot11BandEventDrivenRrmEntry.setDescription('An entry in this table represents the event driven RRM on a dot 11 band of an AP that has joined the controller.')
# Defaults to 'true' per the generated .clone('true').
cLSiD11EventDrivenRrmEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 3, 2, 1, 1), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cLSiD11EventDrivenRrmEnable.setStatus('current')
if mibBuilder.loadTexts: cLSiD11EventDrivenRrmEnable.setDescription("This object indicates whether event driven RRM is enabled on this band. A value of 'true' indicates event driven RRM is enabled. A value of 'false' indicates event driven RRM is disabled.")
cLSiD11EventDrivenRrmThresLvl = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 3, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("low", 1), ("medium", 2), ("high", 3), ("custom", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cLSiD11EventDrivenRrmThresLvl.setStatus('current')
# Fix typo in the user-visible DESCRIPTION text: "even driven" -> "event driven".
if mibBuilder.loadTexts: cLSiD11EventDrivenRrmThresLvl.setDescription("This object represents the event driven RRM threshold level. 'low' - low level event driven RRM threshold value 'medium' - medium level event driven RRM threshold value 'high' - high level event driven RRM threshold value 'custom' - user specified value of event driven RRM threshold")
# Custom threshold value (percent, 0..99); only meaningful when
# cLSiD11EventDrivenRrmThresLvl is 'custom' per its DESCRIPTION.
cLSiD11EventDrivenRrmCustomThresVal = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 3, 2, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 99))).setUnits('percent').setMaxAccess("readwrite")
if mibBuilder.loadTexts: cLSiD11EventDrivenRrmCustomThresVal.setStatus('current')
if mibBuilder.loadTexts: cLSiD11EventDrivenRrmCustomThresVal.setDescription("This object represents the event driven RRM Custom threshold value. This object only takes effect when the value of cLSiD11EventDrivenRrmThresLvl is 'custom'.")
# --- cLSiPersistentDeviceTable (...9.9.513.1.6.1.1.2.3) ---
# Persistent interference devices per AP radio, indexed by AP MAC, slot
# and persistent device id (see setIndexNames).
cLSiPersistentDeviceTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 3), )
if mibBuilder.loadTexts: cLSiPersistentDeviceTable.setStatus('current')
if mibBuilder.loadTexts: cLSiPersistentDeviceTable.setDescription('This table represents the information about the Persistent interference devices corresponding to the dot11 interface of the APs that have joined the controller for a given device. Persistent devices are category of interference devices that are present right now and will interfere with controller operation, even if they are not detectable all the time.')
cLSiPersistentDeviceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 3, 1), ).setIndexNames((0, "CISCO-LWAPP-AP-MIB", "cLApSysMacAddress"), (0, "CISCO-LWAPP-AP-MIB", "cLApDot11IfSlotId"), (0, "CISCO-LWAPP-SI-MIB", "cLSiPersistentDeviceId"))
if mibBuilder.loadTexts: cLSiPersistentDeviceEntry.setStatus('current')
if mibBuilder.loadTexts: cLSiPersistentDeviceEntry.setDescription('An entry in this table represents the 802.11 persistent interference devices of a radio of an AP that has joined the controller.')
# Index column (not-accessible, hence no setMaxAccess).
cLSiPersistentDeviceId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 3, 1, 1), Unsigned32())
if mibBuilder.loadTexts: cLSiPersistentDeviceId.setStatus('current')
# Fix stray space before the period in the user-visible DESCRIPTION text.
if mibBuilder.loadTexts: cLSiPersistentDeviceId.setDescription('This object represents the device ID.')
# Remaining persistent-device columns: type/category string and last-update
# timestamp.
cLSiPersistentDeviceType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 3, 1, 2), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiPersistentDeviceType.setStatus('current')
if mibBuilder.loadTexts: cLSiPersistentDeviceType.setDescription('This object indicates the device type and category')
cLSiPersistentTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 3, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiPersistentTimeStamp.setStatus('current')
if mibBuilder.loadTexts: cLSiPersistentTimeStamp.setDescription('This object indicates the Time Stamp when this persistent device was last updated in controller.')
# --- cLSiPersistentDeviceChanTable (...9.9.513.1.6.1.1.2.4) ---
# Per-channel detail (channel, duty cycle, RSSI) for each persistent
# device; indexed by AP MAC, slot, device id and channel index.
cLSiPersistentDeviceChanTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 4), )
if mibBuilder.loadTexts: cLSiPersistentDeviceChanTable.setStatus('current')
if mibBuilder.loadTexts: cLSiPersistentDeviceChanTable.setDescription('This table represents the detail like Channel number, Duty Cycle(DC), RSSI of channels affected due to interference caused by persistence device.')
cLSiPersistentDeviceChanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 4, 1), ).setIndexNames((0, "CISCO-LWAPP-AP-MIB", "cLApSysMacAddress"), (0, "CISCO-LWAPP-AP-MIB", "cLApDot11IfSlotId"), (0, "CISCO-LWAPP-SI-MIB", "cLSiPersistentDeviceId"), (0, "CISCO-LWAPP-SI-MIB", "cLSiPersistentDeviceChanIndex"))
if mibBuilder.loadTexts: cLSiPersistentDeviceChanEntry.setStatus('current')
if mibBuilder.loadTexts: cLSiPersistentDeviceChanEntry.setDescription('An entry in this table represents the 802.11 persistent device affecting which channel(s) and with how much DC level and RSSI.')
# Index column (not-accessible, hence no setMaxAccess).
cLSiPersistentDeviceChanIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 4, 1, 1), Unsigned32())
if mibBuilder.loadTexts: cLSiPersistentDeviceChanIndex.setStatus('current')
if mibBuilder.loadTexts: cLSiPersistentDeviceChanIndex.setDescription('This object represents the index to the channel affected by the persistent device.')
cLSiChannelAffected = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 4, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiChannelAffected.setStatus('current')
if mibBuilder.loadTexts: cLSiChannelAffected.setDescription('This object indicates the channel affected by the persistent device')
cLSiChannelUtil = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 4, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiChannelUtil.setStatus('current')
if mibBuilder.loadTexts: cLSiChannelUtil.setDescription('This object indicates the DC level in a channel.')
# Note: wider RSSI range here (-127..128) than the -90..-60 used elsewhere.
cLSiChannelRSSI = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 1, 2, 4, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-127, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLSiChannelRSSI.setStatus('current')
if mibBuilder.loadTexts: cLSiChannelRSSI.setDescription('This object indicates the RSSI in a channel.')
# --- Notification-only scalars (...9.9.513.1.6.1.3.x) ---
# "accessiblefornotify" objects carried as varbinds in the notifications
# defined below.
cLSiAlarmClear = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 3, 1), TruthValue()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: cLSiAlarmClear.setStatus('current')
# Fix grammar/typos in the user-visible DESCRIPTION text: "raise or clear"
# -> "raised or cleared", missing sentence period, "even" -> "event".
if mibBuilder.loadTexts: cLSiAlarmClear.setDescription("This object specifies whether this event is raised or cleared. A value of 'true' indicates this event is cleared. A value of 'false' indicates this event is raised.")
# Additional notification-only scalars: previous cluster id, monitor-mode
# AP AQ limit, and current unclassified-interferer severity index.
cLSiIdrPreviousClusterId = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 3, 2), MacAddress()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: cLSiIdrPreviousClusterId.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrPreviousClusterId.setDescription('This object represents the previous cluster id.')
cLSiApAqLimit = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 3, 3), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: cLSiApAqLimit.setStatus('current')
if mibBuilder.loadTexts: cLSiApAqLimit.setDescription('This object represents limit on number of Monitor Mode APs supported for Air Quality monitoring.')
cLSiD11IdrUnclassifiedCurrentSevIndex = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 3, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 100))).setUnits('percent').setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: cLSiD11IdrUnclassifiedCurrentSevIndex.setStatus('current')
if mibBuilder.loadTexts: cLSiD11IdrUnclassifiedCurrentSevIndex.setDescription('This object indicates the current severity index for unclassified category of interference devices. If the severity value goes above threshold indicated by cLSiD11IdrUnclassifiedTrapThreshold, a notification would be generated.')
# --- Notifications (...9.9.513.1.6.1.0.x) ---
# The 0.1-0.5 notifications are marked 'deprecated' and superseded by the
# Rev1 variants (see their DESCRIPTION clauses).
ciscoLwappSiAqLow = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 0, 1)).setObjects(("CISCO-LWAPP-AP-MIB", "cLApSysMacAddress"), ("CISCO-LWAPP-AP-MIB", "cLApDot11IfSlotId"), ("CISCO-LWAPP-SI-MIB", "cLSiAqChannelNumber"), ("CISCO-LWAPP-SI-MIB", "cLSiAqIndex"), ("CISCO-LWAPP-SI-MIB", "cLSiD11AqiTrapThreshold"), ("CISCO-LWAPP-SI-MIB", "cLSiAlarmClear"))
if mibBuilder.loadTexts: ciscoLwappSiAqLow.setStatus('deprecated')
if mibBuilder.loadTexts: ciscoLwappSiAqLow.setDescription('This notification is generated when the air quality index of an AP falls below a specified threshold value indicated by cLSiD11AqiTrapThreshold. ciscoLwappSiAqLow object is superseded by ciscoLwappSiAqLowRev1.')
ciscoLwappSiIdrDevice = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 0, 2)).setObjects(("CISCO-LWAPP-AP-MIB", "cLApSysMacAddress"), ("CISCO-LWAPP-AP-MIB", "cLApDot11IfSlotId"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrDeviceId"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrDeviceType"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrAffectedChannels"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrSeverity"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrClusterId"), ("CISCO-LWAPP-SI-MIB", "cLSiAlarmClear"), ("CISCO-LWAPP-AP-MIB", "cLApName"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrPreviousClusterId"))
if mibBuilder.loadTexts: ciscoLwappSiIdrDevice.setStatus('deprecated')
# Fix grammar in the user-visible DESCRIPTION text: "a interferer" -> "an interferer".
if mibBuilder.loadTexts: ciscoLwappSiIdrDevice.setDescription('This notification is generated when a device has been identified as an interferer. This notification can be configured per interference device category. ciscoLwappSiIdrDevice object is superseded by ciscoLwappSiIdrDeviceRev1.')
# Further deprecated notifications: SensorD crash, AQ buffer unavailable,
# and combined AQ-low/severity-high.
ciscoLwappSiSensorCrash = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 0, 3)).setObjects(("CISCO-LWAPP-AP-MIB", "cLApSysMacAddress"), ("CISCO-LWAPP-AP-MIB", "cLApName"), ("CISCO-LWAPP-AP-MIB", "cLApDot11IfSlotId"), ("CISCO-LWAPP-SI-MIB", "cLSiApIfSensordOperationalStatus"), ("CISCO-LWAPP-SI-MIB", "cLSiApIfSensordErrorCode"))
if mibBuilder.loadTexts: ciscoLwappSiSensorCrash.setStatus('deprecated')
if mibBuilder.loadTexts: ciscoLwappSiSensorCrash.setDescription('This notification is generated when a crash is observed in the SensorD functionality of a radio on an AP. ciscoLwappSiSensorCrash object is superseded by ciscoLwappSiSensorCrashRev1.')
ciscoLwappSiAqBufferUnavailable = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 0, 4)).setObjects(("CISCO-LWAPP-AP-MIB", "cLApSysMacAddress"), ("CISCO-LWAPP-AP-MIB", "cLApDot11IfSlotId"), ("CISCO-LWAPP-SI-MIB", "cLSiApAqLimit"), ("CISCO-LWAPP-SI-MIB", "cLSiAlarmClear"))
if mibBuilder.loadTexts: ciscoLwappSiAqBufferUnavailable.setStatus('deprecated')
if mibBuilder.loadTexts: ciscoLwappSiAqBufferUnavailable.setDescription('This notification is generated when the controller detects that the Air Quality Buffer is unavailable. ciscoLwappSiAqBufferUnavailable object is superseded by ciscoLwappSiAqBufferUnavailableRev1.')
ciscoLwappSiAqLowSeverityHigh = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 0, 5)).setObjects(("CISCO-LWAPP-AP-MIB", "cLApSysMacAddress"), ("CISCO-LWAPP-AP-MIB", "cLApDot11IfSlotId"), ("CISCO-LWAPP-SI-MIB", "cLSiAqChannelNumber"), ("CISCO-LWAPP-SI-MIB", "cLSiAqIndex"), ("CISCO-LWAPP-SI-MIB", "cLSiD11AqiTrapThreshold"), ("CISCO-LWAPP-SI-MIB", "cLSiD11IdrUnclassifiedTrapThreshold"), ("CISCO-LWAPP-SI-MIB", "cLSiD11IdrUnclassifiedCurrentSevIndex"), ("CISCO-LWAPP-SI-MIB", "cLSiAlarmClear"))
if mibBuilder.loadTexts: ciscoLwappSiAqLowSeverityHigh.setStatus('deprecated')
if mibBuilder.loadTexts: ciscoLwappSiAqLowSeverityHigh.setDescription('This notification is generated when the air quality index of an APfalls below a specified threshold value indicated by cLSiD11AqiTrapThreshold or severity value goes above the threshold indicated by cLSiD11IdrUnclassifiedCurrentSevIndex. cLApSysMacAddress - MAC address of the access point. cLApDot11IfSlotId - radio interface slot. cLSiAqChannelNumber - channel number. cLSiAqIndex - air quality index. cLSiD11AqiTrapThreshold - threshol value of air quality considered for generation of trap. cLSiD11IdrUnclassifiedTrapThreshold - severity threshold for unclassified interference category. cLSiAlarmClear - a truth value showing if event was raised or cleared. ciscoLwappSiAqLowSeverityHigh object is superseded by ciscoLwappSiAqLowSeverityHighRev1.')
ciscoLwappSiAqLowRev1 = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 0, 6)).setObjects(("CISCO-LWAPP-SI-MIB", "cLSiAqIndex"), ("CISCO-LWAPP-SI-MIB", "cLSiD11AqiTrapThreshold"), ("CISCO-LWAPP-SI-MIB", "cLSiAlarmClear"))
if mibBuilder.loadTexts: ciscoLwappSiAqLowRev1.setStatus('current')
if mibBuilder.loadTexts: ciscoLwappSiAqLowRev1.setDescription('This notification is generated when the air quality index of an AP falls below a specified threshold value indicated by cLSiD11AqiTrapThreshold.')
ciscoLwappSiIdrDeviceRev1 = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 0, 7)).setObjects(("CISCO-LWAPP-SI-MIB", "cLSiIdrDeviceType"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrAffectedChannels"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrSeverity"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrClusterId"), ("CISCO-LWAPP-SI-MIB", "cLSiAlarmClear"), ("CISCO-LWAPP-AP-MIB", "cLApName"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrPreviousClusterId"))
if mibBuilder.loadTexts: ciscoLwappSiIdrDeviceRev1.setStatus('current')
if mibBuilder.loadTexts: ciscoLwappSiIdrDeviceRev1.setDescription('This notification is generated when a device has been identified as a interferer. This notification can be configured per interference device category.')
ciscoLwappSiSensorCrashRev1 = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 0, 8)).setObjects(("CISCO-LWAPP-AP-MIB", "cLApName"), ("CISCO-LWAPP-SI-MIB", "cLSiApIfSensordOperationalStatus"), ("CISCO-LWAPP-SI-MIB", "cLSiApIfSensordErrorCode"))
if mibBuilder.loadTexts: ciscoLwappSiSensorCrashRev1.setStatus('current')
if mibBuilder.loadTexts: ciscoLwappSiSensorCrashRev1.setDescription('This notification is generated when a crash is observed in the SensorD functionality of a radio on an AP.')
ciscoLwappSiAqBufferUnavailableRev1 = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 0, 9)).setObjects(("CISCO-LWAPP-SI-MIB", "cLSiApAqLimit"), ("CISCO-LWAPP-SI-MIB", "cLSiAlarmClear"))
if mibBuilder.loadTexts: ciscoLwappSiAqBufferUnavailableRev1.setStatus('current')
if mibBuilder.loadTexts: ciscoLwappSiAqBufferUnavailableRev1.setDescription('This notification is generated when the controller detects that the Air Quality Buffer is unavailable.')
ciscoLwappSiAqLowSeverityHighRev1 = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 0, 10)).setObjects(("CISCO-LWAPP-SI-MIB", "cLSiAqIndex"), ("CISCO-LWAPP-SI-MIB", "cLSiD11AqiTrapThreshold"), ("CISCO-LWAPP-SI-MIB", "cLSiD11IdrUnclassifiedTrapThreshold"), ("CISCO-LWAPP-SI-MIB", "cLSiD11IdrUnclassifiedCurrentSevIndex"), ("CISCO-LWAPP-SI-MIB", "cLSiAlarmClear"))
if mibBuilder.loadTexts: ciscoLwappSiAqLowSeverityHighRev1.setStatus('current')
if mibBuilder.loadTexts: ciscoLwappSiAqLowSeverityHighRev1.setDescription('This notification is generated when the air quality index of an AP falls below a specified threshold value indicated by cLSiD11AqiTrapThreshold or severity value goes above the threshold indicated by cLSiD11IdrUnclassifiedCurrentSevIndex. cLApSysMacAddress - MAC address of the access point. cLApDot11IfSlotId - radio interface slot. cLSiAqChannelNumber - channel number. cLSiAqIndex - air quality index. cLSiD11AqiTrapThreshold - threshold value of air quality considered for generation of trap. cLSiD11IdrUnclassifiedTrapThreshold - severity threshold for unclassified interference category. cLSiAlarmClear - a truth value showing if event was raised or cleared.')
ciscoLwappSiMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 2, 1))
ciscoLwappSiMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 2, 2))
ciscoLwappApSiMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 2, 1, 1)).setObjects(("CISCO-LWAPP-SI-MIB", "cLSiApIfConfigGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiApIfStatusGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiD11ConfigGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiAqChannelStatusGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiAqInterferenceClassReportStatusGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrStatusGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrClusterStatusGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiD11EventDrivenRrmConfigGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiPersistentDeviceStatusGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiPersistentDeviceChanStatusGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoLwappApSiMIBCompliance = ciscoLwappApSiMIBCompliance.setStatus('deprecated')
if mibBuilder.loadTexts: ciscoLwappApSiMIBCompliance.setDescription('The compliance statement for the SNMP entities that implement the ciscoLwappSiMIB module.')
ciscoLwappApSiMIBComplianceRev1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 2, 1, 2)).setObjects(("CISCO-LWAPP-SI-MIB", "cLSiApIfConfigGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiApIfStatusGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiD11ConfigGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiD11ConfigSup1Group"), ("CISCO-LWAPP-SI-MIB", "cLSiAqChannelStatusGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiAqInterferenceClassReportStatusGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrStatusGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrClusterStatusGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiD11EventDrivenRrmConfigGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiD11EventDrivenRrmConfigSup1Group"), ("CISCO-LWAPP-SI-MIB", "cLSiPersistentDeviceStatusGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiPersistentDeviceChanStatusGroup"), ("CISCO-LWAPP-SI-MIB", "ciscoLwappSiMIBNotifGroup"), ("CISCO-LWAPP-SI-MIB", "ciscoLwappSiMIBNotifVariableGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoLwappApSiMIBComplianceRev1 = ciscoLwappApSiMIBComplianceRev1.setStatus('deprecated')
if mibBuilder.loadTexts: ciscoLwappApSiMIBComplianceRev1.setDescription('The compliance statement for the SNMP entities that implement the ciscoLwappSiMIB module.')
ciscoLwappApSiMIBComplianceRev2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 2, 1, 3)).setObjects(("CISCO-LWAPP-SI-MIB", "cLSiApIfConfigGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiApIfStatusGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiD11ConfigGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiD11ConfigSup1Group"), ("CISCO-LWAPP-SI-MIB", "cLSiAqChannelStatusGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiAqInterferenceClassReportStatusGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrStatusGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrClusterStatusGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiD11EventDrivenRrmConfigGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiD11EventDrivenRrmConfigSup1Group"), ("CISCO-LWAPP-SI-MIB", "cLSiPersistentDeviceStatusGroup"), ("CISCO-LWAPP-SI-MIB", "cLSiPersistentDeviceChanStatusGroup"), ("CISCO-LWAPP-SI-MIB", "ciscoLwappSiMIBNotifVariableGroup"), ("CISCO-LWAPP-SI-MIB", "ciscoLwappSiMIBNotifGroupRev1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoLwappApSiMIBComplianceRev2 = ciscoLwappApSiMIBComplianceRev2.setStatus('current')
if mibBuilder.loadTexts: ciscoLwappApSiMIBComplianceRev2.setDescription('The compliance statement for the SNMP entities that implement the ciscoLwappSiMIB module.')
cLSiApIfConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 2, 2, 1)).setObjects(("CISCO-LWAPP-SI-MIB", "cLSiApIfSpectrumIntelligenceEnable"), ("CISCO-LWAPP-SI-MIB", "cLSiApIfRapidUpdateEnable"), ("CISCO-LWAPP-SI-MIB", "cLSiApIfDetailSpectrumModeEnable"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cLSiApIfConfigGroup = cLSiApIfConfigGroup.setStatus('current')
if mibBuilder.loadTexts: cLSiApIfConfigGroup.setDescription('This collection of objects represents the general configuration related information about the Spectrum Intelligence(SI) functionality of the dot11 interface of an AP that has joined the controller.')
cLSiApIfStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 2, 2, 2)).setObjects(("CISCO-LWAPP-SI-MIB", "cLSiApIfSpectrumCapable"), ("CISCO-LWAPP-SI-MIB", "cLSiApIfSensordOperationalStatus"), ("CISCO-LWAPP-SI-MIB", "cLSiApIfNumOfSeActiveConnection"), ("CISCO-LWAPP-SI-MIB", "cLSiApIfSensordErrorCode"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cLSiApIfStatusGroup = cLSiApIfStatusGroup.setStatus('current')
if mibBuilder.loadTexts: cLSiApIfStatusGroup.setDescription('This collection of objects represents the general status related information about the Spectrum Intelligence(SI) functionality of the dot11 interface of an AP that has joined the controller.')
cLSiD11ConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 2, 2, 3)).setObjects(("CISCO-LWAPP-SI-MIB", "cLSiD11SpectrumIntelligenceEnable"), ("CISCO-LWAPP-SI-MIB", "cLSiD11InterferenceDeviceList"), ("CISCO-LWAPP-SI-MIB", "cLSiD11PollingInterval"), ("CISCO-LWAPP-SI-MIB", "cLSiD11IdrReportingEnable"), ("CISCO-LWAPP-SI-MIB", "cLSiD11AqiTrapEnable"), ("CISCO-LWAPP-SI-MIB", "cLSiD11AqiTrapThreshold"), ("CISCO-LWAPP-SI-MIB", "cLSiD11IdrTrapEnable"), ("CISCO-LWAPP-SI-MIB", "cLSiD11IdrTrapDeviceList"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cLSiD11ConfigGroup = cLSiD11ConfigGroup.setStatus('current')
if mibBuilder.loadTexts: cLSiD11ConfigGroup.setDescription('This collection of objects represents the general configuration related information about the Air Quality (AQ) of the dot11 interface of a controller.')
cLSiAqChannelStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 2, 2, 4)).setObjects(("CISCO-LWAPP-SI-MIB", "cLSiAqMinIndex"), ("CISCO-LWAPP-SI-MIB", "cLSiAqIndex"), ("CISCO-LWAPP-SI-MIB", "cLSiAqTotalChannelPower"), ("CISCO-LWAPP-SI-MIB", "cLSiAqTotalChannelDutyCycle"), ("CISCO-LWAPP-SI-MIB", "cLSiAqInterferencePower"), ("CISCO-LWAPP-SI-MIB", "cLSiAqInterferenceDutyCycle"), ("CISCO-LWAPP-SI-MIB", "cLSiAqInterferenceDeviceCount"), ("CISCO-LWAPP-SI-MIB", "cLSiAqInterfererClassReportCount"), ("CISCO-LWAPP-SI-MIB", "cLSiAqTimeStamp"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cLSiAqChannelStatusGroup = cLSiAqChannelStatusGroup.setStatus('current')
if mibBuilder.loadTexts: cLSiAqChannelStatusGroup.setDescription('This collection of objects represents the general status related information about the Air Quality (AQ) of a channel on a dot11 interface of an AP that has joined the controller.')
cLSiAqInterferenceClassReportStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 2, 2, 5)).setObjects(("CISCO-LWAPP-SI-MIB", "cLSiAqInterferenceClassReportDeviceClass"), ("CISCO-LWAPP-SI-MIB", "cLSiAqInterferenceClassReportSeverityIndex"), ("CISCO-LWAPP-SI-MIB", "cLSiAqInterferenceClassReportPower"), ("CISCO-LWAPP-SI-MIB", "cLSiAqInterferenceClassReportDutyCycle"), ("CISCO-LWAPP-SI-MIB", "cLSiAqInterferenceClassReportDeviceCount"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cLSiAqInterferenceClassReportStatusGroup = cLSiAqInterferenceClassReportStatusGroup.setStatus('current')
if mibBuilder.loadTexts: cLSiAqInterferenceClassReportStatusGroup.setDescription('This collection of objects represents the general status related information about the Air Quality (AQ) interference category on a channel for a dot11 interface of an AP that has joined the controller.')
cLSiIdrStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 2, 2, 6)).setObjects(("CISCO-LWAPP-SI-MIB", "cLSiIdrClusterId"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrTimeStamp"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrDeviceType"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrSeverity"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrDetectingApMac"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrDutyCycle"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrAntennaId"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrRssi"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrRadioBandId"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrAffectedChannels"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrDeviceSignatureLen"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrDeviceSignature"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cLSiIdrStatusGroup = cLSiIdrStatusGroup.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrStatusGroup.setDescription('This collection of objects represents the general status related information about Interference Device Reports(IDR) corresponding to the dot11 interfaces of the APs that have joined the controller for a given device.')
cLSiIdrClusterStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 2, 2, 7)).setObjects(("CISCO-LWAPP-SI-MIB", "cLSiIdrClusterRadioBandId"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrClusterClusterId"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrClusterDeviceId"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrClusterTimeStamp"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrClusterDeviceType"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrClusterSeverity"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrClusterDetectingApMac"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrClusterDutyCycle"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrClusterAntennaId"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrClusterRssi"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrClusterAffectedChannels"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrClusterDeviceSignatureLen"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrClusterDeviceSignature"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrClusterCenterIndex"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrClusterType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cLSiIdrClusterStatusGroup = cLSiIdrClusterStatusGroup.setStatus('current')
if mibBuilder.loadTexts: cLSiIdrClusterStatusGroup.setDescription('This collection of objects represents the general status related information about Interference Device Reports(IDR) per cluster on the dot11 interfaces for the controller for a given device.')
cLSiD11EventDrivenRrmConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 2, 2, 8)).setObjects(("CISCO-LWAPP-SI-MIB", "cLSiD11EventDrivenRrmEnable"), ("CISCO-LWAPP-SI-MIB", "cLSiD11EventDrivenRrmThresLvl"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cLSiD11EventDrivenRrmConfigGroup = cLSiD11EventDrivenRrmConfigGroup.setStatus('current')
if mibBuilder.loadTexts: cLSiD11EventDrivenRrmConfigGroup.setDescription('This collection of objects represents the general configuration related information about event driven RRM (Radio Resource Management) corresponding to the dot11 band of a controller.')
cLSiPersistentDeviceStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 2, 2, 9)).setObjects(("CISCO-LWAPP-SI-MIB", "cLSiPersistentDeviceType"), ("CISCO-LWAPP-SI-MIB", "cLSiPersistentTimeStamp"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cLSiPersistentDeviceStatusGroup = cLSiPersistentDeviceStatusGroup.setStatus('current')
if mibBuilder.loadTexts: cLSiPersistentDeviceStatusGroup.setDescription('This collection of objects represents the general status related information about persistent interference devices of a radio of an AP that has joined the controller.')
cLSiPersistentDeviceChanStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 2, 2, 10)).setObjects(("CISCO-LWAPP-SI-MIB", "cLSiChannelAffected"), ("CISCO-LWAPP-SI-MIB", "cLSiChannelUtil"), ("CISCO-LWAPP-SI-MIB", "cLSiChannelRSSI"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cLSiPersistentDeviceChanStatusGroup = cLSiPersistentDeviceChanStatusGroup.setStatus('current')
if mibBuilder.loadTexts: cLSiPersistentDeviceChanStatusGroup.setDescription('This collection of objects represents the general status related information about persistent interference devices per channel on a radio of an AP that has joined the controller.')
cLSiD11ConfigSup1Group = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 2, 2, 11)).setObjects(("CISCO-LWAPP-SI-MIB", "cLSiD11IdrPersistentDevicePropagation"), ("CISCO-LWAPP-SI-MIB", "cLSiD11IdrUnclassifiedTrapEnable"), ("CISCO-LWAPP-SI-MIB", "cLSiD11IdrUnclassifiedTrapThreshold"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cLSiD11ConfigSup1Group = cLSiD11ConfigSup1Group.setStatus('current')
if mibBuilder.loadTexts: cLSiD11ConfigSup1Group.setDescription('This collection of objects represents the additional general configuration related information about the Air Quality (AQ) of the dot11 interface of a controller.')
cLSiD11EventDrivenRrmConfigSup1Group = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 2, 2, 12)).setObjects(("CISCO-LWAPP-SI-MIB", "cLSiD11EventDrivenRrmCustomThresVal"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cLSiD11EventDrivenRrmConfigSup1Group = cLSiD11EventDrivenRrmConfigSup1Group.setStatus('current')
if mibBuilder.loadTexts: cLSiD11EventDrivenRrmConfigSup1Group.setDescription('This collection of objects specifies the general configuration related additional information about event driven RRM (Radio Resource Management) corresponding to the dot11 band of a controller.')
ciscoLwappSiMIBNotifGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 2, 2, 13)).setObjects(("CISCO-LWAPP-SI-MIB", "ciscoLwappSiAqLow"), ("CISCO-LWAPP-SI-MIB", "ciscoLwappSiIdrDevice"), ("CISCO-LWAPP-SI-MIB", "ciscoLwappSiSensorCrash"), ("CISCO-LWAPP-SI-MIB", "ciscoLwappSiAqBufferUnavailable"), ("CISCO-LWAPP-SI-MIB", "ciscoLwappSiAqLowSeverityHigh"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoLwappSiMIBNotifGroup = ciscoLwappSiMIBNotifGroup.setStatus('deprecated')
if mibBuilder.loadTexts: ciscoLwappSiMIBNotifGroup.setDescription('This collection of objects represents the notifications defined within this MIB file. ciscoLwappSiMIBNotifGroup object is superseded by ciscoLwappSiMIBNotifGroupRev1.')
ciscoLwappSiMIBNotifVariableGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 2, 2, 14)).setObjects(("CISCO-LWAPP-SI-MIB", "cLSiAlarmClear"), ("CISCO-LWAPP-SI-MIB", "cLSiIdrPreviousClusterId"), ("CISCO-LWAPP-SI-MIB", "cLSiApAqLimit"), ("CISCO-LWAPP-SI-MIB", "cLSiD11IdrUnclassifiedCurrentSevIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoLwappSiMIBNotifVariableGroup = ciscoLwappSiMIBNotifVariableGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoLwappSiMIBNotifVariableGroup.setDescription('This collection of objects represents the notification related parameter within this MIB file.')
ciscoLwappSiMIBNotifGroupRev1 = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 513, 1, 6, 1, 2, 2, 15)).setObjects(("CISCO-LWAPP-SI-MIB", "ciscoLwappSiAqLowRev1"), ("CISCO-LWAPP-SI-MIB", "ciscoLwappSiIdrDeviceRev1"), ("CISCO-LWAPP-SI-MIB", "ciscoLwappSiSensorCrashRev1"), ("CISCO-LWAPP-SI-MIB", "ciscoLwappSiAqBufferUnavailableRev1"), ("CISCO-LWAPP-SI-MIB", "ciscoLwappSiAqLowSeverityHighRev1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoLwappSiMIBNotifGroupRev1 = ciscoLwappSiMIBNotifGroupRev1.setStatus('current')
if mibBuilder.loadTexts: ciscoLwappSiMIBNotifGroupRev1.setDescription('This collection of objects represents the notifications defined within this MIB file.')
mibBuilder.exportSymbols("CISCO-LWAPP-SI-MIB", cLSiApIfSpectrumIntelligenceEnable=cLSiApIfSpectrumIntelligenceEnable, cLSiD11EventDrivenRrmCustomThresVal=cLSiD11EventDrivenRrmCustomThresVal, ciscoLwappAirQuality=ciscoLwappAirQuality, ciscoLwappApSiMIBComplianceRev2=ciscoLwappApSiMIBComplianceRev2, cLSiIdrClusterDeviceSignature=cLSiIdrClusterDeviceSignature, cLSiIdrEntry=cLSiIdrEntry, cLSiDot11BandEventDrivenRrmEntry=cLSiDot11BandEventDrivenRrmEntry, PYSNMP_MODULE_ID=ciscoLwappSiMIB, cLSiPersistentDeviceTable=cLSiPersistentDeviceTable, cLSiAqTimeStamp=cLSiAqTimeStamp, cLSiIdrClusterStatusGroup=cLSiIdrClusterStatusGroup, cLSiApIfSpectrumCapable=cLSiApIfSpectrumCapable, cLSiIdrClusterDeviceIndex=cLSiIdrClusterDeviceIndex, cLSiChannelAffected=cLSiChannelAffected, ciscoLwappSiMIBNotifs=ciscoLwappSiMIBNotifs, ciscoLwappSiApIf=ciscoLwappSiApIf, cLSiD11IdrTrapEnable=cLSiD11IdrTrapEnable, ciscoLwappSiAqLowSeverityHigh=ciscoLwappSiAqLowSeverityHigh, cLSiIdrRssi=cLSiIdrRssi, cLSiAqTotalChannelPower=cLSiAqTotalChannelPower, cLSiPersistentDeviceId=cLSiPersistentDeviceId, cLSiPersistentTimeStamp=cLSiPersistentTimeStamp, cLSiIdrPreviousClusterId=cLSiIdrPreviousClusterId, cLSiIdrDeviceType=cLSiIdrDeviceType, ciscoLwappSiSensorCrashRev1=ciscoLwappSiSensorCrashRev1, cLSiApIfEntry=cLSiApIfEntry, cLSiApIfNumOfSeActiveConnection=cLSiApIfNumOfSeActiveConnection, ciscoLwappSiMIBNotifGroupRev1=ciscoLwappSiMIBNotifGroupRev1, ciscoLwappApSiMIBComplianceRev1=ciscoLwappApSiMIBComplianceRev1, cLSiIdrClusterDetectingApMac=cLSiIdrClusterDetectingApMac, ciscoLwappSiMIBNotifObjects=ciscoLwappSiMIBNotifObjects, cLSiD11IdrReportingEnable=cLSiD11IdrReportingEnable, cLSiApIfRapidUpdateEnable=cLSiApIfRapidUpdateEnable, cLSiApIfDetailSpectrumModeEnable=cLSiApIfDetailSpectrumModeEnable, cLSiIdrClusterDeviceSignatureLen=cLSiIdrClusterDeviceSignatureLen, cLSiAqChannelNumber=cLSiAqChannelNumber, cLSiAqMinIndex=cLSiAqMinIndex, cLSiD11IdrTrapDeviceList=cLSiD11IdrTrapDeviceList, 
cLSiAqInterferenceClassReportIndex=cLSiAqInterferenceClassReportIndex, cLSiD11Band=cLSiD11Band, cLSiIdrDeviceId=cLSiIdrDeviceId, cLSiPersistentDeviceChanIndex=cLSiPersistentDeviceChanIndex, cLSiD11EventDrivenRrmEnable=cLSiD11EventDrivenRrmEnable, cLSiIdrClusterAntennaId=cLSiIdrClusterAntennaId, cLSiChannelRSSI=cLSiChannelRSSI, ciscoLwappSiAqLowRev1=ciscoLwappSiAqLowRev1, cLSiAqEntry=cLSiAqEntry, cLSiIdrClusterDeviceId=cLSiIdrClusterDeviceId, cLSiD11EventDrivenRrmConfigSup1Group=cLSiD11EventDrivenRrmConfigSup1Group, cLSiD11EventDrivenRrmConfigGroup=cLSiD11EventDrivenRrmConfigGroup, ciscoLwappSiAqLow=ciscoLwappSiAqLow, cLSiAqIndex=cLSiAqIndex, cLSiApIfSensordErrorCode=cLSiApIfSensordErrorCode, cLSiIdrClusterRssi=cLSiIdrClusterRssi, ciscoLwappSiDot11Band=ciscoLwappSiDot11Band, cLSiD11IdrPersistentDevicePropagation=cLSiD11IdrPersistentDevicePropagation, cLSiPersistentDeviceEntry=cLSiPersistentDeviceEntry, cLSiIdrClusterRadioBandId=cLSiIdrClusterRadioBandId, cLSiD11IdrUnclassifiedCurrentSevIndex=cLSiD11IdrUnclassifiedCurrentSevIndex, cLSiIdrRadioBandId=cLSiIdrRadioBandId, cLSiAqInterferenceDutyCycle=cLSiAqInterferenceDutyCycle, cLSiApIfStatusGroup=cLSiApIfStatusGroup, cLSiIdrClusterClusterId=cLSiIdrClusterClusterId, cLSiAqInterferenceClassReportTable=cLSiAqInterferenceClassReportTable, cLSiD11InterferenceDeviceList=cLSiD11InterferenceDeviceList, ciscoLwappSiSensorCrash=ciscoLwappSiSensorCrash, cLSiAqInterferenceClassReportDeviceCount=cLSiAqInterferenceClassReportDeviceCount, cLSiD11AqiTrapEnable=cLSiD11AqiTrapEnable, cLSiAqTable=cLSiAqTable, cLSiAqInterferenceClassReportSeverityIndex=cLSiAqInterferenceClassReportSeverityIndex, cLSiDot11BandTable=cLSiDot11BandTable, cLSiIdrClusterAffectedChannels=cLSiIdrClusterAffectedChannels, cLSiD11IdrUnclassifiedTrapEnable=cLSiD11IdrUnclassifiedTrapEnable, cLSiAqInterferenceClassReportStatusGroup=cLSiAqInterferenceClassReportStatusGroup, cLSiD11SpectrumIntelligenceEnable=cLSiD11SpectrumIntelligenceEnable, 
cLSiD11ConfigGroup=cLSiD11ConfigGroup, ciscoLwappInterference=ciscoLwappInterference, cLSiIdrDeviceSignatureLen=cLSiIdrDeviceSignatureLen, cLSiAqInterferenceClassReportPower=cLSiAqInterferenceClassReportPower, cLSiPersistentDeviceChanTable=cLSiPersistentDeviceChanTable, cLSiD11PollingInterval=cLSiD11PollingInterval, ciscoLwappSiAqBufferUnavailable=ciscoLwappSiAqBufferUnavailable, cLSiApIfTable=cLSiApIfTable, cLSiAqInterferenceDeviceCount=cLSiAqInterferenceDeviceCount, ciscoLwappSiAqBufferUnavailableRev1=ciscoLwappSiAqBufferUnavailableRev1, ciscoLwappSiMIBCompliances=ciscoLwappSiMIBCompliances, cLSiIdrTable=cLSiIdrTable, cLSiIdrAffectedChannels=cLSiIdrAffectedChannels, cLSiIdrStatusGroup=cLSiIdrStatusGroup, ciscoLwappSiMIB=ciscoLwappSiMIB, cLSiD11EventDrivenRrmThresLvl=cLSiD11EventDrivenRrmThresLvl, cLSiD11ConfigSup1Group=cLSiD11ConfigSup1Group, cLSiIdrClusterDeviceType=cLSiIdrClusterDeviceType, cLSiD11AqiTrapThreshold=cLSiD11AqiTrapThreshold, cLSiIdrDetectingApMac=cLSiIdrDetectingApMac, ciscoLwappSiMIBNotifVariableGroup=ciscoLwappSiMIBNotifVariableGroup, cLSiPersistentDeviceStatusGroup=cLSiPersistentDeviceStatusGroup, cLSiPersistentDeviceChanEntry=cLSiPersistentDeviceChanEntry, ciscoLwappApSiMIBCompliance=ciscoLwappApSiMIBCompliance, ciscoLwappSiMIBConform=ciscoLwappSiMIBConform, cLSiAqTotalChannelDutyCycle=cLSiAqTotalChannelDutyCycle, cLSiIdrClusterId=cLSiIdrClusterId, ciscoLwappSiMIBGroups=ciscoLwappSiMIBGroups, cLSiPersistentDeviceType=cLSiPersistentDeviceType, cLSiIdrDutyCycle=cLSiIdrDutyCycle, cLSiDot11BandEventDrivenRrmTable=cLSiDot11BandEventDrivenRrmTable, cLSiIdrTimeStamp=cLSiIdrTimeStamp, cLSiIdrClusterTable=cLSiIdrClusterTable, cLSiChannelUtil=cLSiChannelUtil, cLSiAqInterferencePower=cLSiAqInterferencePower, cLSiAqInterfererClassReportCount=cLSiAqInterfererClassReportCount, cLSiD11IdrUnclassifiedTrapThreshold=cLSiD11IdrUnclassifiedTrapThreshold, ciscoLwappSiAqLowSeverityHighRev1=ciscoLwappSiAqLowSeverityHighRev1, 
cLSiAqInterferenceClassReportDutyCycle=cLSiAqInterferenceClassReportDutyCycle, cLSiApAqLimit=cLSiApAqLimit, cLSiIdrClusterCenterIndex=cLSiIdrClusterCenterIndex, ciscoLwappSiMIBObjects=ciscoLwappSiMIBObjects, cLSiIdrAntennaId=cLSiIdrAntennaId, cLSiApIfConfigGroup=cLSiApIfConfigGroup, cLSiAqInterferenceClassReportDeviceClass=cLSiAqInterferenceClassReportDeviceClass, cLSiIdrClusterEntry=cLSiIdrClusterEntry, cLSiAqChannelStatusGroup=cLSiAqChannelStatusGroup, cLSiIdrSeverity=cLSiIdrSeverity, cLSiIdrClusterTimeStamp=cLSiIdrClusterTimeStamp, cLSiIdrClusterDutyCycle=cLSiIdrClusterDutyCycle, cLSiAqInterferenceClassReportEntry=cLSiAqInterferenceClassReportEntry, ciscoLwappSiMIBNotifGroup=ciscoLwappSiMIBNotifGroup, cLSiIdrClusterType=cLSiIdrClusterType, ciscoLwappSiIdrDeviceRev1=ciscoLwappSiIdrDeviceRev1, cLSiIdrClusterSeverity=cLSiIdrClusterSeverity, cLSiDot11BandEntry=cLSiDot11BandEntry, cLSiApIfSensordOperationalStatus=cLSiApIfSensordOperationalStatus, cLSiAlarmClear=cLSiAlarmClear, ciscoLwappSiIdrDevice=ciscoLwappSiIdrDevice, cLSiPersistentDeviceChanStatusGroup=cLSiPersistentDeviceChanStatusGroup, cLSiIdrDeviceSignature=cLSiIdrDeviceSignature)
|
import numpy as np

# Read an n x m integer matrix from stdin and print its mean along axis 1,
# variance along axis 0, and standard deviation over the flattened matrix
# (HackerRank "Mean, Var, and Std" exercise).
#
# Fix: the original last line carried a stray trailing `|` token, which is a
# SyntaxError.

# legacy print mode keeps the numeric output format the grader expects
np.set_printoptions(legacy='1.13')

n, m = map(int, input().split())  # matrix dimensions
arr = np.array([input().split() for _ in range(n)], int)

print(np.mean(arr, axis=1))    # one mean per row
print(np.var(arr, axis=0))     # one variance per column
print(np.std(arr, axis=None))  # std over all elements
from selenium import webdriver
from selenium.webdriver.common.by import By

# Open the Invent with Python homepage and follow the "Read Online for Free"
# link. The legacy find_element_by_link_text() helper was removed in
# Selenium 4; the supported form is find_element(By.LINK_TEXT, ...).
# (Also removes a stray trailing `|` token that made the last line a
# SyntaxError.)
browser = webdriver.Firefox()
browser.get('http://inventwithpython.com')
link_elem = browser.find_element(By.LINK_TEXT, 'Read Online for Free')
link_elem.click()
#!/usr/bin/env python
import unittest
from flosrv import FlorinCoinSrv
import flosrv
from metachains import Florincoin, Synchronizer
from decimal import Decimal
import os
class MockCoin(object):
    """Stand-in coin backend for Synchronizer tests; returns canned values."""

    @property
    def MaxPayloadSize(self):
        # Largest metadata payload (in bytes) the fake coin accepts.
        return 16

    def block_count(self):
        """Pretend the chain is exactly 128 blocks long."""
        return 128

    def blocks(self, num):
        """Return five references to one fixed sample block, ignoring *num*."""
        sample = {'height': 9999}
        return [sample] * 5

    def send_data_address(self, payload, addr, amount):
        """Accept and silently discard a metadata send request."""

    def transactions(self, block):
        """Every fake block is empty of transactions."""
        return {}
class MockCloud(object):
    """Fake cloud store: records loaded payloads, dumps a canned one."""

    def __init__(self):
        # Countdown of payloads data_dump() will still emit; once it
        # reaches zero the mock signals exhaustion by returning None.
        self.i = 1
        # txid -> info mapping captured by data_load() for later inspection.
        self.data_loaded = {}

    def last_known_block(self):
        """Synchronization always starts from block zero."""
        return 0

    def visit_block(self, blocknum):
        """Block visits are ignored by the mock."""

    def data_load(self, info, txid):
        """Record *info* under *txid* so tests can check what was loaded."""
        self.data_loaded[txid] = info

    def data_dump(self, max_):
        """Return a canned payload until the countdown hits zero, else None."""
        self.i -= 1
        return None if self.i <= 0 else b'PAYLOAD' * 32
class FlorincoinTest(unittest.TestCase):
    """Integration tests for the Florincoin JSON-RPC client run against the
    in-process mock server FlorinCoinSrv."""

    # Size in bytes of the synthetic payloads stored via send_data_address.
    DATA_SIZE = 2*1024

    def setUp(self):
        # Seed the mock server with ten fake blocks keyed by hash(i)
        # (hash(i) == i for small ints, so height and hash coincide here).
        FlorinCoinSrv.Blocks = { hash(i): { 'address': 0, 'amount': 0, 'tx-comment': '-', 'tx': [], 'height': i, 'hash': hash(i), } for i in range(10) }
        # FlorinCoinSrv.Transactions = { }
        flosrv.Transactions = { }
        self.srv = FlorinCoinSrv()
        self.flo = Florincoin(self.srv.url, self.srv.username, self.srv.passwd)
        # Synchronizer wired to a fresh MockCloud; exercised in test_roundtrip.
        self.sync = Synchronizer(self.flo, MockCloud())

    def tearDown(self):
        # Release the client, then the server created in setUp.
        del self.flo
        del self.srv

    def test_misc(self):
        '''Check various jsonrpc queries
        '''
        block_count = self.flo.block_count()
        assert block_count
        response = self.flo.balance()
        assert response
        # blocks(0, n) is expected to yield exactly n entries.
        response = list(self.flo.blocks(0, block_count))
        assert len(response) == block_count
        response = self.flo.address(0)
        assert response

    def test_transactions(self):
        # 'tx': 0 is not a valid transaction list; transactions() is
        # expected to cope and still return something truthy.
        invalid_block = {
            'tx': 0,
        }
        response = self.flo.transactions(invalid_block)
        assert response

    def test_send(self):
        '''Test 'sendtoaddress' method used to store metadata
        '''
        large_data_corpus = b'i' * (FlorincoinTest.DATA_SIZE)
        txid = self.flo.send_data_address(large_data_corpus, 'addr', Decimal('0.01'))
        assert txid
        # The mock server records every stored transaction by its txid.
        assert txid in self.srv.httpd.Transactions

    @unittest.skip('unrealistic test case')
    def test_send_high_entropy(self):
        # Incompressible random payload; case is skipped as unrealistic.
        high_entropy = os.urandom(FlorincoinTest.DATA_SIZE)
        response = self.flo.send_data_address(high_entropy, 'addr', Decimal('0.01'))
        assert response

    def test_roundtrip(self):
        # Store a payload, run a blockchain scan, and verify the
        # synchronizer's cloud observed the same txid.
        large_data_corpus = b'R' * (FlorincoinTest.DATA_SIZE)
        txid = self.flo.send_data_address(large_data_corpus, 'addr', Decimal('0.01'))
        assert txid != None
        assert txid in self.srv.httpd.Transactions
        tx_entry = self.flo._get_transaction(txid)
        # Metadata travels in the 'tx-comment' field of the transaction.
        assert 'tx-comment' in tx_entry
        # self.sync.scan_database()
        self.sync.scan_blockchain()
        assert txid in self.srv.httpd.Transactions
        assert txid in self.sync.cloud.data_loaded
class SyncTest(unittest.TestCase):
    """Exercise Synchronizer against the in-memory mock coin and cloud."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_scan(self):
        """A database scan drains the mock cloud; a chain scan then runs."""
        backend = MockCoin()
        store = MockCloud()
        synchronizer = Synchronizer(backend, store)
        synchronizer.scan_database()
        # MockCloud starts its countdown at 1, so a single data_dump()
        # exhausts it and leaves the counter at zero.
        assert store.i == 0
        synchronizer.scan_blockchain()
|
import attr
from viberio.types.base import ViberBaseObject
@attr.s
class Message(ViberBaseObject):
    """Common optional fields shared by all outgoing Viber messages.

    All attributes default to None and are presumably serialized only when
    set — confirm against ViberBaseObject's serialization logic.
    """
    tracking_data: str = attr.ib(default=None)
    keyboard: str = attr.ib(default=None)
    min_api_version: str = attr.ib(default=None)
    alt_text: str = attr.ib(default=None)
@attr.s
class TypedMessage(Message):
    """Message subclass carrying a ``type`` discriminator and a text body.

    Fix: the previous hand-written ``__init__`` was dead code — ``@attr.s``
    generates ``__init__`` and replaces any one defined in the class body,
    so ``self.text = None`` never ran and instances had no ``text``
    attribute at all.  ``text`` is now declared as a regular attrs
    attribute; it is appended after ``type`` so the generated ``__init__``
    keeps the original positional parameter order (backward compatible).
    """
    # Viber message type discriminator — TODO confirm allowed values
    # against the Viber REST API.
    type: str = attr.ib(default=None)
    # Message body text; previously never actually set (see class docstring).
    text: str = attr.ib(default=None)
|
import Bboard
import Wboard
import board
import Bboardbak
import Wboardbak
import Bboardbak2
import Wboardbak2
import Wkiki1
import Wkiki2
import Bkiki1
import Bkiki2
import Bkikimoves1
import gc
def torare():
    """Score white pieces currently under attack by black (capture threats).

    Walks squares 0..79 (NOTE(review): 80, not 81 — the last square seems
    to be skipped; confirm against the kikilist layout).  For each white
    piece on square j it subtracts a piece-value penalty weighted by the
    black attacker count ``b`` and offset by the white defender count
    ``w``, then folds the running total into ``torare2`` whenever it is
    negative.  Returns the accumulated (non-positive) threat score.

    NOTE(review): indentation of this chunk was reconstructed; also the
    running ``torare`` total is never reset between squares, so penalties
    compound across the loop — confirm this is intentional.
    """
    torare=0
    torare2=0
    for j in range(80):
        b = 0
        w = 0
        # b: black attack count on square j; w: white attack count
        # (presumably — derived from the Bkiki2/Wkiki2 tables; verify).
        b = Bkiki2.kikilist1[j]
        w = Wkiki2.kikilist1[j]
        # kikilist2[j] holds the SFEN letter of the white piece on square j:
        # p pawn, l lance, n knight, s silver, g gold, b bishop, r rook,
        # '+x' for the promoted form.
        if Wkiki2.kikilist2[j] =='p':
            torare -= b*150-w*10
            if torare<0:
                torare2+=torare
        if Wkiki2.kikilist2[j] =='l':
            torare -= b*800-w*10
            if torare<0:
                torare2+=torare
        if Wkiki2.kikilist2[j] =='n':
            torare -= b*800-w*10
            if torare<0:
                torare2+=torare
        # Golds, silvers, and all promoted minor pieces share one value.
        if Wkiki2.kikilist2[j] =='g' or Wkiki2.kikilist2[j] =='s' or Wkiki2.kikilist2[j] =='+p' or Wkiki2.kikilist2[j] =='+l' or Wkiki2.kikilist2[j] =='+n' or Wkiki2.kikilist2[j] =='+s':
            torare -= b*1000-w*10
            if torare<0:
                torare2+=torare
        # Major pieces: defender count is ignored for bishop/rook below —
        # presumably deliberate, since losing one is bad even if defended.
        if Wkiki2.kikilist2[j] =='b':
            torare -= b*2500
            if torare<0:
                torare2+=torare
        if Wkiki2.kikilist2[j] =='r':
            torare -= b*3000
            if torare<0:
                torare2+=torare
        if Wkiki2.kikilist2[j] =='+r' or Wkiki2.kikilist2[j] =='+b':
            torare -= b*3200
            if torare<0:
                torare2+=torare
    return torare2
def eval(sfen):
    """Score every candidate move in ``sfen`` on a simulated board and set the
    module-global ``sashite`` to the index of the highest-scoring one.

    NOTE(review): the name shadows the built-in ``eval``, and the body mutates
    module-global board state through ``exec`` — statement order is
    load-bearing, so the code is left byte-identical.

    :param sfen: list of USI-style move strings, e.g. "7g7f" or "P*5e" or "2b3c+"
    """
    global sashite,score
    score =[]
    for i in range(len(sfen)):
        # Restore the saved board position before simulating this candidate.
        Bboardbak.yobidashi()
        Wboardbak.yobidashi()
        mae = sfen[i][0:2]      # origin square, or "X*" for a drop
        ushiro = sfen[i][2:4]   # destination square
        nari = sfen[i][4:5]     # '+' when the move promotes
        if mae[1:2]=='*':
            # Drop: place the piece named by the first character on the destination.
            exec('Wboard.w{}="{}"'.format(ushiro,mae[0:1].lower()))
        else:
            # Normal move: copy the piece from origin to destination.
            exec('Wboard.w{}=Wboard.w{}'.format(ushiro,mae))
        if nari == '+':
            exec("Wboard.w{}= '+'+Wboard.w{}".format(ushiro,ushiro))
        exec('Wboard.koma=Bboard.b{}'.format(ushiro))
        if Wboard.koma != '':
            # A black piece was captured: bump white's in-hand counter for it.
            exec('Wboard.{}+=1'.format(Wboard.koma[-1:].lower()))
        exec("Wboard.w{}=''".format(mae))
        exec("Bboard.b{}=''".format(ushiro))
        Bboardbak2.kioku()
        Wboardbak2.kioku()
        board.synth()
        Bkikimoves1.move1()
        if Bkikimoves1.depth1 == [] and sfen[i][0:2] != 'P*':
            # Black has no reply (and this is not a pawn drop):
            # output this move immediately and stop searching.
            print('bestmove '+sfen[i])
            return
        Wkiki1.culc()
        Wkiki2.culc()
        Bkiki2.culc()
        torare2=torare()
        Wboardbak2.yobidashi()
        # Material in hand, weighted per piece type.
        koma=Wboard.p*350+Wboard.l*850+Wboard.n*850+Wboard.s*1250+Wboard.g*1250+Wboard.b*20000+Wboard.r*30000
        point=0
        point=Wkiki1.kiki1*2+Wkiki2.kiki2*3+torare2+koma
        score.append(point)
        # Reset the shared attack lists for the next candidate.
        Bkiki2.kikilist1.clear()
        Bkiki2.kikilist2.clear()
        Wkiki2.kikilist1.clear()
        Wkiki2.kikilist2.clear()
    sashite = score.index(max(score))
|
"""
"""
from masterstudent.tensorflowstudent import TensorFlowStudent
from args import get_parser
# make this contextmanager
def load_config(config_path):
    """Load a YAML configuration file and return the parsed content.

    Uses ``yaml.safe_load`` instead of the bare ``yaml.load``: the latter is
    deprecated without an explicit ``Loader`` and can instantiate arbitrary
    Python objects from untrusted input.

    :param config_path: path to the YAML file
    :return: parsed configuration (typically a dict; None for an empty file)
    """
    from yaml import safe_load
    with open(config_path, 'r') as f:
        return safe_load(f)
def run_masters_student(backend_type, config):
    """Instantiate and start the student for the requested backend.

    :param backend_type: compute backend name; only "tensorflow" is supported
    :param config: parsed configuration handed to the student
    :raises ValueError: if ``backend_type`` is not a known backend
    """
    if backend_type == "tensorflow":
        student = TensorFlowStudent(config)
    else:
        # BUG FIX: an unknown backend previously left ``student`` as None and
        # crashed with AttributeError on ``student.start()``; fail clearly.
        raise ValueError("unknown backend: {!r}".format(backend_type))
    student.start()
if __name__ == "__main__":
    # Parse CLI arguments, load the YAML config, and launch the student.
    args = get_parser().parse_args()
    config = load_config(args.config)
    try:
        backend = config['backend']
        run_masters_student(backend, config)
    except KeyError:
        # The config file did not specify which backend to use.
        print('backend not defined')
|
from lib.utils import *
from collections import defaultdict
def loadSegmentsIndex(segFile, DExons):
    """Parse a segments meta TSV into lookup structures keyed by gene.

    :param segFile: tab-separated file with a header row; the first nine
        columns are segID, chrm, geneID, txAnIDs, binIDs, start, end,
        strand, length
    :param DExons: mapping from exon bin ID (int) to an exon object exposing
        ``start`` and ``end`` attributes
    :return: tuple (segRanges, segRangesSortedKeys, segTxs, segLens) where
        segRanges[geneID][coord] is the set of segment IDs with a boundary at
        that coordinate, segRangesSortedKeys[geneID] is the sorted coordinate
        list, segTxs maps segID -> transcript set, segLens maps segID -> length
    """
    segRanges = defaultdict(lambda : defaultdict(set))
    segTxs = {}
    segLens = {}
    with open(segFile) as sFile:
        for lc, line in enumerate(sFile):
            if lc == 0: #skip header
                continue
            tokens = line.strip().split("\t")
            segID, chrm, geneID, txAnIDs, binIDs, start, end, strand, length = tokens[:9]
            segTxs[segID] = str2Set(txAnIDs)
            segLens[segID] = int(length)
            exons = [DExons[int(ex)] for ex in binIDs.split(',')]
            if strand == "-":
                # On the minus strand the recorded start/end can sit on the
                # opposite ends of the first/last exon; swap them to the
                # corresponding boundary so indexing stays consistent.
                if int(start) == exons[0].start:
                    start = str(exons[0].end)
                if int(end) == exons[-1].end:
                    end = str(exons[-1].start)
            for i, exon in enumerate(exons):
                # Register both boundaries of the segment's span within each exon;
                # interior exons contribute their own boundaries, the first/last
                # exon contributes the segment's own start/end.
                if strand == "+":
                    st = int(start) if i == 0 else exon.start
                    ed = int(end) if i == len(exons)-1 else exon.end
                else:
                    st = int(start) if i == 0 else exon.end
                    ed = int(end) if i == len(exons)-1 else exon.start
                segRanges[geneID][st].add(segID)
                segRanges[geneID][ed].add(segID)
    segRangesSortedKeys = {}
    for geneID in segRanges:
        segRangesSortedKeys[geneID] = sorted(segRanges[geneID].keys())
    return (segRanges, segRangesSortedKeys, segTxs, segLens)
# A grange in granges list is a tuple: (start,end)
def getSegsForRanges(geneID, granges, txs, index, sortedIdxKeys, segTxs, mode):
    """Map genomic ranges of one gene onto segment IDs.

    :param geneID: gene whose index entries to search
    :param granges: iterable of (start, end, kind) triples; kind "E" marks an
        exonic range whose interior boundaries should also be collected
    :param txs: transcript set used to filter the strict result
    :param index: segRanges structure from loadSegmentsIndex
    :param sortedIdxKeys: sorted boundary coordinates per gene
    :param segTxs: segID -> transcript set
    :param mode: "strict" keeps segments whose transcripts are a subset of
        ``txs``; anything else ("flex") keeps segments with any overlap
    :return: (all matching segments, filtered segments)
    """
    segs = set()
    geneIdx = index[geneID]
    sortedKeys = sortedIdxKeys[geneID]
    for grange in granges:
        # Segments whose boundaries bracket this range on both sides.
        segs |= (geneIdx[grange[0]] & geneIdx[grange[1]])
        if grange[2] == "E":
            # Exonic range: also collect segments with a boundary strictly inside it.
            i = sortedKeys.index(grange[0]) + 1
            while i < len(sortedKeys) and sortedKeys[i] < grange[1]:
                segs |= geneIdx[sortedKeys[i]]
                i = i + 1
    strictSegs = set()
    for seg in segs:
        #use intersection for .flex, subset for strict
        if mode == "strict":
            # BUG FIX: built-in sets have no ``subset`` method — the original
            # ``segTxs[seg].subset(txs)`` raised AttributeError in strict mode.
            if segTxs[seg].issubset(txs):
                strictSegs.add(seg)
        else:
            if segTxs[seg].intersection(txs):
                strictSegs.add(seg)
    return (segs, strictSegs)
def event2Ranges(eventID):
    """Decode a SUPPA-style event ID into genomic ranges.

    :param eventID: string like "gene;SE:chr:100-200:300-400:+"
    :return: (geneID, [inclusionRanges, exclusionRanges], switchTxs, incLen)
        where each range is [start, end, kind] with kind "J" (junction) or
        "E" (exon); switchTxs signals the caller to swap inc/ex transcripts
    """
    # Protect the strand marker (":-") before turning coordinate dashes into
    # colons, then split every field on ":".
    fields = (eventID.strip()
              .replace(';', ':')
              .replace(':-', '^')
              .replace('-', ':')
              .replace('^', ':-')
              .split(':'))
    geneID, etype, seqname = fields[:3]
    strand = fields[-1]
    locs = [int(loc) for loc in fields[3:-1]]
    switchTxs = (etype == "AL" and strand == "+")
    if strand == "-":
        # Map strand-dependent event types onto their "+"-strand twin.
        if etype == "A5":
            etype = "A3"
        elif etype == "A3":
            etype = "A5"
        elif etype == "AF":
            etype, switchTxs = "AL", True
        elif etype == "AL":
            etype = "AF"
        elif etype == "MX":
            # Swap the two exon halves so the first triplet is "inclusion".
            locs = locs[4:] + locs[:4]
            switchTxs = True
    J, E = "J", "E"

    def rng(a, b, kind):
        # One [start, end, kind] range entry.
        return [a, b, kind]

    inc = exc = None
    incLen = 0
    if etype == 'SE':    # Skipped Exon
        inc = [rng(locs[0], locs[1], J), rng(locs[1], locs[2], E), rng(locs[2], locs[3], J)]
        exc = [rng(locs[0], locs[3], J)]
        incLen = locs[2] - locs[1]
    elif etype == 'MX':  # Mutually Exclusive Exons
        inc = [rng(locs[0], locs[1], J), rng(locs[1], locs[2], E), rng(locs[2], locs[3], J)]
        exc = [rng(locs[4], locs[5], J), rng(locs[5], locs[6], E), rng(locs[6], locs[7], J)]
        incLen = locs[2] - locs[1]
    elif etype == 'A5':  # Alt. 5' splice-site
        inc = [rng(locs[2], locs[2] + 1, J), rng(locs[2], locs[0], E), rng(locs[0], locs[1], J)]
        exc = [rng(locs[2], locs[3], J)]
        incLen = locs[0] - locs[2]
    elif etype == 'A3':  # Alt. 3' splice-site
        inc = [rng(locs[0], locs[1], J), rng(locs[1], locs[3], E), rng(locs[3] - 1, locs[3], J)]
        exc = [rng(locs[0], locs[3], J)]
        incLen = locs[3] - locs[1]
    elif etype == 'RI':  # Retained Intron
        inc = [rng(locs[1], locs[1] + 1, J), rng(locs[1] + 1, locs[2] - 1, E), rng(locs[2] - 1, locs[2], J)]
        exc = [rng(locs[1], locs[2], J)]
        incLen = locs[2] - locs[1]
    elif etype == 'AF':  # Alt. First Exon
        inc = [rng(locs[0], locs[1], E), rng(locs[1], locs[2], J)]
        exc = [rng(locs[3], locs[4], E), rng(locs[4], locs[5], J)]
    elif etype == 'AL':  # Alt. Last Exon
        inc = [rng(locs[3], locs[4], J), rng(locs[4], locs[5], E)]
        exc = [rng(locs[0], locs[1], J), rng(locs[1], locs[2], E)]
    if inc is None:
        # Unknown event type: no ranges.
        return (geneID, [], switchTxs, 0)
    return (geneID, [inc, exc], switchTxs, incLen)
def getCumSegLens(segs, segLens):
    """Return the total length of the given segment IDs."""
    return sum(segLens[segID] for segID in segs)
gevent = ""  # Module-level holder for the event ID currently being processed (set by getSegsForIOEFile; debugging aid)
def getSegsForIOEFile(ioeF, outF, segRanges, sortedIdxKeys, segTxs, segLens, mode):
    """Map every event in a SUPPA .ioe file to segments and write three outputs.

    Writes ``outF`` (all overlapping segments), ``outF.<mode>.evs2segs``
    (transcript-filtered segments) and ``outF.<mode>.segs`` (compact
    inclusion/total listing). Events that cannot be mapped are reported and
    written with empty segment sets.

    :param ioeF: input .ioe file (header + seqname/geneID/eventID/incTxs/totTxs rows)
    :param outF: base path for the three output files
    :param mode: "strict" or anything else (normalised to "flex")
    """
    if mode != "strict":
        mode = "flex"
    with open(ioeF) as f, open(outF, 'w') as outFile, open(outF+'.'+mode+".evs2segs", 'w') as outFS, open(outF+'.'+mode+'.segs', 'w') as outFSS:
        header = '\t'.join(['seqname', 'geneID', 'eventID', 'incSegs', 'exSegs', 'incTxs', 'exTxs',
                            'incSegLen', 'exSegLen', 'incLen'])+'\n'
        outFile.write(header)
        outFS.write(header)
        # The .segs file re-uses the input's header line.
        outFSS.write(f.readline())
        for i, ioeLine in enumerate(f):
            seqname, geneID, eventID, inc_txs, tot_txs = ioeLine.strip().split('\t')
            # Stash the current event globally (debugging aid).
            global gevent
            gevent = eventID
            geneID, ranges, switchTxs, incLen = event2Ranges(eventID)
            incTxs = str2Set(inc_txs)
            exTxs = str2Set(tot_txs) - incTxs
            if switchTxs:
                # Strand/type normalisation flipped inclusion and exclusion.
                buf = incTxs
                incTxs = exTxs
                exTxs = buf
            try:
                incSegs, strictIncSegs = getSegsForRanges(geneID, ranges[0], incTxs, segRanges, sortedIdxKeys, segTxs, mode)
                exSegs, strictExSegs = getSegsForRanges(geneID, ranges[1], exTxs, segRanges, sortedIdxKeys, segTxs, mode)
                # A segment supporting inclusion cannot also count as exclusion.
                exSegs -= incSegs
            except Exception as e:
                # Unmappable event: emit rows with empty segment sets and move on.
                print("Event "+eventID+" can't be mapped to segments. Annotation doesn't match")
                outFile.write('\t'.join([seqname, geneID, eventID,
                                         set2Str(set()), set2Str(set()),
                                         set2Str(incTxs), set2Str(exTxs),
                                         str(0),
                                         str(0),
                                         str(0)]) + "\n")
                outFS.write('\t'.join([seqname, geneID, eventID,
                                       set2Str(set()), set2Str(set()),
                                       set2Str(incTxs), set2Str(exTxs),
                                       str(0),
                                       str(0),
                                       str(0)]) + "\n")
                outFSS.write('\t'.join([seqname, geneID, eventID,
                                        set2Str(set()), set2Str(set())]) + "\n")
                print(e)
                continue
            if not strictIncSegs or not strictExSegs:
                print(eventID, "has empty segments set in either inclusion or exclusion")
            outFile.write('\t'.join([seqname, geneID, eventID,
                                     set2Str(incSegs), set2Str(exSegs),
                                     set2Str(incTxs), set2Str(exTxs),
                                     str(getCumSegLens(incSegs, segLens)),
                                     str(getCumSegLens(exSegs, segLens)),
                                     str(incLen)]) + "\n")
            outFS.write('\t'.join([seqname, geneID, eventID,
                                   set2Str(strictIncSegs), set2Str(strictExSegs),
                                   set2Str(incTxs), set2Str(exTxs),
                                   str(getCumSegLens(strictIncSegs, segLens)),
                                   str(getCumSegLens(strictExSegs, segLens)),
                                   str(incLen)]) + "\n")
            outFSS.write('\t'.join([seqname, geneID, eventID,
                                    set2Str(strictIncSegs), set2Str(strictIncSegs | strictExSegs)]) + "\n")
            print(i)
def generateEventsSegsIOE(segFile, DExons, eventsFile, outFname, mode):
    """Build the segment index from ``segFile`` and map every event in ``eventsFile`` onto it."""
    index_data = loadSegmentsIndex(segFile, DExons)
    segRanges, sortedIdxKeys, segTxs, segLens = index_data
    getSegsForIOEFile(eventsFile, outFname, segRanges, sortedIdxKeys, segTxs, segLens, mode)
#from ReferenceLoader import *
if __name__ == '__main__':
    # Ad-hoc manual run against local output files.
    # NOTE(review): load_disjointExons presumably comes from ReferenceLoader,
    # whose star-import is commented out above — as written this guard likely
    # fails with NameError; confirm before running.
    DExons = load_disjointExons("../../output")
    generateEventsSegsIOE("../../output/hg19_segs100.fa.meta", DExons, "../../output/hg37_5types_noAlt.ioe",
                          "../../output/hg19_segs100_hg37_5types_noAlt.ioe", "flex")
|
# pytorch
import pinocchio
from data.data_config import DataGenConfig
import torch
from torch import nn
import numpy as np
from os.path import dirname, abspath, join
from model import ConstrainedCVAE
def inference(pose, z = None):
    """Run the trained constrained CVAE and return a joint configuration for ``pose``.

    :param pose: desired end-effector pose tensor
    :param z: optional latent code; None lets the model choose/sample its own
    :return: joint configuration tensor produced by the network
    """
    run_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = ConstrainedCVAE().to(run_device)
    weights = torch.load("model/weights/constrained_cvae_weights.pth", map_location=run_device)
    net.load_state_dict(weights)
    net.eval()
    # No gradients needed for pure inference.
    with torch.no_grad():
        q = net(desired_pose=pose.to(run_device), z=z)
    return q
if __name__ == "__main__":
    # Replay expert cartesian poses through the CVAE and compare the forward
    # kinematics of the generated joints against the requested pose.
    cat_trajs = np.load('data/franka_panda_insertion_logs/experts/expert_cartesian_poses.npy', allow_pickle=True)
    cat_trajs = np.vstack(cat_trajs)
    for pose in cat_trajs:
        pose = torch.Tensor([pose])
        # pose = torch.Tensor([-0.3622564536646905,0.07453657615711093,0.523455111826844,0.6949510146762841,0.6371909076037253,-0.28704989069534576,-0.16921345903719948])
        #pose = torch.Tensor([[0.55641491, -0.04412349, 0.18583907, -0.33566937, 0.92741494, -0.13821891, 0.09012842]])#([[0.5, 0, 0.5, 0, 0, 0, 1]])
        z = None
        #z = torch.Tensor([0, 0, 0])
        q = inference(pose=pose, z=z)[0]
        print("Generated q: ", q)
        # Build the robot model from its URDF to run forward kinematics.
        pinocchio_model_dir = dirname(dirname(str(abspath(__file__))))
        model_path = pinocchio_model_dir + "/learning-ik/resources/" + DataGenConfig.ROBOT
        urdf_path = model_path + "/urdf/"+DataGenConfig.ROBOT_URDF
        # setup robot model and data
        model = pinocchio.buildModelFromUrdf(urdf_path)
        data = model.createData()
        # setup end effector
        ee_name = DataGenConfig.EE_NAME
        ee_link_id = model.getFrameId(ee_name)
        # joint limits (from urdf)
        lower_limit = np.array(model.lowerPositionLimit)
        upper_limit = np.array(model.upperPositionLimit)
        # Forward kinematics of the generated joints -> achieved pose.
        pinocchio.framesForwardKinematics(model, data, q.cpu().numpy())
        desired_pose = pinocchio.SE3ToXYZQUAT(data.oMf[ee_link_id])
        print("Desired Pose", pose[:].cpu().numpy())
        print("Generated Pose: ", desired_pose[:])
        print("Error: ", np.linalg.norm(pose[:].cpu().numpy() - desired_pose[:]))
        print("------------------------------------------------------\n")
        # Sweep a 1-D latent code to inspect the solution manifold; only the
        # position (first 3 components) is compared here.
        for i in range (10):
            z = torch.Tensor([2*i-1])
            q = inference(pose=pose, z=z)
            pinocchio.framesForwardKinematics(model, data, q.cpu().numpy())
            desired_pose = pinocchio.SE3ToXYZQUAT(data.oMf[ee_link_id])
            print(list(q.cpu().numpy()))
            print("Desired Pose", pose[:3].cpu().numpy())
            print("Generated Pose: ", desired_pose[:3])
            print("Error: ", np.linalg.norm(pose[:3].cpu().numpy() - desired_pose[:3]))
            print("\n")
|
from keras.models import Model
from keras.layers import Input, concatenate, Conv2D, MaxPooling2D, Conv2DTranspose
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras import backend as K
smooth = 1.  # Laplace smoothing term keeping the Dice coefficient defined when both masks are empty
def dice_coef(y_true, y_pred):
    """Soft Dice coefficient between flattened true and predicted masks."""
    truth = K.flatten(y_true)
    pred = K.flatten(y_pred)
    overlap = K.sum(truth * pred)
    # ``smooth`` keeps the ratio defined when both masks are empty.
    return (2. * overlap + smooth) / (K.sum(truth) + K.sum(pred) + smooth)
def dice_coef_loss(y_true, y_pred):
    """Negated Dice coefficient, so minimizing the loss maximizes overlap."""
    return -dice_coef(y_true, y_pred)
def get_unet(img_rows, img_cols):
    """Build and compile the full 4-level U-Net for single-channel inputs.

    :param img_rows: input height
    :param img_cols: input width
    :return: compiled Keras model (Adam, Dice loss, Dice metric)
    """
    def conv_block(tensor, filters):
        # Two stacked 3x3 ReLU convolutions, spatial size preserved.
        tensor = Conv2D(filters, (3, 3), activation='relu', padding='same')(tensor)
        return Conv2D(filters, (3, 3), activation='relu', padding='same')(tensor)

    def up_block(tensor, skip, filters):
        # 2x2 transposed-conv upsampling concatenated with the encoder skip.
        merged = concatenate(
            [Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(tensor), skip],
            axis=3)
        return conv_block(merged, filters)

    inputs = Input((img_rows, img_cols, 1))
    enc1 = conv_block(inputs, 32)
    enc2 = conv_block(MaxPooling2D(pool_size=(2, 2))(enc1), 64)
    enc3 = conv_block(MaxPooling2D(pool_size=(2, 2))(enc2), 128)
    enc4 = conv_block(MaxPooling2D(pool_size=(2, 2))(enc3), 256)
    bottom = conv_block(MaxPooling2D(pool_size=(2, 2))(enc4), 512)
    dec6 = up_block(bottom, enc4, 256)
    dec7 = up_block(dec6, enc3, 128)
    dec8 = up_block(dec7, enc2, 64)
    dec9 = up_block(dec8, enc1, 32)
    outputs = Conv2D(1, (1, 1), activation='sigmoid')(dec9)
    model = Model(inputs=[inputs], outputs=[outputs])
    model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])
    return model
def dummynet():
    """Tiny two-conv sanity-check model for fixed 35x50 single-channel inputs."""
    image_in = Input((35, 50, 1))
    hidden = Conv2D(1, (3, 3), activation='relu', padding='same')(image_in)
    mask_out = Conv2D(1, (1, 1), activation='sigmoid')(hidden)
    net = Model(inputs=[image_in], outputs=[mask_out])
    net.compile(optimizer='adam', loss=dice_coef_loss, metrics=[dice_coef])
    net.summary()
    return net
def mini_unet(img_rows, img_cols):
    """Build and compile a shallower 3-level U-Net variant.

    :param img_rows: input height
    :param img_cols: input width
    :return: compiled Keras model (Adam, Dice loss, Dice metric)
    """
    def conv_block(tensor, filters):
        # Two stacked 3x3 ReLU convolutions, spatial size preserved.
        tensor = Conv2D(filters, (3, 3), activation='relu', padding='same')(tensor)
        return Conv2D(filters, (3, 3), activation='relu', padding='same')(tensor)

    def up_block(tensor, skip, filters):
        # 2x2 transposed-conv upsampling concatenated with the encoder skip.
        merged = concatenate(
            [Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(tensor), skip],
            axis=3)
        return conv_block(merged, filters)

    inputs = Input((img_rows, img_cols, 1))
    enc1 = conv_block(inputs, 32)
    enc2 = conv_block(MaxPooling2D(pool_size=(2, 2))(enc1), 64)
    enc3 = conv_block(MaxPooling2D(pool_size=(2, 2))(enc2), 128)
    bottom = conv_block(MaxPooling2D(pool_size=(2, 2))(enc3), 256)
    dec6 = up_block(bottom, enc3, 256)
    dec7 = up_block(dec6, enc2, 128)
    dec8 = up_block(dec7, enc1, 64)
    outputs = Conv2D(1, (1, 1), activation='sigmoid')(dec8)
    model = Model(inputs=[inputs], outputs=[outputs])
    model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])
    return model
# The module previously rebuilt the mini U-Net graph inline for a fixed
# 32x48 input, duplicating mini_unet() line for line. Reuse the function so
# the two copies cannot drift apart; the resulting compiled model is
# identical (same layers, optimizer, loss and metrics).
# NOTE(review): the inline copy also exposed its intermediate layer names
# (inputs, conv1, ..., conv10) at module scope — assumed unused elsewhere.
model = mini_unet(32, 48)
|
import os
import pickle
from src.configuration.constants import INTERIM_DATA_DIRECTORY, PROCESSED_DATA_DIRECTORY, RAW_DATA_DIRECTORY, INTERIM, \
PROCESSED, RAW
# Maps a data-stage key (interim / processed / raw) to the directory holding
# that stage's files.
DIRNAME_TO_DIRECTORY = {
    INTERIM: INTERIM_DATA_DIRECTORY,
    PROCESSED: PROCESSED_DATA_DIRECTORY,
    RAW: RAW_DATA_DIRECTORY,
}
def load_dataset(dirname: str, filename: str):
    """Unpickle and return a dataset stored in one of the known data directories.

    :param dirname: data-stage key (interim / processed / raw) selecting the directory
    :param filename: dataset file name; ".pickle" is appended when missing
    :return: the unpickled object
    :raises KeyError: if ``dirname`` is not a known stage
    """
    directory = DIRNAME_TO_DIRECTORY[dirname]
    pickle_extension = '.pickle'
    # BUG FIX: the original substring test (`pickle_extension not in filename`)
    # skipped appending the extension for names like "data.pickle.bak";
    # check the actual suffix instead.
    if not filename.endswith(pickle_extension):
        filename += pickle_extension
    filepath = os.path.join(directory, filename)
    # NOTE: pickle.load executes arbitrary code — only load trusted files.
    with open(filepath, 'rb') as f:
        dataset = pickle.load(f)
    return dataset
|
class Solution:
    def numSubmatrixSumTarget(self, A: List[List[int]], target: int) -> int:
        """Count submatrices of A whose elements sum to ``target``.

        Row-wise prefix sums (built in place) reduce each column window
        [left, right] to a 1-D "count subarrays summing to target" problem,
        solved with a prefix-sum frequency table.
        """
        rows, cols = len(A), len(A[0])
        # Turn every row into its running prefix sums, in place.
        for row in A:
            running = 0
            for c, value in enumerate(row):
                running += value
                row[c] = running
        total = 0
        for left in range(cols):
            for right in range(left, cols):
                # Frequency of prefix sums seen while scanning down the rows.
                seen = collections.defaultdict(int)
                seen[0] = 1
                prefix = 0
                for r in range(rows):
                    prefix += A[r][right] - (A[r][left - 1] if left else 0)
                    total += seen[prefix - target]
                    seen[prefix] += 1
        return total
|
import logging
from project.settings import DEBUG
# Project-wide logger. The logger itself stays at DEBUG; the console handler's
# level (DEBUG vs INFO) decides what is actually emitted.
logger = logging.getLogger(name='project')
logger.setLevel(logging.DEBUG)
# NOTE(review): ``format`` shadows the built-in of the same name at module
# scope; harmless here, but worth renaming if this module grows.
format = '%(asctime)s [%(levelname)s] %(filename)s: %(message)s'
formatter = logging.Formatter(format, datefmt='%d.%m.%Y %H:%M:%S')
if DEBUG:
    level = logging.DEBUG
else:
    level = logging.INFO
# Create the console handler and set its level.
console_handler = logging.StreamHandler()
console_handler.setLevel(level)
# Attach the formatter.
console_handler.setFormatter(formatter)
# Register the handler on the project logger.
logger.addHandler(console_handler)
|
from tempfile import NamedTemporaryFile
import uuid
import time
import pytest
import requests
from labelbox import DataRow
def test_get_data_row(datarow, client):
    """A data row created by the fixture is retrievable by its uid."""
    assert client.get_data_row(datarow.uid)
def test_lookup_data_rows(client, dataset):
    """External-id -> uid lookup covers 1:1, many ids, duplicate ids, empty and non-matching cases."""
    uid = str(uuid.uuid4())
    # 1 external id : 1 uid
    dr = dataset.create_data_row(row_data="123", external_id=uid)
    lookup = client.get_data_row_ids_for_external_ids([uid])
    assert len(lookup) == 1
    assert lookup[uid][0] == dr.uid
    # 2 external ids : 1 uid
    uid2 = str(uuid.uuid4())
    dr2 = dataset.create_data_row(row_data="123", external_id=uid2)
    lookup = client.get_data_row_ids_for_external_ids([uid, uid2])
    assert len(lookup) == 2
    assert all([len(x) == 1 for x in lookup.values()])
    assert lookup[uid][0] == dr.uid
    assert lookup[uid2][0] == dr2.uid
    #1 external id : 2 uid
    dr3 = dataset.create_data_row(row_data="123", external_id=uid2)
    lookup = client.get_data_row_ids_for_external_ids([uid2])
    assert len(lookup) == 1
    assert len(lookup[uid2]) == 2
    assert lookup[uid2][0] == dr2.uid
    assert lookup[uid2][1] == dr3.uid
    # Empty args
    lookup = client.get_data_row_ids_for_external_ids([])
    assert len(lookup) == 0
    # Non matching
    lookup = client.get_data_row_ids_for_external_ids([str(uuid.uuid4())])
    assert len(lookup) == 0
def test_data_row_bulk_creation(dataset, rand_gen, image_url):
    """Bulk creation works for both URL row data and local-file row data."""
    client = dataset.client
    assert len(list(dataset.data_rows())) == 0
    # Test creation using URL; both field-object and plain-string keys work.
    task = dataset.create_data_rows([
        {
            DataRow.row_data: image_url
        },
        {
            "row_data": image_url
        },
    ])
    assert task in client.get_user().created_tasks()
    task.wait_till_done()
    assert task.status == "COMPLETE"
    data_rows = list(dataset.data_rows())
    assert len(data_rows) == 2
    assert {data_row.row_data for data_row in data_rows} == {image_url}
    # Test creation using file name
    with NamedTemporaryFile() as fp:
        data = rand_gen(str).encode()
        fp.write(data)
        fp.flush()
        task = dataset.create_data_rows([fp.name])
        task.wait_till_done()
        assert task.status == "COMPLETE"
        task = dataset.create_data_rows([{
            "row_data": fp.name,
            'external_id': 'some_name'
        }])
        task.wait_till_done()
        assert task.status == "COMPLETE"
        task = dataset.create_data_rows([{"row_data": fp.name}])
        task.wait_till_done()
        assert task.status == "COMPLETE"
    data_rows = list(dataset.data_rows())
    assert len(data_rows) == 5
    # The uploaded local file is served back from a new URL with the same bytes.
    url = ({data_row.row_data for data_row in data_rows} - {image_url}).pop()
    assert requests.get(url).content == data
    data_rows[0].delete()
@pytest.mark.slow
def test_data_row_large_bulk_creation(dataset, image_url):
    """Large mixed bulk upload (local files + URLs) eventually completes with all rows present."""
    # Do a longer task and expect it not to be complete immediately
    n_local = 2000
    n_urls = 250
    with NamedTemporaryFile() as fp:
        fp.write("Test data".encode())
        fp.flush()
        task = dataset.create_data_rows([{
            DataRow.row_data: image_url
        }] * n_local + [fp.name] * n_urls)
    task.wait_till_done()
    assert task.status == "COMPLETE"
    assert len(list(dataset.data_rows())) == n_local + n_urls
@pytest.mark.xfail(reason="DataRow.dataset() relationship not set")
def test_data_row_single_creation(dataset, rand_gen, image_url):
    """Single-row creation sets relationships, serves the same bytes and media attributes."""
    client = dataset.client
    assert len(list(dataset.data_rows())) == 0
    data_row = dataset.create_data_row(row_data=image_url)
    assert len(list(dataset.data_rows())) == 1
    assert data_row.dataset() == dataset
    assert data_row.created_by() == client.get_user()
    assert data_row.organization() == client.get_organization()
    assert requests.get(image_url).content == \
        requests.get(data_row.row_data).content
    assert data_row.media_attributes is not None
    with NamedTemporaryFile() as fp:
        data = rand_gen(str).encode()
        fp.write(data)
        fp.flush()
        data_row_2 = dataset.create_data_row(row_data=fp.name)
        assert len(list(dataset.data_rows())) == 2
        assert requests.get(data_row_2.row_data).content == data
def test_data_row_update(dataset, rand_gen, image_url):
    """update() changes a data row's external_id in place."""
    external_id = rand_gen(str)
    data_row = dataset.create_data_row(row_data=image_url,
                                       external_id=external_id)
    assert data_row.external_id == external_id
    external_id_2 = rand_gen(str)
    data_row.update(external_id=external_id_2)
    assert data_row.external_id == external_id_2
def test_data_row_filtering_sorting(dataset, image_url):
    """data_rows() supports where-filters and asc/desc ordering by external_id."""
    task = dataset.create_data_rows([
        {
            DataRow.row_data: image_url,
            DataRow.external_id: "row1"
        },
        {
            DataRow.row_data: image_url,
            DataRow.external_id: "row2"
        },
    ])
    task.wait_till_done()
    # Test filtering
    row1 = list(dataset.data_rows(where=DataRow.external_id == "row1"))
    assert len(row1) == 1
    row1 = dataset.data_rows_for_external_id("row1")
    assert len(row1) == 1
    row1 = row1[0]
    assert row1.external_id == "row1"
    row2 = list(dataset.data_rows(where=DataRow.external_id == "row2"))
    assert len(row2) == 1
    row2 = dataset.data_rows_for_external_id("row2")
    assert len(row2) == 1
    row2 = row2[0]
    assert row2.external_id == "row2"
    # Test sorting
    assert list(
        dataset.data_rows(order_by=DataRow.external_id.asc)) == [row1, row2]
    assert list(
        dataset.data_rows(order_by=DataRow.external_id.desc)) == [row2, row1]
def test_data_row_deletion(dataset, image_url):
    """Both single delete() and DataRow.bulk_delete remove the expected rows."""
    task = dataset.create_data_rows([{
        DataRow.row_data: image_url,
        DataRow.external_id: str(i)
    } for i in range(10)])
    task.wait_till_done()
    data_rows = list(dataset.data_rows())
    expected = set(map(str, range(10)))
    assert {dr.external_id for dr in data_rows} == expected
    # Single-row deletion ("3" and "7" — single-digit ids, so the substring
    # test matches exactly those rows).
    for dr in data_rows:
        if dr.external_id in "37":
            dr.delete()
    expected -= set("37")
    data_rows = list(dataset.data_rows())
    assert {dr.external_id for dr in data_rows} == expected
    # Bulk deletion of "2", "4", "5", "8".
    DataRow.bulk_delete([dr for dr in data_rows if dr.external_id in "2458"])
    expected -= set("2458")
    data_rows = list(dataset.data_rows())
    assert {dr.external_id for dr in data_rows} == expected
def test_data_row_iteration(dataset, image_url) -> None:
    """data_rows() yields created rows via the iterator protocol."""
    task = dataset.create_data_rows([
        {
            DataRow.row_data: image_url
        },
        {
            "row_data": image_url
        },
    ])
    task.wait_till_done()
    assert next(dataset.data_rows())
def test_data_row_attachments(dataset, image_url):
    """Rows can be created with one attachment each; invalid attachment types are rejected."""
    attachments = [("IMAGE", image_url), ("TEXT", "test-text"),
                   ("IMAGE_OVERLAY", image_url), ("HTML", image_url)]
    task = dataset.create_data_rows([{
        "row_data": image_url,
        "external_id": "test-id",
        "attachments": [{
            "type": attachment_type,
            "value": attachment_value
        }]
    } for attachment_type, attachment_value in attachments])
    task.wait_till_done()
    assert task.status == "COMPLETE"
    data_rows = list(dataset.data_rows())
    assert len(data_rows) == len(attachments)
    for data_row in data_rows:
        assert len(list(data_row.attachments())) == 1
        assert data_row.external_id == "test-id"
    # An unknown attachment type must be rejected client-side.
    with pytest.raises(ValueError) as exc:
        task = dataset.create_data_rows([{
            "row_data": image_url,
            "external_id": "test-id",
            "attachments": [{
                "type": "INVALID",
                "value": "123"
            }]
        }])
def test_create_data_rows_sync_attachments(dataset, image_url):
    """Synchronous creation supports multiple attachments per data row."""
    attachments = [("IMAGE", image_url), ("TEXT", "test-text"),
                   ("IMAGE_OVERLAY", image_url), ("HTML", image_url)]
    attachments_per_data_row = 3
    dataset.create_data_rows_sync([{
        "row_data":
            image_url,
        "external_id":
            "test-id",
        "attachments": [{
            "type": attachment_type,
            "value": attachment_value
        } for _ in range(attachments_per_data_row)]
    } for attachment_type, attachment_value in attachments])
    data_rows = list(dataset.data_rows())
    assert len(data_rows) == len(attachments)
    for data_row in data_rows:
        assert len(list(data_row.attachments())) == attachments_per_data_row
def test_create_data_rows_sync_mixed_upload(dataset, image_url):
    """Synchronous creation accepts a mix of URL rows and local-file rows."""
    n_local = 100
    n_urls = 100
    with NamedTemporaryFile() as fp:
        fp.write("Test data".encode())
        fp.flush()
        dataset.create_data_rows_sync([{
            DataRow.row_data: image_url
        }] * n_urls + [fp.name] * n_local)
    assert len(list(dataset.data_rows())) == n_local + n_urls
def test_delete_data_row_attachment(datarow, image_url):
    """Every attachment type can be created and then deleted from a data row."""
    attachments = []
    to_attach = [("IMAGE", image_url), ("TEXT", "test-text"),
                 ("IMAGE_OVERLAY", image_url), ("HTML", image_url)]
    for attachment_type, attachment_value in to_attach:
        attachments.append(
            datarow.create_attachment(attachment_type, attachment_value))
    for attachment in attachments:
        attachment.delete()
    assert len(list(datarow.attachments())) == 0
|
import requests,json,os,subprocess
# 发post请求,获取5条内容(title、author、下载url)
def getContent(music_name, type):
    """POST a search query to the aggregation site and return up to 5 hits.

    :param music_name: song title or artist name to search for
    :param type: source site key, e.g. 'netease' or 'kuwo'
    :return: list of up to 5 result dicts (title / author / download url),
        or an empty list on any failure
    """
    print('正在获取'+type+'资源...')
    url = 'http://music.ifkdy.com/'
    data = {'input': music_name,
            'filter': 'name',  # site-side search field
            'type': type,
            'page': '1'}
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
        'X-Requested-With': 'XMLHttpRequest'  # required by the endpoint; requests without it fail
    }
    try:
        response = requests.post(url, headers=headers, data=data)
        payload = json.loads(response.content.decode())
        # BUG FIX: the original indexed five fixed entries and raised
        # IndexError (reported as a generic failure) whenever the site
        # returned fewer than five results; slice instead.
        results = payload['data'][:5]
        print(type+'资源获取成功')
        return results
    except Exception:
        # Network/JSON/schema failure: report and return the empty sentinel.
        print(type+'资源获取失败')
        return []
#选择是否下载?下载哪一个?
def select(musicTuple):
    """Print the result menu and let the user pick a track.

    :param musicTuple: sequence of result dicts from getContent(), or [] on failure
    :return: the chosen result dict, or 0 when nothing should be downloaded
    """
    if musicTuple==[]:
        return 0
    print('-------------------音乐目录-------------------\n0、不下载')
    for num, music in enumerate(musicTuple, start=1):
        print(str(num) + '、' + music['title'] + ' ' + music['author'])
    choice = input('请输入数字序号:')
    # BUG FIX: input() returns a string, so the original ``choice == 0`` was
    # never true — entering 0 selected the LAST track (index -1) instead of
    # cancelling. Compare against the string form.
    if choice == '0':
        return 0
    else:
        return musicTuple[int(choice)-1]
#开始下载
def download(music):
    """Stream one track to the local music folder and return the saved path.

    :param music: result dict with 'title', 'author' and 'url', or 0 to skip
    :return: path of the saved mp3, 0 when skipped, None when the download failed
    """
    if music==0:
        return 0
    print('-------------------开始下载-------------------')
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36'}
    try:
        # stream=True keeps large files out of memory; write in small chunks.
        data = requests.get(music['url'], headers=headers, stream=True)
        if not os.path.exists('e://音乐spider'):
            os.mkdir('e://音乐spider')
        with open('E://音乐spider//{}.mp3'.format(music['title'] + ' ' + music['author']), 'wb')as f:
            for j in data.iter_content(chunk_size=512):
                f.write(j)
        print('下载成功:' + music['title'] + ' ' + music['author'])
        path = 'e://音乐spider//{}.mp3'.format(music['title'] + ' ' + music['author'])
        return path
    except Exception:
        # Narrowed from a bare except; make the failure return explicit so
        # callers can distinguish it from a successful path.
        print('下载失败:'+music['title'] + ' ' + music['author'])
        return None
def run():
    """Interactive entry point: search, choose, download, then play the track."""
    music_name = input('请输入音乐名称或音乐人姓名:')
    # Fetch up to 5 candidates (title / author / download url) from netease.
    musicTuple = getContent(music_name,'netease')
    # 0 means the user declined or nothing was found.
    music = select(musicTuple)
    path = download(music)
    # BUG FIX: the player was previously launched unconditionally, passing
    # 0/None to subprocess.Popen when the download was skipped or failed.
    if path:
        subprocess.Popen(path, shell=True)
if __name__ == '__main__':
    run()
from django.db import models
from django.utils.translation import activate
# Create your models here.
class Movie(models.Model):
    """A movie entry with a unique name, a short description and an active flag."""
    name = models.CharField(max_length=50, unique=True)  # unique display name
    description = models.CharField(max_length=250, null=False)
    active = models.BooleanField(default=True)  # visibility/soft-disable toggle
|
import csv
import os
from os.path import join, dirname
import pickle
import numpy as np
from plotly.offline import plot
import plotly.graph_objs as go
from collections import Counter
import json
import writeToS3 as s3
import argparse
import deleteDir as d
import notification as n
class Classification:
    """Runs a pre-trained text-classification pipeline over an unlabeled CSV
    and publishes the predictions (CSV and a pie chart) to S3."""

    def __init__(self, awsPath, localSavePath, filename):
        # awsPath: S3 prefix used for uploads/download links
        # localSavePath: local working directory holding the pickled model
        #   and the UNLABELED_<filename>.csv input
        # filename: base name shared by the UNLABELED_/PREDICTED_ CSVs
        self.localSavePath = localSavePath
        self.awsPath = awsPath
        self.filename = filename

    def predict(self):
        """Classify the unlabeled texts, save PREDICTED_<filename>.csv and
        upload it to S3; returns the S3 download link."""
        # load classification model
        pkl_model = os.path.join(self.localSavePath,'classification_pipeline.pickle')
        with open(pkl_model,'rb') as f:
            text_clf = pickle.load(f)
        # load text set; fall back to ISO-8859-1 when the file isn't UTF-8
        data = []
        try:
            with open(self.localSavePath + 'UNLABELED_' + self.filename + '.csv','r',encoding='utf-8') as f:
                reader = list(csv.reader(f))
                for row in reader[1:]:
                    try:
                        data.extend(row)
                    except Exception as e:
                        pass
        except:
            with open(self.localSavePath + 'UNLABELED_' + self.filename + '.csv','r',encoding='ISO-8859-1') as f:
                reader = list(csv.reader(f))
                for row in reader[1:]:
                    try:
                        data.extend(row)
                    except Exception as e:
                        pass
        # predict using trained model
        self.predicted = text_clf.predict(data)
        # save result (same UTF-8 / ISO-8859-1 fallback as on read)
        fname = 'PREDICTED_' + self.filename + '.csv'
        try:
            with open(self.localSavePath + fname,'w',newline="",encoding='utf-8') as f:
                writer = csv.writer(f)
                writer.writerow(['text','category'])
                for i in range(len(data)):
                    try:
                        writer.writerow([data[i],self.predicted[i]])
                    except:
                        pass
        except:
            with open(self.localSavePath + fname,'w',newline="",encoding='ISO-8859-1') as f:
                writer = csv.writer(f)
                writer.writerow(['text','category'])
                for i in range(len(data)):
                    try:
                        writer.writerow([data[i],self.predicted[i]])
                    except:
                        pass
        s3.upload(self.localSavePath, self.awsPath, fname)
        return s3.generate_downloads(self.awsPath, fname)

    def plot(self):
        """Render a pie chart of the predicted class distribution, upload the
        HTML div to S3 and return its download link. Must be called after
        predict() (reads ``self.predicted``)."""
        y_pred_dict = Counter(self.predicted)
        labels = []
        values = []
        for i in y_pred_dict.keys():
            labels.append("class: " + str(i))
            values.append(y_pred_dict[i])
        trace = go.Pie(labels=labels, values = values, textinfo='label')
        div_comp = plot([trace], output_type='div',image='png',auto_open=False, image_filename='plot_img')
        fname_div_comp = 'div_comp.html'
        with open(self.localSavePath + fname_div_comp,"w") as f:
            f.write(div_comp)
        s3.upload(self.localSavePath, self.awsPath, fname_div_comp)
        return s3.generate_downloads(self.awsPath, fname_div_comp)
if __name__ == '__main__':
    # Entry point: fetch config, data and model from S3, run prediction,
    # then clean up and notify the requesting user by email.
    output = dict()

    parser = argparse.ArgumentParser(description="processing...")
    parser.add_argument('--remoteReadPath', required=True)
    parser.add_argument('--uuid', required=True)
    parser.add_argument('--s3FolderName', required=True)
    parser.add_argument('--email', required=True)
    args = parser.parse_args()

    uid = args.uuid
    awsPath = args.s3FolderName + '/ML/classification/' + uid + '/'
    localSavePath = '/tmp/' + args.s3FolderName + '/ML/classification/' + uid + '/'
    # exist_ok avoids the check-then-create race (the original repeated the
    # same "if not exists: makedirs" block twice).
    os.makedirs(localSavePath, exist_ok=True)

    # download config to local folder and merge the CLI arguments into it
    fname_config = 'config.json'
    if not s3.checkExist(awsPath, fname_config):
        # The original had an unreachable exit() after this raise; removed.
        raise ValueError('This session ID is invalid!')
    s3.downloadToDisk(fname_config, localSavePath, awsPath)
    with open(localSavePath + fname_config, "r") as fp:
        data = json.load(fp)
    # merge in any CLI arguments not already present in the stored config
    for key in vars(args).keys():
        if key not in data.keys():
            data[key] = vars(args)[key]
    with open(localSavePath + fname_config, "w") as f:
        json.dump(data, f)
    s3.upload(localSavePath, awsPath, fname_config)
    output['config'] = s3.generate_downloads(awsPath, fname_config)
    output['uuid'] = uid

    # download unlabeled data to local folder
    filename = args.remoteReadPath.split('/')[-2]
    fname_unlabeled = 'UNLABELED_' + filename + '.csv'
    if not s3.checkExist(awsPath, fname_unlabeled):
        raise ValueError('You\'re requesting ' + fname_unlabeled + ' file, and it\'s not found in your remote directory!\
 It is likely that you have not yet performed step 1 -- split the dataset into training and predicting set, or you have provided the wrong sessionID.')
    s3.downloadToDisk(fname_unlabeled, localSavePath, awsPath)

    # download pickle model to local folder
    fname_pickle = 'classification_pipeline.pickle'
    if not s3.checkExist(awsPath, fname_pickle):
        raise ValueError('You\'re requesting ' + fname_pickle + ' file, and it\'s not found in your remote directory! \
It is likely that you have not yet performed step 2 -- model training, or you have provided the wrong sessionID.')
    s3.downloadToDisk(fname_pickle, localSavePath, awsPath)

    classification = Classification(awsPath, localSavePath, filename)
    output['predict'] = classification.predict()
    output['div'] = classification.plot()

    # clean the scratch space and email the requester the download links
    d.deletedir('/tmp')
    n.notification(args.email, case=3, filename=awsPath)
|
from django import forms
from django.forms.widgets import PasswordInput, TextInput
class LoginForm(forms.Form):
    """Username/password sign-in form, styled with NES.css inputs."""
    # max_length 150 matches Django's default User.username limit.
    username = forms.CharField(widget=TextInput(attrs={'class': 'nes-input'}), max_length=150)
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'nes-input'}))
class EmployeeForm(forms.Form):
    """Employee creation form: credentials, display info and avatar choice.

    The avatar constants are icon paths served from the static root; the
    dropdown presents the human-readable character names.
    """
    ADA = "/icons/ada.png"
    BODVAR = "/icons/bodvar.png"
    CASSIDY = "/icons/cassidy.png"
    GNASH = "/icons/gnash.png"
    HATTORI = "/icons/hattori.png"
    LORD_VRAXX = "/icons/lord_vraxx.png"
    ORION = "/icons/orion.png"
    QUEEN_NAI = "/icons/queen_nai.png"
    SCARLET = "/icons/scarlet.png"
    SENTINEL = "/icons/sentinel.png"
    SIR_ROLAND = "/icons/sir_roland.png"
    THATCH = "/icons/thatch.png"
    # Fixed: the original value had a stray trailing slash ("...png/").
    WU_SHANG = "/icons/wu_shang.png"
    AVATAR_CHOICES = [
        (ADA, "Ada"),
        (BODVAR, "Bodvar"),
        (CASSIDY, "Cassidy"),
        (GNASH, "Gnash"),
        (HATTORI, "Hattori"),
        (LORD_VRAXX, "Lord Vraxx"),
        (ORION, "Orion"),
        (QUEEN_NAI, "Queen Nai"),
        (SCARLET, "Scarlet"),
        # Fixed: SENTINEL was defined above but missing from the choices.
        (SENTINEL, "Sentinel"),
        (SIR_ROLAND, "Sir Roland"),
        (THATCH, "Thatch"),
        (WU_SHANG, "Wu Shang"),
    ]
    username = forms.CharField(widget=TextInput(attrs={'class': 'nes-input'}), max_length=150)
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'nes-input'}))
    displayname = forms.CharField(widget=TextInput(attrs={'class': 'nes-input'}), max_length=30)
    avatar = forms.ChoiceField(widget=forms.Select(attrs={'class': 'nes-input'}), choices=AVATAR_CHOICES)
    # Profile details are optional.
    position = forms.CharField(widget=TextInput(attrs={'class': 'nes-input'}), max_length=150, required=False)
    first_name = forms.CharField(widget=TextInput(attrs={'class': 'nes-input'}), max_length=150, required=False)
    last_name = forms.CharField(widget=TextInput(attrs={'class': 'nes-input'}), max_length=150, required=False)
    email = forms.EmailField(widget=TextInput(attrs={'class': 'nes-input'}), required=False)
import os
import random
import hashlib
import string
import kopf
import kubernetes
import kubernetes.client
# Startup shell script injected into the notebook container via a ConfigMap
# (mounted at /usr/local/bin/before-notebook.d by the Deployment below).
# It initialises conda, pins env creation under $HOME, activates a
# "workspace" env when present, and writes the Jupyter password hash
# (interpolated through %(password_hash)s) on first start.
notebook_startup = """#!/bin/bash
conda init
source $HOME/.bashrc
if [ ! -f $HOME/.condarc ]; then
cat > $HOME/.condarc << EOF
envs_dirs:
- $HOME/.conda/envs
EOF
fi
if [ -d $HOME/.conda/envs/workspace ]; then
echo "Activate virtual environment 'workspace'."
conda activate workspace
fi
if [ ! -f $HOME/.jupyter/jupyter_notebook_config.json ]; then
mkdir -p $HOME/.jupyter
cat > $HOME/.jupyter/jupyter_notebook_config.json << EOF
{
"NotebookApp": {
"password": "%(password_hash)s"
}
}
EOF
fi
"""
@kopf.on.create("jupyter-on-kubernetes.test", "v1alpha1", "jupyternotebooks", id="jupyter")
def create(name, uid, namespace, spec, logger, **_):
    """Provision a Jupyter notebook for a new JupyterNotebook resource.

    Creates, in order: a ConfigMap carrying the startup script (which sets
    the notebook password), a Deployment, an optional PersistentVolumeClaim
    (or a mount of a pre-existing claim), a ClusterIP Service and an
    Ingress. Every child object is passed through kopf.adopt() so it is
    owned by - and garbage-collected with - the custom resource.

    Returns a status dict (notebook URL + generated password, effective
    deployment and storage settings) that kopf records on the resource.
    """
    apps_api = kubernetes.client.AppsV1Api()
    core_api = kubernetes.client.CoreV1Api()
    extensions_api = kubernetes.client.ExtensionsV1beta1Api()

    # Build a Jupyter-style password hash: "<algorithm>:<salt>:<hexdigest>".
    # NOTE(review): `random` is not cryptographically secure; the stdlib
    # `secrets` module would be preferable for password/salt generation.
    algorithm = "sha1"
    salt_len = 12
    characters = string.ascii_letters + string.digits
    password = "".join(random.sample(characters, 16))
    h = hashlib.new(algorithm)
    salt = ("%0" + str(salt_len) + "x") % random.getrandbits(4 * salt_len)
    h.update(bytes(password, "utf-8") + salt.encode("ascii"))
    password_hash = ":".join((algorithm, salt, h.hexdigest()))

    # ConfigMap with the container startup script (password baked in).
    config_map_body = {
        "apiVersion": "v1",
        "kind": "ConfigMap",
        "metadata": {
            "name": name,
            "labels": {
                "app": name
            }
        },
        "data": {
            "setup-environment.sh": notebook_startup % dict(password_hash=password_hash)
        }
    }
    kopf.adopt(config_map_body)
    core_api.create_namespaced_config_map(namespace=namespace, body=config_map_body)

    # Effective settings, falling back to defaults when the spec omits them.
    notebook_interface = spec.get("notebook", {}).get("interface", "lab")
    image = spec.get("deployment", {}).get("image", "jupyter/minimal-notebook:latest")
    service_account = spec.get("deployment", {}).get("serviceAccountName", "default")
    memory_limit = spec.get("deployment", {}).get("resources", {}).get("limits", {}).get("memory", "512Mi")
    memory_request = spec.get("deployment", {}).get("resources", {}).get("requests", {}).get("memory", memory_limit)

    deployment_body = {
        "apiVersion": "apps/v1",
        "kind": "Deployment",
        "metadata": {
            "name": name,
            "labels": {
                "app": name
            }
        },
        "spec": {
            "replicas": 1,
            "selector": {
                "matchLabels": {
                    "deployment": name
                }
            },
            # Recreate: never run two notebooks against the same volume.
            "strategy": {
                "type": "Recreate"
            },
            "template": {
                "metadata": {
                    "labels": {
                        "deployment": name
                    }
                },
                "spec": {
                    "serviceAccountName": service_account,
                    "containers": [
                        {
                            "name": "notebook",
                            "image": image,
                            "imagePullPolicy": "Always",
                            "resources": {
                                "requests": {
                                    "memory": memory_request
                                },
                                "limits": {
                                    "memory": memory_limit
                                }
                            },
                            "ports": [
                                {
                                    "name": "8888-tcp",
                                    "containerPort": 8888,
                                    "protocol": "TCP",
                                }
                            ],
                            "env": [],
                            "volumeMounts": [
                                {
                                    "name": "startup",
                                    "mountPath": "/usr/local/bin/before-notebook.d"
                                }
                            ]
                        }
                    ],
                    "securityContext": {
                        "fsGroup": 0
                    },
                    "volumes": [
                        {
                            "name": "startup",
                            "configMap": {
                                # BUG FIX: was hard-coded "notebook"; the
                                # ConfigMap above is created with this
                                # resource's name.
                                "name": name
                            }
                        }
                    ]
                },
            },
        },
    }

    if notebook_interface != "classic":
        deployment_body["spec"]["template"]["spec"]["containers"][0]["env"].append(
            {"name": "JUPYTER_ENABLE_LAB", "value": "true"})

    storage_request = ""
    storage_limit = ""
    storage_claim_name = spec.get("storage", {}).get("claimName", "")
    # BUG FIX: was reading "claimName" again (copy-paste), so subPath was
    # always equal to the claim name instead of the configured sub-path.
    storage_sub_path = spec.get("storage", {}).get("subPath", "")

    if not storage_claim_name:
        # No existing claim named: create our own PVC if storage was requested.
        storage_request = spec.get("deployment", {}).get("resources", {}).get("requests", {}).get("storage", "")
        storage_limit = spec.get("deployment", {}).get("resources", {}).get("limits", {}).get("storage", "")
        if storage_request or storage_limit:
            # BUG FIX: claimName was hard-coded "notebook"; the PVC below is
            # created with this resource's name.
            volume = {"name": "data", "persistentVolumeClaim": {"claimName": name}}
            deployment_body["spec"]["template"]["spec"]["volumes"].append(volume)
            storage_mount = {"name": "data", "mountPath": "/home/jovyan"}
            deployment_body["spec"]["template"]["spec"]["containers"][0]["volumeMounts"].append(storage_mount)
            persistent_volume_claim_body = {
                "apiVersion": "v1",
                "kind": "PersistentVolumeClaim",
                "metadata": {
                    "name": name,
                    "labels": {
                        "app": name
                    }
                },
                "spec": {
                    "accessModes": ["ReadWriteOnce"],
                    "resources": {
                        "requests": {},
                        "limits": {}
                    }
                }
            }
            if storage_request:
                persistent_volume_claim_body["spec"]["resources"]["requests"]["storage"] = storage_request
            if storage_limit:
                persistent_volume_claim_body["spec"]["resources"]["limits"]["storage"] = storage_limit
            kopf.adopt(persistent_volume_claim_body)
            core_api.create_namespaced_persistent_volume_claim(namespace=namespace,
                                                               body=persistent_volume_claim_body)
    else:
        # Mount the pre-existing claim (optionally under a sub-path).
        volume = {"name": "data", "persistentVolumeClaim": {"claimName": storage_claim_name}}
        deployment_body["spec"]["template"]["spec"]["volumes"].append(volume)
        storage_mount = {"name": "data", "mountPath": "/home/jovyan", "subPath": storage_sub_path}
        deployment_body["spec"]["template"]["spec"]["containers"][0]["volumeMounts"].append(storage_mount)

    kopf.adopt(deployment_body)
    apps_api.create_namespaced_deployment(namespace=namespace, body=deployment_body)

    service_body = {
        "apiVersion": "v1",
        "kind": "Service",
        "metadata": {
            "name": name,
            "labels": {
                "app": name
            }
        },
        "spec": {
            "type": "ClusterIP",
            "ports": [
                {
                    "name": "8888-tcp",
                    "port": 8888,
                    "protocol": "TCP",
                    "targetPort": 8888,
                }
            ],
            "selector": {
                "deployment": name
            },
        },
    }
    kopf.adopt(service_body)
    core_api.create_namespaced_service(namespace=namespace, body=service_body)

    # Expose the notebook at notebook-<namespace>.<INGRESS_DOMAIN>.
    ingress_domain = os.environ.get("INGRESS_DOMAIN")
    ingress_hostname = f"notebook-{namespace}.{ingress_domain}"
    ingress_body = {
        "apiVersion": "extensions/v1beta1",
        "kind": "Ingress",
        "metadata": {
            "name": name,
            "labels": {
                "app": name
            },
            "annotations": {
                # Contour: allow websocket upgrades for the kernel channels.
                "projectcontour.io/websocket-routes": "/"
            }
        },
        "spec": {
            "rules": [
                {
                    "host": ingress_hostname,
                    "http": {
                        "paths": [
                            {
                                "path": "/",
                                "backend": {
                                    "serviceName": name,
                                    "servicePort": 8888,
                                },
                            }
                        ]
                    }
                }
            ]
        }
    }
    kopf.adopt(ingress_body)
    extensions_api.create_namespaced_ingress(namespace=namespace, body=ingress_body)

    return {
        "notebook": {
            "url": f"http://{ingress_hostname}",
            "password": password,
            "interface": notebook_interface,
        },
        "deployment": {
            "image": image,
            "serviceAccountName": service_account,
            "resources": {
                "requests": {
                    "memory": memory_request,
                    "storage": storage_request
                },
                "limits": {
                    "memory": memory_limit,
                    "storage": storage_limit
                }
            }
        },
        "storage": {
            "claimName": storage_claim_name,
            "subPath": storage_sub_path
        }
    }
|
from django.utils.cache import add_never_cache_headers
from django.utils.deprecation import MiddlewareMixin
from django.conf import settings
from django.contrib.auth.middleware import RemoteUserMiddleware
class NoReferral(MiddlewareMixin):
    # NOTE(review): despite the name, this middleware sets
    # X-Content-Type-Options (MIME-sniffing protection), not a
    # Referrer-Policy header — confirm the intended behaviour/name.
    def process_response(self, request, response):
        response['X-Content-Type-Options'] = 'nosniff'
        return response
class NoCache(MiddlewareMixin):
    """Mark responses as never-cacheable unless they already set Cache-Control."""

    def process_response(self, request, response):
        has_cache_policy = 'Cache-Control' in response
        if not has_cache_policy:
            add_never_cache_headers(response)
        return response
class AuthproxyUserMiddleware(RemoteUserMiddleware):
    """Authenticate from auth-proxy headers and mirror the admin flag.

    Only active when settings.HOOVER_AUTHPROXY is enabled; otherwise the
    request passes through untouched.
    """
    header = 'HTTP_X_FORWARDED_USER'
    is_admin_header = 'HTTP_X_FORWARDED_USER_ADMIN'

    def process_request(self, request):
        if not settings.HOOVER_AUTHPROXY:
            return
        super().process_request(request)
        user = request.user
        admin_flag = request.META.get(self.is_admin_header) == 'true'
        # Persist only when either flag actually differs from the header.
        flags_out_of_sync = (user.is_superuser != admin_flag
                             or user.is_staff != admin_flag)
        if flags_out_of_sync:
            user.is_superuser = admin_flag
            user.is_staff = admin_flag
            user.save()
|
##### Implementation of SCAFFOLOD #####
##### importing libraries #####
import copy
import random, argparse
from numpy.core import records
import torch
import numpy as np
from tqdm import tqdm
from solvers import create_solver
from utils import util
from data import create_dataloader, create_dataset
from options import options as option
from torch import utils as vutils
# Global setup for SCAFFOLD federated super-resolution training.
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser(description='Super Resolution in SCAFFOLD')
parser.add_argument('-opt', type=str, required=True)
opt = option.parse(parser.parse_args().opt)
##### random seed #####
seed = opt['solver']['manual_seed']
if seed is None: seed = random.randint(1, 10000)
print("=====> Random Seed: %d" %seed)
torch.manual_seed(seed)
##### hyperparameters for federated learning #####
num_clients = opt['fed']['num_clients']
# number of clients sampled per round (fraction of the population)
num_selected = int(num_clients * opt['fed']['sample_fraction'])
num_rounds = opt['fed']['num_rounds']
client_epochs = opt['fed']['epochs']
##### create dataloader for client and server #####
for phase, dataset_opt in sorted(opt['datasets'].items()):
    if phase == 'train':
        train_set = create_dataset(dataset_opt)
        # NOTE(review): random_split requires the split sizes to sum to
        # len(train_set); this fails when len(train_set) is not divisible
        # by num_clients — confirm dataset sizing.
        train_set_split = vutils.data.random_split(
            train_set, [int(len(train_set) / num_clients) for _ in range(num_clients)])
        train_loaders = [create_dataloader(x, dataset_opt) for x in train_set_split]
        print("=====> Train Dataset: %s" %train_set.name())
        print("=====> Number of image in each client: %d" %len(train_set_split[0]))
        # NOTE(review): train_loaders is a list comprehension result and can
        # never be None here — this guard is dead code.
        if train_loaders is None:
            raise ValueError("[Error] The training data does not exist")
    elif phase == 'val':
        val_set = create_dataset(dataset_opt)
        val_loader = create_dataloader(val_set, dataset_opt)
        print('======> Val Dataset: %s, Number of images: [%d]' %(val_set.name(), len(val_set)))
    else:
        raise NotImplementedError("[Error] Dataset phase [%s] in *.json is not recognized." % phase)
##### create model and solver for client and server #####
scale = opt['scale']
# one solver per client plus one global (server) solver
client_solvers = [create_solver(opt) for _ in range(num_clients)]
global_solver = create_solver(opt)
model_name = opt['networks']['which_model'].upper()
print('===> Start Train')
print("==================================================")
print("Method: %s || Scale: %d || Total round: %d " %(model_name, scale, num_rounds))
##### create solver log for saving #####
solver_log = global_solver.get_current_log()
start_round = solver_log['round']
##### helper function for federated training #####
##### helper function for federated training #####
def train(global_w, c_global, client_solver, train_loader, train_set, total_epoch, c_local):
    """Run SCAFFOLD local training for one client.

    Applies the control-variate correction (c_global - c_local) after every
    optimizer step, then computes the updated client control variate c_new
    (Option II of the SCAFFOLD paper) and the delta to ship to the server.

    Returns (average training loss of the LAST epoch, c_delta).
    """
    # number of local optimization steps (used in the c_new denominator)
    cnt = 0
    for epoch in range(1, total_epoch+1):
        # NOTE: reset every epoch, so the returned loss covers only the
        # final epoch.
        train_loss_list = []
        for iter, batch in enumerate(train_loader):
            client_solver.feed_data(batch)
            iter_loss = client_solver.train_step()
            batch_size = batch['LR'].size(0)
            train_loss_list.append(iter_loss * batch_size)
            # SCAFFOLD drift correction: w <- w - lr * (c_global - c_local)
            net_para = client_solver.model.state_dict()
            # NOTE(review): lr is defined inside this loop and reused after
            # it; an empty train_loader leaves lr (and cnt=0) undefined/zero
            # and the c_new division below fails — confirm loaders are
            # never empty.
            lr = client_solver.get_current_learning_rate()
            for key in net_para:
                net_para[key] = net_para[key]-lr*(c_global[key]-c_local[key])
            client_solver.model.load_state_dict(net_para)
            cnt += 1
        ###### Update lr #####
        client_solver.update_learning_rate(epoch)
    # c_new = c_local - c_global + (global_w - w_local) / (cnt * lr)
    c_new = copy.deepcopy(c_local)
    c_delta = copy.deepcopy(c_local)
    net_para = client_solver.model.state_dict()
    for key in net_para:
        c_new[key] = c_new[key]-c_global[key]+(global_w[key]-net_para[key])/(cnt*lr)
        c_delta[key] = c_new[key]-c_local[key]
    # NOTE(review): this rebinds the local name only — the caller's c_local
    # is NOT updated, so clients never retain their control variates.
    c_local = copy.deepcopy(c_new)
    return sum(train_loss_list)/len(train_set), c_delta
def FedAvg(w, weight_avg=None):
    """Return the weighted average of a list of model state dicts.

    Parameters
    ----------
    w : list[dict[str, torch.Tensor]]
        Client state dicts; all must share the same keys.
    weight_avg : list[float] | None
        Per-client weights; defaults to uniform 1/len(w).

    Tensors are moved to the GPU (matching the original behaviour) before
    accumulation.
    """
    if weight_avg is None:  # fixed: was `== None`; identity check is correct
        weight_avg = [1 / len(w) for _ in range(len(w))]
    w_avg = copy.deepcopy(w[0])
    # Single pass per key: seed with the first client's weighted tensor,
    # then accumulate the rest (the original looped over the keys twice).
    for k in w_avg.keys():
        w_avg[k] = w_avg[k].cuda() * weight_avg[0]
        for i in range(1, len(w)):
            w_avg[k] = w_avg[k] + w[i][k].cuda() * weight_avg[i]
    return w_avg
def Test(global_solver, val_loader, solver_log, current_r):
    """Validate the global model, update the solver log and save checkpoints.

    Computes PSNR/SSIM over the validation loader, records the round's
    averages in solver_log, marks/saves the best round, and returns
    (mean val loss, mean PSNR, mean SSIM).
    """
    psnr_list = []
    ssim_list = []
    val_loss_list = []
    for iter, batch in enumerate(val_loader):
        global_solver.feed_data(batch)
        iter_loss = global_solver.test()
        val_loss_list.append(iter_loss)
        ##### Calculate psnr/ssim metrics #####
        visuals = global_solver.get_current_visual()
        psnr, ssim = util.calc_metrics(visuals['SR'], visuals['HR'], crop_border=scale)
        psnr_list.append(psnr)
        ssim_list.append(ssim)
    # ##### record loss/psnr/ssim #####
    # The ' ' entries are spacer rows so per-round values line up in the
    # saved log/CSV output.
    solver_log['records']['val_loss'].append(' ')
    solver_log['records']['val_loss'].append(sum(val_loss_list)/len(val_loss_list))
    solver_log['records']['psnr'].append(' ')
    solver_log['records']['psnr'].append(sum(psnr_list)/len(psnr_list))
    solver_log['records']['ssim'].append(' ')
    solver_log['records']['ssim'].append(sum(ssim_list)/len(ssim_list))
    ##### record the best epoch #####
    round_is_best = False
    if solver_log['best_pred'] < (sum(psnr_list)/len(psnr_list)):
        solver_log['best_pred'] = (sum(psnr_list)/len(psnr_list))
        round_is_best = True
        solver_log['best_round'] = current_r
    print("PSNR: %.2f SSIM: %.4f Loss: %.6f Best PSNR: %.2f in Round: [%d]"
          %(sum(psnr_list)/len(psnr_list), sum(ssim_list)/len(ssim_list), sum(val_loss_list)/len(val_loss_list),
            solver_log['best_pred'], solver_log['best_round']))
    # persist log + checkpoint (flagging the best round's weights)
    global_solver.set_current_log(solver_log)
    global_solver.save_checkpoint(current_r, round_is_best)
    global_solver.save_current_log()
    return sum(val_loss_list)/len(val_loss_list), sum(psnr_list)/len(psnr_list),\
        sum(ssim_list)/len(ssim_list)
##### Initializing models #####
# Broadcast the initial global weights to every client solver.
global_model = global_solver.model
global_w = global_model.state_dict()
for i in range(num_clients):
    client_solvers[i].model.load_state_dict(global_w)
initial_state_dict = copy.deepcopy(global_model.state_dict())
server_state_dict = copy.deepcopy(global_model.state_dict())
# total parameter count (informational)
total = 0
for name, param in global_model.named_parameters():
    total += np.prod(param.size())
# Server control variate c_global starts at zero.
c_global = copy.deepcopy(initial_state_dict)
for key in initial_state_dict.keys():
    c_global[key] = torch.zeros_like(initial_state_dict[key])
# NOTE(review): this loop rebinds the single name c_local num_clients
# times, so ALL clients share one control variate (which train() never
# updates in place either). Proper SCAFFOLD keeps one c_local per client —
# likely a bug, but fixing it changes training behaviour.
for idx in range(num_clients):
    c_local = copy.deepcopy(c_global)
##### start training #####
total_train_loss = []
total_val_loss = []
psnr_list = []
ssim_list = []
for r in range(1, num_rounds+1):
    ##### select random clients #####
    m = max(int(num_selected), 1)
    clients_idx = np.random.choice(range(num_clients), m, replace=False)
    clients_losses = []
    # accumulator for the clients' control-variate deltas this round
    total_delta = copy.deepcopy(initial_state_dict)
    for key in total_delta:
        total_delta[key] = torch.zeros_like(initial_state_dict[key])
    with tqdm(total=num_selected, desc='Round: [%d/%d]'%(r, num_rounds), miniters=1) as t:
        for i in clients_idx:
            # start each selected client from the current global weights
            client_solvers[i].model.load_state_dict(copy.deepcopy(global_w))
            loss, c_delta = train(
                global_w, c_global, client_solvers[i], train_loaders[i],
                train_set_split[i], client_epochs, c_local)
            clients_losses.append(copy.deepcopy(loss))
            solver_log['records']['client_idx'].append(i)
            solver_log['records']['client_loss'].append(loss)
            t.set_postfix_str('Client loss: %.6f' %loss)
            t.update()
            for key in total_delta:
                total_delta[key] = total_delta[key] + c_delta[key]
    # average the deltas and fold them into the server control variate
    for key in total_delta:
        total_delta[key] /= len(clients_idx)
    for key in c_global:
        # integer tensors need an explicit cast before in-place addition
        if c_global[key].type() == 'torch.LongTensor':
            c_global[key] += total_delta[key].type(torch.LongTensor)
        elif c_global[key].type() == 'torch.cuda.LongTensor':
            c_global[key] += total_delta[key].type(torch.cuda.LongTensor)
        else:
            c_global[key] += total_delta[key]
    ##### aggregate client weights (uniform FedAvg) #####
    w_locals = []
    for i in clients_idx:
        w_locals.append(copy.deepcopy(client_solvers[i].model.state_dict()))
    ww = FedAvg(w_locals)
    global_w = copy.deepcopy(ww)
    global_model.load_state_dict(global_w)
    loss_avg = sum(clients_losses)/len(clients_losses)
    print("Round: %d, Average Loss: %.6f" %(r, loss_avg))
    solver_log['records']['agg_loss'].append(' ')
    solver_log['records']['agg_loss'].append(loss_avg)
    ##### Validating #####
    print('=====> Validating...')
    val_loss, psnr, ssim = Test(global_solver, val_loader, solver_log, r)
    print("\n")
print('===> Finished !')
|
from btchip.btchip import btchip
from btchip.bitcoinTransaction import bitcoinTransaction
from struct import unpack
from btchip.bitcoinVarint import readVarint, writeVarint
from btchip.bitcoinTransaction import bitcoinInput, bitcoinOutput
class btchip_xsh(btchip):
    """Ledger BTChip driver variant for SHIELD (XSH).

    Overrides getTrustedInput to include the coin's extra nTime field
    (transaction.time) when streaming the transaction to the dongle.
    """

    def getTrustedInput(self, transaction, index):
        """Stream *transaction* to the dongle in APDU chunks and return the
        signed "trusted input" blob for output *index*.

        Returns a dict: {'trustedInput': True, 'value': <dongle response>}.
        """
        result = {}
        # Header: output index (big-endian u32) + version + nTime + input count
        apdu = [ self.BTCHIP_CLA, self.BTCHIP_INS_GET_TRUSTED_INPUT, 0x00, 0x00 ]
        params = bytearray.fromhex("%.8x" % (index))
        params.extend(transaction.version)
        params.extend(transaction.time)
        writeVarint(len(transaction.inputs), params)
        apdu.append(len(params))
        apdu.extend(params)
        self.dongle.exchange(bytearray(apdu))
        # Each input: prevout + script length, then the script in chunks
        for trinput in transaction.inputs:
            apdu = [ self.BTCHIP_CLA, self.BTCHIP_INS_GET_TRUSTED_INPUT, 0x80, 0x00 ]
            params = bytearray(trinput.prevOut)
            writeVarint(len(trinput.script), params)
            apdu.append(len(params))
            apdu.extend(params)
            self.dongle.exchange(bytearray(apdu))
            offset = 0
            while True:
                # 251 leaves room for the 4-byte sequence appended to the
                # final chunk (APDU payload limit is 255 bytes)
                blockLength = 251
                if ((offset + blockLength) < len(trinput.script)):
                    dataLength = blockLength
                else:
                    dataLength = len(trinput.script) - offset
                params = bytearray(trinput.script[offset : offset + dataLength])
                if ((offset + dataLength) == len(trinput.script)):
                    # last chunk: append the input's sequence number
                    params.extend(trinput.sequence)
                apdu = [ self.BTCHIP_CLA, self.BTCHIP_INS_GET_TRUSTED_INPUT, 0x80, 0x00, len(params) ]
                apdu.extend(params)
                self.dongle.exchange(bytearray(apdu))
                offset += dataLength
                if (offset >= len(trinput.script)):
                    break
        # Number of outputs
        apdu = [ self.BTCHIP_CLA, self.BTCHIP_INS_GET_TRUSTED_INPUT, 0x80, 0x00 ]
        params = []
        writeVarint(len(transaction.outputs), params)
        apdu.append(len(params))
        apdu.extend(params)
        self.dongle.exchange(bytearray(apdu))
        # Each output: amount + script length, then the script in chunks
        indexOutput = 0
        for troutput in transaction.outputs:
            apdu = [ self.BTCHIP_CLA, self.BTCHIP_INS_GET_TRUSTED_INPUT, 0x80, 0x00 ]
            params = bytearray(troutput.amount)
            writeVarint(len(troutput.script), params)
            apdu.append(len(params))
            apdu.extend(params)
            self.dongle.exchange(bytearray(apdu))
            offset = 0
            while (offset < len(troutput.script)):
                # full 255-byte chunks here: nothing is appended to the last one
                blockLength = 255
                if ((offset + blockLength) < len(troutput.script)):
                    dataLength = blockLength
                else:
                    dataLength = len(troutput.script) - offset
                apdu = [ self.BTCHIP_CLA, self.BTCHIP_INS_GET_TRUSTED_INPUT, 0x80, 0x00, dataLength ]
                apdu.extend(troutput.script[offset : offset + dataLength])
                self.dongle.exchange(bytearray(apdu))
                offset += dataLength
        # Locktime: final exchange; the response is the trusted input blob
        apdu = [ self.BTCHIP_CLA, self.BTCHIP_INS_GET_TRUSTED_INPUT, 0x80, 0x00, len(transaction.lockTime) ]
        apdu.extend(transaction.lockTime)
        response = self.dongle.exchange(bytearray(apdu))
        result['trustedInput'] = True
        result['value'] = response
        return result
class shieldTransaction(bitcoinTransaction):
    """Parser for SHIELD (XSH) serialized transactions.

    Differs from plain Bitcoin by an optional 4-byte nTime field after the
    version, plus segwit marker/flag support.
    """

    def __init__(self, data=None):
        # raw field slices (bytes) filled in when *data* is provided
        self.version = ""
        self.time = ""
        self.inputs = []
        self.outputs = []
        self.lockTime = ""
        self.witness = False
        self.witnessScript = ""
        if data is not None:
            offset = 0
            self.version = data[offset:offset + 4]
            offset += 4
            if self.version[0] != 0x04:
                checktime = unpack("<L", data[offset:offset + 4])[0]
                # Heuristic: treat the next 4 bytes as nTime only if they
                # decode to a plausible unix timestamp.
                # NOTE(review): this range is roughly Oct 2017 - Oct 2018;
                # transactions timestamped outside it will mis-parse —
                # confirm against the coin's serialization rules.
                if (checktime >= 1507311610) and (checktime <= 1540188138):
                    self.time = data[offset:offset + 4]
                    offset += 4
            # segwit marker (0x00) + flag (non-zero)
            if (data[offset] == 0) and (data[offset + 1] != 0):
                offset += 2
                self.witness = True
            inputSize = readVarint(data, offset)
            offset += inputSize['size']
            numInputs = inputSize['value']
            for i in range(numInputs):
                # bitcoinInput advances tmp['offset'] past the parsed input
                tmp = { 'buffer': data, 'offset' : offset}
                self.inputs.append(bitcoinInput(tmp))
                offset = tmp['offset']
            outputSize = readVarint(data, offset)
            offset += outputSize['size']
            numOutputs = outputSize['value']
            for i in range(numOutputs):
                tmp = { 'buffer': data, 'offset' : offset}
                self.outputs.append(bitcoinOutput(tmp))
                offset = tmp['offset']
            if self.witness:
                # everything between the outputs and the 4-byte locktime
                self.witnessScript = data[offset : len(data) - 4]
                self.lockTime = data[len(data) - 4:]
            else:
                self.lockTime = data[offset:offset + 4]
|
from django.apps import AppConfig
class CustomEmailUserConfig(AppConfig):
    """Django AppConfig for the custom_email_user app."""
    name = 'custom_email_user'
|
# -*- coding:utf-8 -*-
import pandas as pd
path = 'dataSet//'

# Load two tab-separated submission files produced by different runs.
result_one = pd.read_csv(path + 'result_b_20180706181411.csv', encoding='utf-8', sep='\t')
result_two = pd.read_csv(path + 'result_b_20180706093641.csv', encoding='utf-8', sep='\t')
# print(result_one[result_one['RST'] > 0.1])
# print(result_two[result_two['RST'] > 0.1])

# Per-file score extrema, kept for the optional min-max normalisation below.
max_data_one = result_one['RST'].max()
min_data_one = result_one['RST'].min()
max_data_two = result_two['RST'].max()
min_data_two = result_two['RST'].min()
# result_one['RST'] = [(max_data_one - index) / (max_data_one - min_data_one) for index in result_one['RST']]
# result_two['RST'] = [(max_data_two - index) / (max_data_two - min_data_two) for index in result_two['RST']]
# print(result_one)
# print(result_two)

# Concatenate the two result sets with a fresh 0..n-1 index.
# DataFrame.append() is deprecated (removed in pandas 2.0); concat with
# ignore_index=True is the supported equivalent of the original
# append + reset_index + drop('index') sequence.
result_one = pd.concat([result_one, result_two], ignore_index=True)
print(result_one)
# index=False (the original passed index=None, which pandas also treats as
# "do not write the index column")
result_one.to_csv(path + 'fusion.csv', encoding='utf-8', sep='\t', index=False)
|
import pytest
from beatx.utils import import_string
class TestImportString:
    """Unit tests for beatx.utils.import_string dotted-path resolution."""

    def test_import_module(self):
        """A dotted module path resolves to the module object itself."""
        mod = import_string('os.path')
        assert hasattr(mod, 'abspath')

    def test_import_function(self):
        """A module.attribute path resolves to the attribute (a callable)."""
        pow = import_string('math.pow')
        assert pow(2, 3) == 8

    def test_import_non_existent(self):
        """A missing attribute raises ImportError."""
        with pytest.raises(ImportError):
            import_string('os.non_existent')

    def test_import_non_module(self):
        """A bare top-level name (no dot) is rejected with ImportError."""
        with pytest.raises(ImportError):
            import_string('os')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.