import unittest
from gifts._cosine_similarity import cosine_similarity
class TestCosine(unittest.TestCase):
def test(self):
# https://www.learndatasci.com/glossary/cosine-similarity/
d1 = [0, 0, 0, 1, 1, 1, 1, 1, 2, 1, 2, 0, 1, 0]
d2 = [0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1]
d3 = [1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]
self.assertAlmostEqual(
cosine_similarity(d1, d2),
0.6885303726590963)
self.assertAlmostEqual(
cosine_similarity(d1, d3),
0.21081851067789195)
self.assertAlmostEqual(
cosine_similarity(d2, d3),
0.2721655269759087)
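# Note: cosine_similarity above is imported from the gifts package and not shown here. As a
# hedged reference only, the expected values in this test follow from the standard formula
# dot(a, b) / (||a|| * ||b||). A minimal sketch (the helper name below is illustrative and is
# not the gifts implementation):
import math

def _reference_cosine_similarity(a, b):
    # dot product of the two vectors divided by the product of their Euclidean norms
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = math.sqrt(sum(x * x for x in a))
    norm_b = math.sqrt(sum(y * y for y in b))
    return dot / (norm_a * norm_b)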
|
from html import escape
from datetime import datetime
from atlassian import Confluence
from jinja2 import Environment, PackageLoader, select_autoescape
from .html_coverage import HTMLData
from .models import Module
def make_table(rows):
# TODO: use templating
table = "<table><tbody><tr><th>Name</th><th>Status (OK/Total)</th><th>Status (%)</th></tr>"
for name, status, prc_status in rows:
table += (
'<tr><td><pre>{}</pre></td><td style="text-align: right;">{}</td>'
'<td style="text-align: right;">{:.2f}%</td></tr>'
).format(name, status, prc_status)
table += "</tbody></table>"
return table
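# A possible templated variant of make_table, sketched against the TODO above. It renders the
# same markup with an inline jinja2.Template (jinja2 is already a dependency of this module);
# the helper name and template string are illustrative and not part of the original code.
_TABLE_TEMPLATE = (
    "<table><tbody><tr><th>Name</th><th>Status (OK/Total)</th><th>Status (%)</th></tr>"
    "{% for name, status, prc_status in rows %}"
    '<tr><td><pre>{{ name }}</pre></td><td style="text-align: right;">{{ status }}</td>'
    '<td style="text-align: right;">{{ "%.2f"|format(prc_status) }}%</td></tr>'
    "{% endfor %}"
    "</tbody></table>"
)

def make_table_templated(rows):
    from jinja2 import Template
    return Template(_TABLE_TEMPLATE).render(rows=rows)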
def make_report_rows(module, indent=0):
rows = []
for (item_name, value) in module.modules.items():
if isinstance(value, Module):
ok, total, prc_ok = value.stats
name = " " * indent + escape(item_name.split("::")[-1])
rows.append((name, "{}/{}".format(ok, total), prc_ok))
rows.extend(make_report_rows(value, indent + 4))
return rows
def get_confluence_setup(config, feature_prefix):
track_config = config.inicfg.config.sections["pytest_track"]
api = Confluence(
url=track_config["confluence_url"],
username=track_config["confluence_username"],
password=track_config["confluence_password"],
)
parent_id = track_config["confluence_{}_parent_page_id".format(feature_prefix)]
page_title = track_config["confluence_{}_page_title".format(feature_prefix)]
return api, parent_id, page_title
def upload_to_confluence(config, body: str, feature_prefix: str):
api, parent_id, page_title = get_confluence_setup(config, feature_prefix)
page_title = page_title.format(datetime.now()) # allow dates in names
response = api.update_or_create(parent_id, page_title, body=body)
links = response["_links"]
page_url = "{}{}".format(links["base"], links["webui"])
print("Page available at {}".format(page_url))
def upload_track_report(report, config):
rows = make_report_rows(report.tests)
body = "<p>{}</p>".format(make_table(rows))
upload_to_confluence(config, body, feature_prefix="report")
def make_html_coverage_page():
pass
def upload_html_coverage_report(html_data: HTMLData, config):
html_data.stats(should_print=False)
loader = PackageLoader("pytest_track")
env = Environment(
loader=loader,
autoescape=select_autoescape(["html"]),
)
template = env.get_template("html_coverage.html")
body = template.render({"data": html_data})
upload_to_confluence(config, body, feature_prefix="coverage")
|
# -*- coding: utf-8 -*-
# @Time    : 2021/1/12 2:49 PM
# @Author : zhongyuan
# @Email : zhongyuandt@gmail.com
# @File : train.py
# @Software: PyCharm
from model import Model
from datasets.wineDataset import WineDataset
import torch.utils.data.dataloader as Dataloader
import torch.optim as optim
import os
from torch.autograd import Variable
import torch
from logger import Logger
from cosine_lr_scheduler import CosineDecayLR
from loss import WineLoss
def setup_seed(seed=19960715):
torch.manual_seed(seed) # cpu
torch.cuda.manual_seed(seed) #gpu
torch.backends.cudnn.deterministic=True # cudnn
def train():
os.environ["CUDA_VISIBLE_DEVICES"] = "5"
log = Logger()
epochs = 90
### model ###
net = Model().cuda()
log.write(str(net))
### dataset ###
train_dataset = WineDataset()
train_dataloader = Dataloader.DataLoader(train_dataset, batch_size=32, shuffle=True, num_workers=8)
### optim ###
lr = 1e-4
optimizer = optim.SGD(net.parameters(), lr=lr, weight_decay=0.0005, momentum=0.9)
criterion = WineLoss().cuda()
### scheduler ###
scheduler = CosineDecayLR(optimizer=optimizer, T_max=epochs*len(train_dataloader),lr_init=lr,
lr_min=lr*0.01,warmup=5*len(train_dataloader))
net.train()
for epoch in range(epochs):
for i, (x, label) in enumerate(train_dataloader):
scheduler.step(len(train_dataloader) * epoch + i)
x, label = Variable(x.cuda()), Variable(label.cuda())
pred = net(x)
loss = criterion(pred, label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
log.write("epoch %3d, iter %2d, lr %2.10f, loss %2.4f"%
(epoch, i, optimizer.param_groups[0]['lr'], loss.item()))
if loss <= .001:
torch.save(net.state_dict(),"chkpt/weights.pth")
log.write("save weights in chkpt/weights.pth")
exit(0)
torch.save(net.state_dict(),"chkpt/last_weights.pth")
log.write("save weights in chkpt/weights.pth")
if __name__ == "__main__":
setup_seed()
train()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 16 19:47:50 2017
@author: lfoul, Antonin ROSA-MARTIN (aka MrWormsy) and IGIRANEZA Beda
"""
import http.client
from urllib.parse import urlparse
import socket
class Sensor:
"""
A Sensor is represented as a URL, a label and a list of thresholds
"""
def __init__(self, url, label, thresholds):
self.url = url
self.label = label
self.thresholds = thresholds
"""
Set a GeneralConfiguration to the Sensor
setGeneralConfiguration : Sensor, GeneralConfiguration --> None
"""
def setGeneralConfiguration(self, generalConfiguration):
self.generalConfiguration = generalConfiguration
"""
Set an Emoticon to the Sensor with the emoticon parameters
setEmoticon : Sensor, Emoticon --> None
"""
def setEmoticon(self, emoticon):
self.emoticon = emoticon
self.emoticon.setEmoticonParameters(self.generalConfiguration.getEmoticonSize())
"""
Set a Button to the Sensor
setButton : Sensor, Button --> None
"""
def setButton(self, button):
self.button = button
"""
Set an id to the Sensor
setSensorId : Sensor, int --> None
"""
def setSensorId(self, sensorId):
self.sensorId = sensorId
"""
Set a value to the Sensor
setSensorValue : Sensor, float --> None
"""
def setSensorValue(self, sensorValue):
self.sensorValue = sensorValue
"""
Get the GeneralConfiguration of the Sensor
getGeneralConfiguration : Sensor --> GeneralConfiguration
"""
def getGeneralConfiguration(self):
return self.generalConfiguration
"""
Get the id of the Sensor
getSensorId : Sensor --> int
"""
def getSensorId(self):
return self.sensorId
"""
Get the label of the Sensor
getLabel : Sensor --> String
"""
def getLabel(self):
return self.label
"""
Get the value of the Sensor
getSensorValue : Sensor --> float
"""
def getSensorValue(self):
return self.sensorValue
"""
Get the thresholds of the Sensor
getThresholds : Sensor --> List
"""
def getThresholds(self):
return self.thresholds
"""
Checks if the connection to the sensor is set (returns True if set, False otherwise)
isConnectedToUrl : Sensor --> boolean
"""
def isConnectedToUrl(self):
self.parsedUrl = urlparse(self.url)
self.connection = http.client.HTTPSConnection(self.parsedUrl.netloc)
try:
self.connection.request('GET', self.url)
except socket.error:
return False
else:
self.response = self.connection.getresponse()
return self.response.status == http.client.OK
"""
Reads the sensor and returns the float value if the connection is set
read : Sensor --> float
"""
def read(self):
if self.isConnectedToUrl():
#Strip the b and quote characters of the bytes repr so the value parses as a float
return float(str(self.response.read()).replace("b", "").replace("'", ""))
else:
return None
"""
Map a value between two starting values to a value between two target values
mapValue : float, float, float, float, float --> float
"""
def mapValue(value, minA, maxA, minB, maxB):
return minB + (((value - minA)*(maxB - minB))/(maxA - minA))
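# Worked example (values are illustrative): mapValue(5, 0, 10, 0, 100) scales 5 from the
# range [0, 10] into [0, 100] -> 0 + ((5 - 0) * (100 - 0)) / (10 - 0) = 50.0.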
"""
Map the value read by the sensor according to the thresholds and return it between -1 and 1
getTransformedValue : Sensor --> float
"""
def getTransformedValue(self):
value = float(self.read())
if (value == self.thresholds[1]) :
return 0
elif (value < self.thresholds[0]):
return -1
elif (value > self.thresholds[2]):
return 1
elif (value > self.thresholds[0] and value < self.thresholds[1]):
return (Sensor.mapValue(value, self.thresholds[0], self.thresholds[1], 0.0, 1.0) - 1)
else:
return (Sensor.mapValue(value, self.thresholds[1], self.thresholds[2], 0.0, 1.0))
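# Worked example (illustrative thresholds [10, 20, 30]): a reading of 15 falls between the
# low and middle thresholds, so mapValue(15, 10, 20, 0.0, 1.0) - 1 = -0.5; a reading of 25
# falls between the middle and high thresholds, so mapValue(25, 20, 30, 0.0, 1.0) = 0.5.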
"""
Draws the Emoticon of the Sensor according to the transformed value
drawEmoticon : Sensor --> None
"""
def drawEmoticon(self):
self.emoticon.draw(self.getTransformedValue())
"""
Draws the Button of the Sensor
drawButton : Sensor --> None
"""
def drawButton(self):
self.button.draw()
|
#============================ adjust path =====================================
import sys
import os
if __name__ == "__main__":
here = sys.path[0]
sys.path.insert(0, os.path.join(here, '..'))
#============================ imports =========================================
from SmartMeshSDK import ApiException
from SmartMeshSDK.ApiDefinition.IpMgrDefinition import IpMgrDefinition
from SmartMeshSDK.ApiDefinition.IpMoteDefinition import IpMoteDefinition
from SmartMeshSDK.ApiDefinition.HartMgrDefinition import HartMgrDefinition
from SmartMeshSDK.ApiDefinition.HartMoteDefinition import HartMoteDefinition
#============================ templates =======================================
TMPL_CLASSDEF = '''\'\'\'
This module was generated automatically. Do not edit directly.
\'\'\'
import collections
from SmartMeshSDK import ApiException
from {MODULE_NAME} import {BASE_CLASS_NAME}
##
# \\addtogroup {GEN_CLASS_NAME}
# \\{{
#
class {GEN_CLASS_NAME}({BASE_CLASS_NAME}):
\'\'\'
\\brief {BRIEF_DESCRIPTION}
\'\'\'
'''
TMPL_DEF = '''
##
# The named tuple returned by the {CMD_NAME}() function.
#
{TUPLE_COMMENT}
#
Tuple_{CMD_NAME} = collections.namedtuple("Tuple_{CMD_NAME}", {TUPLE_PARAMS})
##
# {DESCR}
#
{CMD_COMMENT}
#
# \\returns The response to the command, formatted as a #Tuple_{CMD_NAME} named tuple.
#
def {CMD_NAME}(self, {CMD_PARMS}) :
res = {BASE_CLASS_NAME}.send(self, {NAMES}, {{{PARAMS_DICT}}})
return {CLASS_NAME}.Tuple_{CMD_NAME}(**res)
'''
TMPL_DEF_LIST = '''
##
# The named tuple returned by the {CMD_NAME}() function.
#
{TUPLE_COMMENT}
#
Tuple_{CMD_NAME} = collections.namedtuple("Tuple_{CMD_NAME}", {TUPLE_PARAMS})
##
# {DESCR}
#
{CMD_COMMENT}
#
# \\returns The response to the command, formatted as a list of #Tuple_{CMD_NAME} named tuple.
#
def {CMD_NAME}(self, {CMD_PARMS}) :
res = {BASE_CLASS_NAME}.send(self, {NAMES}, {{{PARAMS_DICT}}})
tupleList = []
for r in res :
tupleList.append({CLASS_NAME}.Tuple_{CMD_NAME}(**r))
return tupleList
'''
TMPL_DEF_NOTUPLE = '''
##
# {DESCR}
#
{CMD_COMMENT}
#
# \\returns The response to the command.
#
def {CMD_NAME}(self, {CMD_PARMS}) :
res = {BASE_CLASS_NAME}.send(self, {NAMES}, {{{PARAMS_DICT}}})
return res
'''
TMPL_NOTIF = '''
##
# \\brief {NOTIF_NAME_UP} notification.
#
# {DESCR}
#
{NOTIF_COMMENT}
#
{NOTIF_NAME_UP} = "{NOTIF_NAME}"
notifTupleTable[{NOTIF_NAME_UP}] = Tuple_{NOTIF_NAME} = collections.namedtuple("Tuple_{NOTIF_NAME}", {NOTIF_PARAMS})
'''
TMPL_NOTIF_NOTUPLE = '''
##
# \\brief Notification {NOTIF_NAME_UP}
#
# {DESCR}
#
{NOTIF_NAME_UP} = "{NOTIF_NAME}"
notifTupleTable[{NOTIF_NAME_UP}] = None
'''
TMPL_GETNOTIFICATION = '''
##
# \\brief Get a notification from the notification queue, and returns
# it properly formatted.
#
# \\exception NotificationError if unknown notification.
#
def getNotification(self, timeoutSec=-1) :
temp = self.getNotificationInternal(timeoutSec)
if not temp:
return temp
(ids, param) = temp
try :
if {CLASS_NAME}.notifTupleTable[ids[-1]] :
return (ids[-1], {CLASS_NAME}.notifTupleTable[ids[-1]](**param))
else :
return (ids[-1], None)
except KeyError :
raise ApiException.NotificationError(ids, param)
'''
START_LOCATION_COMMENT = '''
# \param numFrames 1-byte field formatted as an integer.<br/>
# There is no restriction on the value of this field.
# \param mobileMote 8-byte field formatted as hex.<br/>
# There is no restriction on the value of this field.
# \param fixedMotes list of 8-byte fields formatted as hex.<br/>
# There is no restriction on the value of this field.
#
'''
START_LOCATION_PAYLOAD = '''
payload = []
for fm in fixedMotes :
payload += fm
res = {BASE_CLASS_NAME}.send(self, ['startLocation'], {{"numFrames" : numFrames, "mobileMote" : mobileMote, "fixedMotes" : payload}})
'''
TMPL_ENDCLASSDEF = '''
##
# end of {GEN_CLASS_NAME}
# \\}}
#
'''
def printStartLocation(names, respFieldsName, reqFieldsName,
CMD_NAME, CMD_PARMS, DESCR,
TUPLE_PARAMS, NAMES, PARAMS_DICT,
BASE_CLASS_NAME, CLASS_NAME,
TUPLE_COMMENT, CMD_COMMENT) :
s = TMPL_DEF.format(CMD_NAME = CMD_NAME, CMD_PARMS = CMD_PARMS, DESCR = DESCR,
TUPLE_PARAMS = TUPLE_PARAMS, NAMES = NAMES, PARAMS_DICT = PARAMS_DICT,
BASE_CLASS_NAME = BASE_CLASS_NAME, CLASS_NAME = CLASS_NAME,
TUPLE_COMMENT = TUPLE_COMMENT, CMD_COMMENT = '***')
lines = s.split('\n')
res = []
for l in lines :
if l == '***' :
res += [START_LOCATION_COMMENT[1:]]
elif l.find('send(self') >= 0:
res += [START_LOCATION_PAYLOAD[1:].format(BASE_CLASS_NAME=BASE_CLASS_NAME)]
else :
res += [l + '\n']
return ''.join(res)
RADIOTEST_TX_COMMENT = '''
# \param type 1-byte field formatted as an integer.<br/>
# Type of transmission test: 0=packet, 1=continuous modulation (CM), 2=continuous wave (CW)
# \param mask 2-bytes field formatted as an integer.<br/>
# Mask of channels(0-15) enabled for test.
# \param txPower 1-byte field formatted as a signed integer.<br/>
# Transmit power, in dB. Valid values are 0 (power amplifier off) and 8 (power amplifier on).
# \param numRepeats 2-byte field formatted as an integer.<br/>
# Number of times to repeat the packet sequence (0=do not stop). Applies only to packet transmission tests.
# \param tests list of integer pairs.<br/>
# Sequence definitions (up to 10) specify the length (bytes) and after-packet delay (usec) for each packet
'''
RADIOTEST_TX_PAYLOAD = '''
d = {{"type" : type, "mask" : mask, "numRepeats" : numRepeats, "txPower" : txPower, "numPackets" : len(tests)}}
for i in range(10) :
if i < len(tests) :
l, g = tests[i]
else :
l, g = (0, 0)
d["pkSize" + str(i+1)] = l
d["gap" + str(i+1)] = g
res = {BASE_CLASS_NAME}.send(self, ['radiotestTx'], d)
'''
def printRadioTestTx(names, respFieldsName, reqFieldsName,
CMD_NAME, CMD_PARMS, DESCR,
TUPLE_PARAMS, NAMES, PARAMS_DICT,
BASE_CLASS_NAME, CLASS_NAME,
TUPLE_COMMENT, CMD_COMMENT) :
s = TMPL_DEF.format(CMD_NAME = CMD_NAME, CMD_PARMS = CMD_PARMS, DESCR = DESCR,
TUPLE_PARAMS = TUPLE_PARAMS, NAMES = NAMES, PARAMS_DICT = PARAMS_DICT,
BASE_CLASS_NAME = BASE_CLASS_NAME, CLASS_NAME = CLASS_NAME,
TUPLE_COMMENT = TUPLE_COMMENT, CMD_COMMENT = '***')
lines = s.split('\n')
res = []
for l in lines :
if l == '***' :
res += [RADIOTEST_TX_COMMENT[1:]]
elif l.find('def dn_radioTestTx') >= 0 :
res += ' def dn_radioTestTx(self, type, mask, txPower, numRepeats, tests) :\n'
elif l.find('send(self') >= 0:
res += [RADIOTEST_TX_PAYLOAD[1:].format(BASE_CLASS_NAME=BASE_CLASS_NAME)]
else :
res += [l + '\n']
return ''.join(res)
'''
Dictionary of commands with special processing:
{'cmdName' : [requestFields, responseFields, generator]}
def generator(names, respFieldsName, reqFieldsName,
CMD_NAME, CMD_PARMS, DESCR, TUPLE_PARAMS, NAMES, PARAMS_DICT,
BASE_CLASS_NAME, CLASS_NAME, TUPLE_COMMENT, CMD_COMMENT) :
return strings
'''
specialCmd = {
'dn_startLocation' : [
['numFrames', 'mobileMote', 'fixedMotes'],
['RC', 'callbackId'],
printStartLocation
],
'dn_radioTestTx' : [
['type', 'mask', 'numRepeats', 'txPower', 'numPackets',
'pkSize1', 'gap1', 'pkSize2', 'gap2', 'pkSize3', 'gap3', 'pkSize4', 'gap4', 'pkSize5', 'gap5',
'pkSize6', 'gap6', 'pkSize7', 'gap7', 'pkSize8', 'gap8', 'pkSize9', 'gap9', 'pkSize10', 'gap10'],
['RC'],
printRadioTestTx
]
}
class GenApiConnectors(object):
#======================== public ==========================================
def __init__(self, apiDefName, myClassName, baseClassName, baseModuleName, outputFileName = None, briefDescription = '', apiDefClass=None):
if apiDefName:
apiDefClass = globals()[apiDefName]
self.apiDef = apiDefClass()
self.myClassName = myClassName
self.baseClassName = baseClassName
self.baseModuleName = baseModuleName
self.briefDescription = briefDescription
if outputFileName:
self.outFile = open(outputFileName, "wt")
else:
self.outFile = sys.stdout
def gen(self):
s = TMPL_CLASSDEF.format(MODULE_NAME = self.baseModuleName,
BASE_CLASS_NAME = self.baseClassName,
GEN_CLASS_NAME = self.myClassName,
BRIEF_DESCRIPTION = self.briefDescription)
self.outFile.write(s)
self.outFile.write('\n #======================== commands ========================================\n')
self.genCmd()
self.outFile.write('\n #======================== notifications ===================================\n')
self.genNotif()
s = TMPL_ENDCLASSDEF.format(GEN_CLASS_NAME = self.myClassName)
self.outFile.write(s)
self.genFinish()
#======================== private =========================================
#===== commands
def genCmd(self):
cmdNames = self.apiDef.getNames(self.apiDef.COMMAND)
for name in cmdNames :
self.genOneCmd([name], [], [])
def genOneCmd(self, names, respFieldsName, reqFieldsName):
# get request fields
r = self.apiDef.getRequestFieldNames(names)
reqFieldsName += [n for n in r if n not in self.apiDef.RESERVED]
# get response fields
try:
r = self.apiDef.getResponseFieldNames(self.apiDef.COMMAND, names)
respFieldsName += [n for n in r if n not in self.apiDef.RESERVED]
except ApiException.CommandError :
# means that this function has no response fields, which is OK
pass
if self.apiDef.hasSubcommands(self.apiDef.COMMAND, names):
subcmdsName = self.apiDef.getNames(self.apiDef.COMMAND, names)
for sn in subcmdsName :
self.genOneCmd(names+[sn], respFieldsName[:], reqFieldsName[:])
else:
cmdName = 'dn_'
cmdName += '_'.join([n for n in names])
cmdParams = ', '.join([p for p in reqFieldsName])
paramsDict = ', '.join(['"{0}" : {1}'.format(p, p) for p in reqFieldsName])
descr = self.apiDef.getDescription(self.apiDef.COMMAND, names).replace('\n', '\n # ')
cmdComment = ''.join([self.getCmdComments(names, p) for p in reqFieldsName])[:-1]
if not cmdComment:
cmdComment = ' # '
tupleComment = ''.join([self.getCmdTupleComments(names, p) for p in respFieldsName])[:-1]
if cmdName in specialCmd and specialCmd[cmdName][0] == reqFieldsName and specialCmd[cmdName][1] == respFieldsName :
s = specialCmd[cmdName][2](names, respFieldsName, reqFieldsName,
CMD_NAME = cmdName,
CMD_PARMS = cmdParams,
DESCR = descr,
TUPLE_PARAMS = respFieldsName,
NAMES = names,
PARAMS_DICT = paramsDict,
BASE_CLASS_NAME = self.baseClassName,
CLASS_NAME = self.myClassName,
TUPLE_COMMENT = tupleComment,
CMD_COMMENT = cmdComment)
else :
if respFieldsName :
cmd_metadata = self.apiDef.getDefinition(self.apiDef.COMMAND, names)
if ('isResponseArray' in cmd_metadata) :
s = TMPL_DEF_LIST.format(CMD_NAME = cmdName,
CMD_PARMS = cmdParams,
DESCR = descr,
TUPLE_PARAMS = respFieldsName,
NAMES = names,
PARAMS_DICT = paramsDict,
BASE_CLASS_NAME = self.baseClassName,
CLASS_NAME = self.myClassName,
TUPLE_COMMENT = tupleComment,
CMD_COMMENT = cmdComment)
else :
s = TMPL_DEF.format(CMD_NAME = cmdName,
CMD_PARMS = cmdParams,
DESCR = descr,
TUPLE_PARAMS = respFieldsName,
NAMES = names,
PARAMS_DICT = paramsDict,
BASE_CLASS_NAME = self.baseClassName,
CLASS_NAME = self.myClassName,
TUPLE_COMMENT = tupleComment,
CMD_COMMENT = cmdComment)
else :
s = TMPL_DEF_NOTUPLE.format(CMD_NAME = cmdName,
CMD_PARMS = cmdParams,
DESCR = descr,
NAMES = names,
PARAMS_DICT = paramsDict,
BASE_CLASS_NAME = self.baseClassName,
CMD_COMMENT = cmdComment)
self.outFile.write(s)
#===== notifications
def genNotif(self):
# write header
output = []
output += [' \n']
output += [' ##\n']
output += [' # Dictionary of all notification tuples.\n']
output += [' #\n']
output += [' notifTupleTable = {}\n']
output += [' ']
self.outFile.write(''.join(output))
# generate all notifications
notifIds = self.apiDef.getIds(self.apiDef.NOTIFICATION)
for notifId in notifIds :
notifName = self.apiDef.idToName(self.apiDef.NOTIFICATION, notifId)
self.genOneNotif([notifName], [])
s = TMPL_GETNOTIFICATION.format(BASE_CLASS_NAME=self.baseClassName,
CLASS_NAME=self.myClassName)
self.outFile.write(s)
def genOneNotif(self, names, fieldNames) :
try :
f = self.apiDef.getResponseFieldNames(self.apiDef.NOTIFICATION, names)
if not f :
raise KeyError
fieldNames += [n for n in f if n not in self.apiDef.RESERVED]
except (NameError, KeyError, ApiException.CommandError) :
pass
try :
subcmdsName = self.apiDef.getNames(self.apiDef.NOTIFICATION, names)
for sn in subcmdsName :
self.genOneNotif(names + [sn], fieldNames[:])
except ApiException.CommandError :
notifName = names[-1]
descr = self.apiDef.getDescription(self.apiDef.NOTIFICATION, names).replace('\n', '\n # ')
if fieldNames:
tupleName = "Tuple_"+notifName
notifComment = ' # Formatted as a {0} named tuple.'.format(tupleName)
notifComment += ' It contains the following fields:\n'
notifComment += ''.join([self.getNotifComments(names,tupleName,fieldName) for fieldName in fieldNames])[:-1]
if not notifComment:
notifComment = ' # '
s = TMPL_NOTIF.format(NOTIF_NAME = notifName,
NOTIF_NAME_UP = notifName.upper(),
NOTIF_PARAMS = fieldNames,
DESCR = descr,
NOTIF_COMMENT = notifComment)
else :
s = TMPL_NOTIF_NOTUPLE.format(NOTIF_NAME = notifName,
NOTIF_NAME_UP = notifName.upper(),
DESCR = descr)
self.outFile.write(s)
#===== end
def genFinish(self):
if self.outFile != sys.stdout :
self.outFile.close()
#======================== helpers =========================================
def getCmdTupleComments(self, names, param):
format = self.apiDef.getResponseFieldFormat(self.apiDef.COMMAND, names, param)
length = self.apiDef.getResponseFieldLength(self.apiDef.COMMAND, names, param)
options = self.apiDef.getResponseFieldOptions(self.apiDef.COMMAND, names, param)
s = ' # - <tt>{NAME}</tt>: {LEN}-byte field formatted as a {FMT}.<br/>\n'.format(NAME=param,
LEN=length,
FMT=format)
s += self.getValidationComment(options)
return s
def getCmdComments(self, names, param):
format = self.apiDef.getRequestFieldFormat(names, param)
length = self.apiDef.getRequestFieldLength(names, param)
options = self.apiDef.getRequestFieldOptions(names, param)
s = ' # \param {NAME} {LEN}-byte field formatted as a {FMT}.<br/>\n'.format(NAME=param,
LEN=length,
FMT=format)
s += self.getValidationComment(options)
return s
def getNotifComments(self, names, tupleName, fieldName):
format = self.apiDef.getResponseFieldFormat(self.apiDef.NOTIFICATION,
names,
fieldName)
length = self.apiDef.getResponseFieldLength(self.apiDef.NOTIFICATION,
names,
fieldName)
options = self.apiDef.getResponseFieldOptions(self.apiDef.NOTIFICATION,
names,
fieldName)
s = ' # - <tt>{NAME}</tt> {LEN}-byte field formatted as a {FMT}.<br/>\n'.format(NAME=fieldName,
LEN=length,
FMT=format)
s += self.getValidationComment(options)
return s
def getValidationComment(self, options):
if not options.validOptions:
return ' # There is no restriction on the value of this field.\n'
s = ' # This field can only take one of the following values:\n'
for i in range(len(options.validOptions)):
s += ' # - {0}: {1}\n'.format(options.validOptions[i], options.optionDescs[i])
return s
def genFile(srcFileName, dstFileName, comment):
if isinstance(srcFileName, str):
apiDefClass = None
apiDefName = os.path.splitext(os.path.basename(srcFileName))[0]
else:
apiDefClass = srcFileName
apiDefName = None
baseName = os.path.splitext(os.path.basename(dstFileName))[0]
gen = GenApiConnectors(apiDefName=apiDefName,
apiDefClass=apiDefClass,
myClassName=baseName,
baseClassName=baseName + "Internal",
baseModuleName=baseName + "Internal",
outputFileName=dstFileName,
briefDescription=comment)
gen.gen()
def main() :
if len(sys.argv) < 3:
print("Usage: GenApiConnectors <apiDefinitionFile> <resultFile> [<comment>]")
return 1
comment = ''
if len(sys.argv) > 3:
comment = sys.argv[3]
genFile(sys.argv[1], sys.argv[2], comment)
if __name__ == '__main__':
main()
|
"""
Command-line interface for valar-dagger.
"""
from __future__ import annotations
import logging
from pathlib import Path
import typer
from valardagger.watcher import Watcher
logger = logging.getLogger(__package__)
cli = typer.Typer()
@cli.command()
def start(path: Path) -> None:
"""
Starts the scheduler.
"""
with Watcher(path):
logger.info(f"Started watching at {path}")
@cli.command()
def backup(name: str) -> None:
"""
Runs a backup configuration.
"""
if __name__ == "__main__":
cli()
|
__author__ = 'jbs'
import ship_data
def test():
print(ship_data.boat_heading)
ship_data.boat_heading = 7
print(ship_data.boat_heading)
test()
|
#! /usr/bin/python
# by edward silher for collecting gps data in conjunction with AIS data
# edwardsihler@ursusonline.net
import serial
import subprocess
import os
import sys
from gps import *
from time import *
import time
import threading
gpsd = None #setting the global variable
os.system('clear') #clear the terminal (optional)
class GpsPoller(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
global gpsd #bring it in scope
gpsd = gps(mode=WATCH_ENABLE) #starting the stream of info
self.current_value = None
self.running = True #setting the thread running to true
def run(self):
global gpsd
while gpsp.running:
gpsd.next() #this will continue to loop and grab EACH set of gpsd info to clear the buffer
# def utc(self):
# return gpsd.utc
def main (argv):
#find the port with the AIS receiver on it
usbPort = subprocess.check_output("dmesg | grep \"FTDI.*now attached to ttyUSB\"", shell=True)
i = usbPort.rfind("ttyUSB")
aisPort = '/dev/' + usbPort[i:].strip()
#aisPort = '/dev/ttyUSB0'
ais = serial.Serial(aisPort, 38400, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE)
global gpsp
gpsp = GpsPoller() # create the thread
try:
gpsp.start() # start it up
while True:
#It may take a second or two to get good data
#print gpsd.fix.latitude,', ',gpsd.fix.longitude,' Time: ',gpsd.utc
#os.system('clear')
msg = ''
msg = str(gpsd.utc)
msg += ", " + str(gpsd.fix.latitude)
msg += ", " + str(gpsd.fix.longitude )
try:
msg += ", " + ais.readline().strip()
print(msg)
except serial.SerialException:
time.sleep(5)
print (msg)
#time.sleep(5) #set to whatever
except (KeyboardInterrupt, SystemExit): #when you press ctrl+c
print "\nKilling Thread..."
gpsp.running = False
gpsp.join() # wait for the thread to finish what it's doing
print "Done.\nExiting."
if __name__ == '__main__':
main(sys.argv[1:])
|
from django import forms
from django.contrib.auth import get_user_model
class UserForm(forms.ModelForm):
class Meta:
model = get_user_model()
fields = ['first_name', 'last_name', 'email', 'is_superuser', 'username', 'password', 'groups']
widgets = {
'password': forms.PasswordInput,
'groups': forms.CheckboxSelectMultiple
}
def __init__(self, *args, **kwargs):
super(UserForm, self).__init__(*args, **kwargs)
self.fields['first_name'].required = True
self.fields['last_name'].required = True
self.fields['email'].required = True
class UserUpdateForm(UserForm):
class Meta(UserForm.Meta):
exclude = ('password',)
|
from .validator import validate_params, Param, GET, FORM, PATH, JSON, HEADER
from .rules import (
AbstractRule,
CompositeRule,
Enum,
IsDatetimeIsoFormat,
IsEmail,
MaxLength,
MinLength,
Max,
Min,
NotEmpty,
Pattern,
)
|
# -*- coding: utf-8 -*-
import query
import unittest
class TestAlgorithms(unittest.TestCase):
def setUp(self):
# Generate some BS coordinates that are in the range of all
# elevation services.
self.latlons = [
(40.03488860164351, -105.27230724626),
(40.03498860164351, -105.27230724626),
(40.03508860164351, -105.27230724626),
(40.03518860164351, -105.27230724626),
(40.03528860164351, -105.27230724626),
(40.03538860164351, -105.27230724626),
(40.03548860164351, -105.27230724626),
(40.03558860164351, -105.27230724626),
(40.03568860164351, -105.27230724626),
(40.03578860164351, -105.27230724626)
]
def test_epqs(self):
"""Functional test for National Map EPQS."""
elevs = query.national_map_epqs(self.latlons)
# print('epqs')
# print(elevs)
self.assertIsInstance(elevs, list)
@unittest.skip(
'This test only works on my local machine because it makes use of '
'a file over here. WIP.'
)
def test_1m(self):
"""Functional test for National Map 1m file."""
# I am not including this dataset for now - it is huge.
# Maybe at some point.
elevation_fname = 'data/all.tif'
elevs = query.national_map_1m(self.latlons, elevation_fname)
# print('1m')
# print(elevs)
self.assertIsInstance(elevs, list)
@unittest.skip('open-elevation works, but it is sloooow.')
def test_open_elevation(self):
elevs = query.open_elevation(self.latlons)
# print('open_elevation')
# print(elevs)
self.assertIsInstance(elevs, list)
def test_google_maps(self):
# Note: Running this test requires a user-maintained `config.py`
# file containing their gmaps key as a string.
from config import user_gmaps_key
# Generate enough latlons that the request needs to be broken
# up into chunks.
latlons = [[40.0, -105.0]] * 10000
elevs = query.google(latlons, user_gmaps_key)
# print('google maps')
# print(elevs)
self.assertIsInstance(elevs, list)
if __name__ == '__main__':
unittest.main()
|
"""
# Step 1 - Create the App
# Step 2 - Create the Game
# Step 3 - Build the Game
# Step 4 - Run the App
"""
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty
from kivy.vector import Vector
from kivy.clock import Clock
from random import randint
class PongPaddle(Widget):
score = NumericProperty(0)
def bounce_ball(self, ball):
if self.collide_widget(ball):
ball.velocity_x *= -1
print('hello world')
class PongBall(Widget):
velocity_x = NumericProperty(0)
velocity_y = NumericProperty(0)
velocity = ReferenceListProperty(velocity_x, velocity_y)
# Latest Position of the Ball = Current Velocity + Current Position
def move(self):
self.pos = Vector(*self.velocity) + self.pos
# Update - moving the ball by calling the move function and other stuff
# on touch_down() = When our fingers/mouse touches the screen
# on touch_up() - when we lift our finger off the screen after touching it
# on_touch_move() - when we drag our finger on the screen
class PongGame(Widget):
ball = ObjectProperty(None)
player1 = ObjectProperty(None)
player2 = ObjectProperty(None)
def serve_ball(self):
self.ball.velocity = Vector(4, 0).rotate(randint(0, 360))
def update(self, dt):
self.ball.move()
# Bounce off top and bottom Y
if (self.ball.y < 0) or (self.ball.y > self.height - 50):
self.ball.velocity_y *= -1.1
# Bounce off left and increase the score
if self.ball.x < 0:
self.ball.velocity_x *= -1
self.player1.score += 1
# Bounce off right and increase the score
if self.ball.x > self.width - 50:
self.ball.velocity_x *= -1
self.player2.score += 1
self.player1.bounce_ball(self.ball)
self.player2.bounce_ball(self.ball)
def on_touch_move(self, touch):
if touch.x < self.width / 4:
self.player1.center_y = touch.y
if touch.x > self.width * 3 / 4:
self.player2.center_y = touch.y
class PongApp(App):
def build(self):
game = PongGame()
game.serve_ball()
Clock.schedule_interval(game.update, 1.0 / 60.0)
return game
PongApp().run()
|
# send incrementing packets containing numbers to given host
# start up a server by using ncat
# the static 5.59BETA1 version would work
# http://nmap.org/dist/ncat-portable-5.59BETA1.zip
# the server should be started with CRLF as EOF
# eg: ncat -u -l -C localhost 9111
import subprocess
import argparse
from time import sleep
from sys import argv, exit
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='ncat num sender')
parser.add_argument('host', type=str)
parser.add_argument('port', type=str)
parser.add_argument('-s', '--sleep', default=100, type=int, help='sleep time', required=False)
parser.add_argument('--nosleep', help='nosleep', action='store_true')
parser.add_argument('--tcp', help='use tcp instead of udp', action='store_true')
args = parser.parse_args()
cmd = ['ncat', '-u', '-C', args.host, args.port]
if args.tcp:
cmd.remove('-u')
ncat = subprocess.Popen(cmd, stdin=subprocess.PIPE)
cnt = 1
while True: # send till die
ncat.stdin.write('%s\r\n' % ('-' * (1 + (cnt % 8))))
#ncat.stdin.write('%d\r\n' % (cnt % 100))
cnt += 1
print(cnt)
if not args.nosleep:
sleep(args.sleep/1000.0)
|
# %%
from collections import defaultdict
# %%
with open('input.txt', 'r') as f:
data = f.read()
data = data.split('\n')
# %%
with open('test_input.txt', 'r') as f:
test_data = f.read()
test_data = test_data.split('\n')
# %%
def to_sequence(data):
sequences = []
for x in data:
signal_pattern, digits = x.split(" | ")
entry = {
"signal_pattern": signal_pattern.split(" "),
"digits": digits.split(" "),
}
sequences.append(entry)
return sequences
# %%
UNIQUE_LENGTHS = [2, 3, 4, 7]
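# Segment counts of 2, 3, 4 and 7 uniquely identify the digits 1, 7, 4 and 8 respectively.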
def unique_instances(sequences):
unique_instances = 0
for entry in sequences:
for digit in entry["digits"]:
if len(digit) in UNIQUE_LENGTHS:
unique_instances += 1
return unique_instances
# %%
test_sequences = to_sequence(test_data)
unique_instances(test_sequences)
# %%
sequences = to_sequence(data)
unique_instances(sequences)
# %%
single_test_data = ["acedgfb cdfbe gcdfa fbcad dab cefabd cdfgeb eafb cagedb ab | cdfeb fcadb cdfeb cdbaf"]
single_sequences = to_sequence(single_test_data)
test_entry = single_sequences[0]
test_entry
# %%
def calc_configuration(signal_pattern):
configuration = {
"to": set(),
"bo": set(),
"lt": set(),
"lb": set(),
"rt": set(),
"rb": set(),
"mi": set(),
}
# 1
configuration["rt"].update(signal_pattern[2][0])
configuration["rb"].update(signal_pattern[2][0])
# print(1, configuration)
# 7
seven_pattern = signal_pattern[3][0]
configuration["to"] = set(seven_pattern).difference(configuration["rb"])
# print(7, configuration)
# 4
four_pattern = signal_pattern[4][0]
configuration["mi"] = set(four_pattern).difference(configuration["rb"])
configuration["lt"] = set(four_pattern).difference(configuration["rb"])
# print(4, configuration)
# patterns
zero_pattern = None
nine_pattern = None
six_pattern = None
for pattern in signal_pattern[6]:
diff_from_mid = configuration["mi"].intersection(pattern)
intersect_four = set(four_pattern).intersection(pattern)
if len(diff_from_mid) == 1:
zero_pattern = pattern
elif len(intersect_four) == 4:
nine_pattern = pattern
else:
six_pattern = pattern
# 0
configuration["bo"] = set(zero_pattern).difference(
configuration["rb"].union(configuration["rt"].union(configuration["to"]))
)
configuration["lb"] = set(zero_pattern).difference(
configuration["rb"].union(configuration["rt"].union(configuration["to"]))
)
configuration["mi"] = configuration["mi"].difference(set(zero_pattern).intersection(configuration["mi"]))
configuration["lt"] = configuration["lt"].difference(configuration["mi"])
configuration["bo"] = configuration["bo"].difference(configuration["lt"])
configuration["lb"] = configuration["lb"].difference(configuration["lt"])
# print(0, configuration)
# 6
configuration["rb"] = configuration["rb"].intersection(six_pattern)
configuration["rt"] = configuration["rt"].difference(configuration["rb"])
# print(6, configuration)
# 9
configuration["bo"] = configuration["bo"].intersection(nine_pattern)
configuration["lb"] = configuration["lb"].difference(configuration["bo"])
# print(9, configuration)
return configuration
# %%
DIGITS = {
"0": set(["to", "bo", "lb", "rb", "rt", "lt"]),
"1": set(["rb", "rt"]),
"2": set(["to", "bo", "lb", "rt", "mi"]),
"3": set(["to", "bo", "rb", "rt", "mi"]),
"4": set(["rb", "rt", "lt", "mi"]),
"5": set(["to", "bo", "rb", "lt", "mi"]),
"6": set(["to", "bo", "lb", "rb", "lt", "mi"]),
"7": set(["to", "rb", "rt"]),
"8": set(["to", "bo", "lb", "rb", "lt", "mi", "rt"]),
"9": set(["to", "bo", "rb", "lt", "mi", "rt"]),
}
# %%
def calc_digit(letter_to_pos, letters):
positions = set([letter_to_pos[letter] for letter in letters])
for digit, digit_positions in DIGITS.items():
if positions == digit_positions:
return digit
raise Exception("No digit found")
# %%
def calc_message(entry):
signal_pattern = defaultdict(list)
for pattern in entry["signal_pattern"]:
signal_pattern[len(pattern)].append([c for c in pattern])
config = calc_configuration(signal_pattern)
letter_to_pos = dict((letter.pop(), position) for position, letter in config.items())
output_digit = ""
for letter_set in entry["digits"]:
output_digit += calc_digit(letter_to_pos, letter_set)
return int(output_digit)
# %%
calc_message(test_entry)
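# Expected output for the single worked entry above is 5353 (the Advent of Code 2021 day 8 example).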
# %%
def calc_sequence(sequences):
summed_output = 0
for entry in sequences:
output = calc_message(entry)
summed_output += output
return summed_output
# %%
calc_sequence(test_sequences)
# %%
calc_sequence(sequences)
# %%
|
import backbone.support.configurations_variables as confv
import backbone.support.data_loading as dl
import backbone.support.data_analysis as da
import backbone.support.data_cleaning as dc
import backbone.support.configuration_classes as confc
import backbone.support.saving_loading as sl
import backbone.support.plots_and_charts as pc
import backbone.support.build_features as bf
import numpy as np
import backbone.support.models as mdl
from sklearn.utils.class_weight import compute_class_weight
from tensorflow.keras.callbacks import TensorBoard
import time
import backbone.support.directory_file_checking as dfc
import os
from tensorflow.python.keras.callbacks import CSVLogger
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
import tensorflow as tf
print("\t===========================================================================================\n"
"\t\tMain program started for MAIN-DATABASE:{database}, GENDER-ISOLATION:{gender}\n"
"\t\t\t\u2234 Dataset Name: {name}\n"
"\t==========================================================================================="
.format(database=confv.database_emodb, gender=confv.gender_male, name=confv.dataset_emodb_male))
'''
# DATA LOADING SECTION
print("\n--------------------Started loading original data from the main database: {name}--------------------".format(name=confv.database_emodb))
data_info_emodb_df = dl.load_original_data(database=confv.database_emodb)
print("No. of sample audio files in {database} database: {length}\n".format(database=confv.database_emodb, length=len(data_info_emodb_df)))
print("Dataframe head of {database} database:".format(database=confv.database_emodb))
print(data_info_emodb_df.head())
print("\nDataframe tail of {database} database:".format(database=confv.database_emodb))
print(data_info_emodb_df.tail())
print("--------------------Finished loading original data from the main database: {name}--------------------".format(name=confv.database_emodb))
# RANDOM BASE AUDIO WAVE ANALYSIS SECTION
print("\n\n--------------------Started random base audio wave analysis for the main database: {name}--------------------".format(name=confv.database_emodb))
da.base_audio_wave_analysis(data_info_emodb_df.audio_fname[500], database=confv.database_emodb, status=confv.original)
print("--------------------Finished random base audio wave analysis for the main database: {name}--------------------".format(name=confv.database_emodb))
# DATAFRAME ADJUSTMENTS SECTION
print("\n\n--------------------Started dataframe adjustment for the main database: {name}--------------------".format(name=confv.database_emodb))
data_info_emodb_df_m, data_info_emodb_df_f = dc.data_adjustments(data_info_emodb_df)
print("--------------------Finished dataframe adjustment for the main database: {name}--------------------".format(name=confv.database_emodb))
# DATAFRAME SAVING
print("\n\n--------------------Started dataframe saving for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_emodb_male))
emodb_m_df_obj = confc.DataFrame(database=confv.database_emodb, gender=confv.gender_male, df=data_info_emodb_df_m)
sl.save_dataframe(emodb_m_df_obj)
print("--------------------Finished dataframe saving for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_emodb_male))
'''
# LOAD REQUIRED PICKLE
print("\n\n--------------------Started dataframe loading for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_emodb_male))
emodb_m_df_obj = confc.DataFrame(database=confv.database_emodb, gender=confv.gender_male)
emodb_m_df_obj = sl.load_dataframe(emodb_m_df_obj)
data_info_emodb_df_m = emodb_m_df_obj.df
print(emodb_m_df_obj.database)
print(emodb_m_df_obj.gender)
print(len(data_info_emodb_df_m))
print(data_info_emodb_df_m.head())
print(data_info_emodb_df_m.tail())
print(emodb_m_df_obj.dataset)
print(emodb_m_df_obj.save_path)
print("--------------------Finished dataframe loading for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_emodb_male))
'''
# ORIGINAL DATA DISTRIBUTION ANALYSIS SECTION
print("\n\n--------------------Started original data distribution analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_emodb_male))
pc.emotion_distribution_bar_plot(df=data_info_emodb_df_m, title="{database} - {gender} Isolation - No. of Files".format(database=confv.database_emodb, gender=confv.gender_male))
pc.emotion_distribution_pie_plot(df=data_info_emodb_df_m, database=confv.database_emodb, status=confv.original, gender=confv.gender_male, title="{database} - {gender} Isolation - Class/Data/Time Distribution".format(database=confv.database_emodb, gender=confv.gender_male))
print("--------------------Finished original data distribution analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_emodb_male))
# ORIGINAL DATA VISUAL ANALYSIS (signal, fft, fbank, mfcc) SECTION
print("\n\n--------------------Started original data visual analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_emodb_male))
da.visual_analysis(df=data_info_emodb_df_m, database=confv.database_emodb, status=confv.original, gender=confv.gender_male, envelope=False, resample=False)
da.visual_analysis(df=data_info_emodb_df_m, database=confv.database_emodb, status=confv.original, gender=confv.gender_male, envelope=True, resample=True)
print("--------------------Finished original data visual analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_emodb_male))
# DATA CLEANING - DOWN SAMPLING AND NOISE FLOOR DETECTION
print("\n\n--------------------Started data cleaning for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_emodb_male))
dc.data_cleaning(df=data_info_emodb_df_m, database=confv.database_emodb)
print("--------------------Finished data cleaning for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_emodb_male))
'''
# DATA MINIMUM AUDIO LENGTH COMPLIANCE CHECK
print("\n\n--------------------Started data minimum audio compliance check for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_emodb_male))
data_info_emodb_df_m = dc.check_and_adjust_df_for_minimum_audio_length_after_cleaning(df=data_info_emodb_df_m, database=confv.database_emodb, gender=confv.gender_male)
print("--------------------Finished data minimum audio compliance check for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_emodb_male))
'''
# CLEANED DATA DISTRIBUTION ANALYSIS SECTION
print("\n\n--------------------Started cleaned data distribution analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_emodb_male))
pc.emotion_distribution_bar_plot(df=data_info_emodb_df_m, title="{database} - {gender} Isolation - No. of Files".format(database=confv.database_emodb, gender=confv.gender_male))
pc.emotion_distribution_pie_plot(df=data_info_emodb_df_m, database=confv.database_emodb, status=confv.clean, gender=confv.gender_male, title="{database} - {gender} Isolation - Class/Data/Time Distribution".format(database=confv.database_emodb, gender=confv.gender_male))
print("--------------------Finished cleaned data distribution analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_emodb_male))
# CLEANED DATA VISUAL ANALYSIS (signal, fft, fbank, mfcc) SECTION
print("\n\n--------------------Started cleaned data visual analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_emodb_male))
da.visual_analysis(df=data_info_emodb_df_m, database=confv.database_emodb, status=confv.clean, gender=confv.gender_male, envelope=False, resample=False)
# This is same as,
# da.visual_analysis(df=data_info_emodb_df_m, database=confv.database_emodb, status=confv.original, gender=confv.gender_male, envelope=True, resample=True)
# Since these cleaned data are already equipped with envelope and resampling, setting them to False or True does not matter.
# (envelope and resample does not matter when its clean)
print("--------------------Finished cleaned data visual analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_emodb_male))
'''
# Building Features
print("\n\n--------------------Started building features for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_emodb_male))
classes = list(np.unique(data_info_emodb_df_m.stress_emotion))
mconf_emodb_m = confc.ModelConfig(database=confv.database_emodb, gender=confv.gender_male, mode=confv.ml_mode_convolutional, classes=classes)
print(mconf_emodb_m.database)
print(mconf_emodb_m.gender)
print(mconf_emodb_m.mode)
print(mconf_emodb_m.nfilt)
print(mconf_emodb_m.nfeat)
print(mconf_emodb_m.nfft)
print(mconf_emodb_m.step)
print(mconf_emodb_m.classes)
print(mconf_emodb_m.features_save_name)
print(mconf_emodb_m.model_config_save_name)
print(mconf_emodb_m.training_log_name)
print(mconf_emodb_m.model_save_name)
print(mconf_emodb_m.model_h5_save_name)
print(mconf_emodb_m.model_tflite_save_name)
print(mconf_emodb_m.feature_path)
print(mconf_emodb_m.model_config_path)
print(mconf_emodb_m.training_log_path)
print(mconf_emodb_m.model_path)
print(mconf_emodb_m.model_h5_path)
print(mconf_emodb_m.model_tflite_path)
rfpconf_emodb_m = confc.RandFeatParams(df=data_info_emodb_df_m, database=confv.database_emodb, gender=confv.gender_male)
X, y = bf.build_random_features(modelconfig=mconf_emodb_m, randfeatparams=rfpconf_emodb_m)
print("--------------------Finished building features for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_emodb_male))
# MODEL & TRAINING
print("\n\n--------------------Started model training for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_emodb_male))
input_shape = (X.shape[1], X.shape[2], 1)
model = mdl.get_emodb_male_model(input_shape)
y_flat = np.argmax(y, axis=1)
class_weight = compute_class_weight(class_weight='balanced', classes=np.unique(y_flat), y=y_flat)
class_weight = {i : class_weight[i] for i in range(2)}
NAME = "{database}-{gender}-{modeltype}-{spec}-{time}".format(database=confv.database_emodb, gender=confv.gender_male, modeltype=confv.ml_mode_convolutional, spec="1st", time=int(time.time()))
mdl_logs_pth = os.path.join(confv.base_store, confv.log_dir)
tensorboard = TensorBoard(log_dir=os.path.join(mdl_logs_pth, NAME))
dfc.check_dir_inside_saved_features_and_modelconfigs_and_models(parent=confv.saved_training_metrics_logs, database=confv.database_emodb, gender=confv.gender_male)
csv_logger = CSVLogger(mconf_emodb_m.training_log_path)
# earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min')
# mcp_save = ModelCheckpoint('.mdl_wts.hdf5', save_best_only=True, monitor='val_loss', mode='min')
# reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=1, mode='min')
model.fit(X, y, epochs=35, batch_size=32, shuffle=True, class_weight=class_weight, validation_split=0.2, callbacks=[tensorboard, csv_logger])
print("--------------------Finished model training for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_emodb_male))
# MODEL SAVING
print("\n\n--------------------Started model saving for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_emodb_male))
dfc.check_dir_inside_saved_features_and_modelconfigs_and_models(parent=confv.saved_models, database=confv.database_emodb, gender=confv.gender_male)
model.save(mconf_emodb_m.model_path)
model.save(mconf_emodb_m.model_h5_path)
# Convert the model & save in tflite
converter = tf.lite.TFLiteConverter.from_saved_model(mconf_emodb_m.model_path)
tflite_model = converter.convert()
with open(mconf_emodb_m.model_tflite_path, 'wb') as outfile:
outfile.write(tflite_model)
print("--------------------Finished model saving for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_emodb_male))
|
from . import ops
from . import bbox
from . import mask
from . import backbone
from . import neck
from . import head
from . import loss
from . import architecture
from . import post_process
from . import layers
from . import utils
from .ops import *
from .bbox import *
from .mask import *
from .backbone import *
from .neck import *
from .head import *
from .loss import *
from .architecture import *
from .post_process import *
from .layers import *
from .utils import *
|
# -*- encoding: utf-8 -*-
from hypernets.tests.tabular.tb_dask import is_dask_installed, if_dask_ready, setup_dask
from .var_len_categorical_test import TestVarLenCategoricalFeature
if is_dask_installed:
import dask.dataframe as dd
@if_dask_ready
class TestVarLenCategoricalFeatureByDask(TestVarLenCategoricalFeature):
def setup_class(self):
TestVarLenCategoricalFeature.setup_class(self)
setup_dask(self)
self.df = dd.from_pandas(self.df, npartitions=2)
|
from hiro_graph_client import PasswordAuthTokenApiHandler, HiroGraph, SSLConfig, GraphConnectionHandler
from .testconfig import CONFIG
class TestClient:
connection_handler = GraphConnectionHandler(
root_url=CONFIG.get('URL'),
ssl_config=SSLConfig(verify=False)
)
hiro_api_handler = PasswordAuthTokenApiHandler(
username=CONFIG.get('USERNAME'),
password=CONFIG.get('PASSWORD'),
client_id=CONFIG.get('CLIENT_ID'),
client_secret=CONFIG.get('CLIENT_SECRET'),
secure_logging=False,
connection_handler=connection_handler
)
hiro_api_handler2 = PasswordAuthTokenApiHandler(
username=CONFIG.get('USERNAME'),
password=CONFIG.get('PASSWORD'),
client_id=CONFIG.get('CLIENT_ID'),
client_secret=CONFIG.get('CLIENT_SECRET'),
secure_logging=False,
connection_handler=connection_handler
)
def test_simple_query(self):
hiro_client: HiroGraph = HiroGraph(api_handler=self.hiro_api_handler)
hiro_client.get_node(node_id="ckqjkt42s0fgf0883pf0cb0hx_ckqjl014l0hvr0883hxcvmcwq", meta=True)
hiro_client: HiroGraph = HiroGraph(api_handler=self.hiro_api_handler2)
hiro_client.get_node(node_id="ckqjkt42s0fgf0883pf0cb0hx_ckqjl014l0hvr0883hxcvmcwq", meta=True)
# def query(_id: str):
# hiro_client.get_node(node_id=_id)
#
# t1 = threading.Thread(target=query, args=("ckqjkt42s0fgf0883pf0cb0hx_ckqjl014l0hvr0883hxcvmcwq",))
# t2 = threading.Thread(target=query, args=("ckqjkt42s0fgf0883pf0cb0hx_ckshomsv84i9x0783cla0g2bv",))
# t3 = threading.Thread(target=query, args=("ckqjkt42s0fgf0883pf0cb0hx_ckshomsv84i9x0783cla0g2bv",))
#
# t1.start()
# t2.start()
# t3.start()
|
"""Interface tests for Treants.
"""
import datreant as dtr
from datreant import Treant
import pytest
import mock
import os
import py
from . import test_collections
from .test_trees import TestTree
class TestTreant(TestTree):
"""Test generic Treant features"""
treantname = 'testtreant'
@pytest.fixture
def treant(self, tmpdir):
with tmpdir.as_cwd():
c = dtr.treants.Treant(TestTreant.treantname)
return c
@pytest.fixture
def basic_treant(self, tmpdir):
# treant with tags and cats, in tmpdir
with tmpdir.as_cwd():
t1 = Treant('Rincewind')
t1.tags.add('magical')
t1.categories.add({'colour': 'octarine'})
yield t1
def test_init(self, treant, tmpdir):
"""Test basic Treant init"""
assert treant.name == self.treantname
assert treant.abspath == (os.path.join(tmpdir.strpath,
self.treantname) + os.sep)
def test_init_from_Tree(self, tmpdir):
with tmpdir.as_cwd():
tree = dtr.Tree('this')
t = Treant(tree)
assert t.path == tree.path
@pytest.mark.parametrize("tags", (None, [], ['small', 'spiky']))
@pytest.mark.parametrize("categories", (None, {}, {'colour': 'red'}))
def test_init_generate(self, tags, categories, tmpdir):
# test combinations of tags and categories
# when generating from scratch
with tmpdir.as_cwd():
t = dtr.Treant('babs', tags=tags, categories=categories)
if tags is not None:
for tag in tags:
assert tag in t.tags
if categories is not None:
for cat, val in categories.items():
assert cat in t.categories
assert t.categories[cat] == val
@pytest.mark.parametrize("tags", (None, [], ['small', 'spiky']))
@pytest.mark.parametrize("categories", (None, {}, {'colour': 'red'}))
def test_init_regenerate_via_name(self, tags, categories, tmpdir):
# test regenerating a Treant from its directory
with tmpdir.as_cwd():
t = dtr.Treant('this')
t2 = dtr.Treant('this', tags=tags, categories=categories)
if tags is not None:
for tag in tags:
assert tag in t2.tags
if categories is not None:
for cat, val in categories.items():
assert cat in t2.categories
assert t2.categories[cat] == val
def test_gen_OSError(self, tmpdir):
with tmpdir.as_cwd():
with mock.patch('os.makedirs') as mp:
mp.side_effect = OSError(os.errno.ENOSPC, 'Mock - disk full')
with pytest.raises(OSError) as error:
t = Treant('new')
t.tags.add('worthless')
assert error.value.errno == os.errno.ENOSPC
def test_gen_OSError13(self, tmpdir):
with tmpdir.as_cwd():
with mock.patch('os.makedirs') as mp:
mp.side_effect = OSError(os.errno.EACCES, 'Mock - permission denied')
with pytest.raises(OSError) as error:
t = Treant('new')
t.tags.add('worthless')
assert error.value.errno == os.errno.EACCES
assert ("Permission denied; cannot create 'new'"
in str(error))
def test_gen_methods(self, tmpdir):
"""Test the variety of ways we can generate a new Treant
1. ``Treant('treant')``, where 'treant' is not an existing file or
directory path
2. ``Treant('treant')``, where 'treant' is an existing directory
without Treant state files inside
3. ``Treant('/somedir/treant')``, where 'treant' is not an existing
file or directory in 'somedir'
4. ``Treant('/somedir/treant')``, where 'treant' is an existing
directory in 'somedir' without any Treant state files inside
"""
with tmpdir.as_cwd():
# 1
t1 = Treant('newone')
assert os.path.exists(t1._treantdir)
# 2
os.mkdir('another')
t2 = Treant('another')
assert os.path.exists(t2._treantdir)
# 3
t3 = Treant('yet/another')
assert os.path.exists(t3._treantdir)
# 4
os.mkdir('yet/more')
t4 = Treant('yet/more')
assert os.path.exists(t4._treantdir)
@pytest.mark.parametrize("tags", (None, [], ['small', 'spiky']))
@pytest.mark.parametrize("categories", (None, {}, {'colour': 'red'}))
def test_regen(self, tags, categories, tmpdir):
"""Test regenerating Treant.
- create Treant
- modify Treant a little
- create same Treant (should regenerate)
- check that modifications were saved
"""
with tmpdir.as_cwd():
C1 = Treant('regen', tags=tags, categories=categories)
C2 = Treant('regen') # should be regen of C1
if tags is not None:
for tag in tags:
assert tag in C2.tags
if categories is not None:
for cat, val in categories.items():
assert cat in C2.categories
assert C2.categories[cat] == val
assert C1 == C2
# they point to the same file, but they are not the same object
assert C1 is not C2
def test_cmp(self, tmpdir):
"""Test the comparison of Treants when sorting"""
with tmpdir.as_cwd():
c1 = Treant('a')
c2 = Treant('b')
c3 = Treant('c')
assert sorted([c3, c2, c1]) == [c1, c2, c3]
assert c1 <= c2 < c3
assert c3 >= c2 > c1
class TestTags:
"""Test treant tags"""
@pytest.mark.parametrize("tags", (("1", "2", "3"), # tuple
["1", "2", "3"], # list
{"1", "2", "3"})) # set
def test_adding_array_like_tags(self, treant, tags):
treant.tags.add(tags)
assert sorted(list(tags)) == sorted(list(treant.tags))
def test_add_tags(self, treant):
treant.tags.add('marklar')
assert 'marklar' in treant.tags
treant.tags.add('lark', 'bark')
assert 'marklar' in treant.tags
assert 'lark' in treant.tags
assert 'bark' in treant.tags
def test_remove_tags(self, treant):
treant.tags.add('marklar')
assert 'marklar' in treant.tags
treant.tags.remove('marklar')
assert 'marklar' not in treant.tags
treant.tags.add('marklar')
treant.tags.add('lark', 'bark')
treant.tags.add(['fark', 'bark'])
assert 'marklar' in treant.tags
assert 'lark' in treant.tags
assert 'bark' in treant.tags
assert 'fark' in treant.tags
assert len(treant.tags) == 4
treant.tags.remove('fark')
assert 'fark' not in treant.tags
assert len(treant.tags) == 3
treant.tags.remove('fark')
assert len(treant.tags) == 3
treant.tags.clear()
assert len(treant.tags) == 0
def test_tags_set_behavior(self, tmpdir):
with tmpdir.as_cwd():
# 1
t1 = Treant('maple')
t1.tags.add(['sprout', 'deciduous'])
# 2
t2 = Treant('sequoia')
t2.tags.add(['sprout', 'evergreen'])
tags_union = t1.tags | t2.tags
for t in ['sprout', 'deciduous', 'evergreen']:
assert t in tags_union
tags_intersect = t1.tags & t2.tags
assert 'sprout' in tags_intersect
for t in ['deciduous', 'evergreen']:
assert t not in tags_intersect
tags_diff = t1.tags - t2.tags
assert 'deciduous' in tags_diff
for t in ['sprout', 'evergreen']:
assert t not in tags_diff
tags_symm_diff = t1.tags ^ t2.tags
for t in ['deciduous', 'evergreen']:
assert t in tags_symm_diff
assert 'sprout' not in tags_symm_diff
# 3
t3 = Treant('oak')
t3.tags.add(['deciduous'])
# Test set membership
assert t1.tags <= t1.tags
assert not t1.tags < t1.tags
assert t1.tags == t1.tags
assert not t1.tags < t3.tags
assert t1.tags > t3.tags
# test TypeErrors in Tags
# type_error_msg = "Operands must be AggTags, Tags, or a set."
with pytest.raises(TypeError) as e:
('tree') == t1.tags
# assert e.value.message == type_error_msg
with pytest.raises(TypeError) as e:
t1.tags < ('tree')
# assert e.value.message == type_error_msg
with pytest.raises(TypeError) as e:
('tree') - t1.tags
# assert e.value.message == type_error_msg
with pytest.raises(TypeError) as e:
t1.tags - ('tree')
# assert e.value.message == type_error_msg
with pytest.raises(TypeError) as e:
('tree') | t1.tags
# assert e.value.message == type_error_msg
with pytest.raises(TypeError) as e:
t1.tags | ('tree')
# assert e.value.message == type_error_msg
with pytest.raises(TypeError) as e:
('tree') & t1.tags
# assert e.value.message == type_error_msg
with pytest.raises(TypeError) as e:
t1.tags & ('tree')
# assert e.value.message == type_error_msg
with pytest.raises(TypeError) as e:
('tree') ^ t1.tags
# assert e.value.message == type_error_msg
with pytest.raises(TypeError) as e:
t1.tags ^ ('tree')
# assert e.value.message == type_error_msg
def test_tags_setting(self, tmpdir):
"""Test that we can set tags with lists or sets, or with Tags
objects.
"""
with tmpdir.as_cwd():
# set with a list
t1 = Treant('maple')
t1.tags = ['sprout', 'deciduous']
assert t1.tags == {'sprout', 'deciduous'}
# set with a set
t2 = Treant('elm')
t2.tags = {'sprout', 'deciduous'}
assert t2.tags == {'sprout', 'deciduous'}
# set with a Tags object
t3 = Treant('sequoia')
t3.tags = t2.tags
assert t3.tags == {'sprout', 'deciduous'}
def test_tags_fuzzy(self, tmpdir, treant):
"""Test that fuzzy matching for tags works as expected.
"""
treant.tags.add('bark', 'leafy', 'green', 'Leafy')
for tag in ('leafy', 'Leafy'):
assert tag in treant.tags.fuzzy('leafy')
def test_tags_getitem(self, treant):
"""Test the queryability of tags via its __getitem__ method."""
treant.tags.add('marklar', 'lark', 'bark')
t = treant
# single presence
assert t.tags['lark']
assert not t.tags['mark']
# single not present
assert t.tags[{'mark'}]
# anding
assert t.tags[['marklar', 'bark']]
# oring
assert t.tags['marklar', 'bark']
assert t.tags['mark', 'bark']
assert not t.tags['mark', 'dark']
# not anding
assert t.tags[{'dark', 'marklar'}]
# complex logic
assert t.tags[[('marklar', 'bark'), {'dark'}]]
@pytest.mark.parametrize('tag', (1, 1.2))
def test_tags_only_strings(self, treant, tag):
with pytest.raises(ValueError):
treant.tags.add(tag)
def test_tags_printing(self, treant):
treant.tags.add('marklar')
repr = str(treant.tags)
assert repr == "Tags\n====\n'marklar'\n"
class TestCategories:
"""Test treant categories"""
def test_add_categories(self, treant):
treant.categories.add(marklar=42)
assert 'marklar' in treant.categories
treant.categories.add({'bark': 'snark'}, lark=27)
assert 'bark' in treant.categories
assert 'snark' not in treant.categories
assert 'bark' in treant.categories
assert treant.categories['bark'] == 'snark'
assert treant.categories['lark'] == 27
treant.categories['lark'] = 42
assert treant.categories['lark'] == 42
def test_remove_categories(self, treant):
treant.categories.add(marklar=42)
assert 'marklar' in treant.categories
treant.categories.remove('marklar')
assert 'marklar' not in treant.categories
treant.categories.add({'bark': 'snark'}, lark=27)
del treant.categories['bark']
assert 'bark' not in treant.categories
# should just work, even if key isn't present
treant.categories.remove('smark')
treant.categories['lark'] = 42
treant.categories['fark'] = 32.3
treant.categories.clear()
assert len(treant.categories) == 0
def test_add_wrong(self, treant):
with pytest.raises(TypeError):
treant.categories.add('temperature', 300)
with pytest.raises(TypeError):
treant.categories.add(['mark', 'matt'])
@pytest.mark.parametrize('key, val', [[2, 'twenty'],
[['blarg'], "nothin'"],
[None, "literally nothin'"],
[True, 'tautologically']])
def test_add_wrong_keys(self, treant, key, val):
# always test both addition methods
with pytest.raises(TypeError):
treant.categories[key] = val
with pytest.raises(TypeError):
treant.categories.add(key, val)
@pytest.mark.parametrize('key, val', [['bark', ['shaggy']],
['snark', {'yes'}]])
def test_add_wrong_values(self, treant, key, val):
# always test both addition methods
with pytest.raises(TypeError):
treant.categories.add(key, val)
with pytest.raises(TypeError):
treant.categories[key] = val
def test_None_deletes(self, treant):
"""Setting a category to ``None`` should delete the value.
"""
treant.categories['size'] = 'large'
treant.categories['bark'] = 'smooth'
treant.categories['bark'] = None
assert 'bark' not in treant.categories
assert 'size' in treant.categories
treant.categories['bark'] = 'smooth'
treant.categories.add(bark=None)
assert 'bark' not in treant.categories
assert 'size' in treant.categories
def test_setting_to_None_VE(self, treant):
with pytest.raises(ValueError) as err:
treant.categories['colour'] = None
assert "Cannot set to 'None'" in str(err)
def test_KeyError(self, treant):
with pytest.raises(KeyError):
treant.categories['hello?']
def test_get_categories(self, treant):
treant.categories['bark'] = 'dark'
treant.categories['leaves'] = 'many'
treant.categories['roots'] = 'shallow'
# get a single category
assert treant.categories['leaves'] == 'many'
# get multiple categories with list
assert treant.categories[['leaves', 'bark']] == ['many', 'dark']
# get multiple categories with set
assert treant.categories[{'leaves', 'bark'}] == {'leaves': 'many',
'bark': 'dark'}
def test_set_categories(self, treant, tmpdir):
a_dict = {'leaves': 'many', 'bark': 'dark'}
treant.categories = a_dict
assert treant.categories == a_dict
a_dict.update({'roots': 'shallow'})
assert treant.categories != a_dict
treant.categories = a_dict
assert a_dict == treant.categories
# test setting from other Treant's categories
with tmpdir.as_cwd():
s = dtr.Treant('sprout')
s.categories = {'shoes': False, 'shirt': False}
assert treant.categories != s.categories
treant.categories = s.categories
assert treant.categories == s.categories
def test_from_treant(self, treant, tmpdir):
with tmpdir.as_cwd():
dtr.Treant('sprout', categories=treant.categories)
class TestReadOnly:
"""Test Treant functionality when read-only"""
@pytest.fixture
def treant(self, tmpdir, request):
with tmpdir.as_cwd():
c = dtr.treants.Treant('testtreant')
c.tags.add('72')
py.path.local(c.abspath).chmod(0o0550, rec=True)
def fin():
py.path.local(c.abspath).chmod(0o0770, rec=True)
request.addfinalizer(fin)
return c
def test_treant_read_only(self, treant):
"""Test that a read-only Treant can be accessed, but not written to.
"""
c = dtr.treants.Treant(treant.abspath)
assert '72' in c.tags
with pytest.raises(OSError):
c.tags.add('yet another')
|
from django.apps import AppConfig
class HomeShortyConfig(AppConfig):
name = 'home_shorty'
|
import urllib2
from jsonrpc import JSONRPCService
from urllib import urlencode
from utils.cmd import keep_trying
def reviews_pastehtml_upload(source, input_type="html"):
"""
Uploads 'source' as an 'input_type' type to pastehtml.com.
source ....... source of the webpage/text
input_type ... txt or html (default html)
"""
url = "http://pastehtml.com/upload/create?input_type=%s&result=address"
request = urllib2.Request(url % input_type, data=urlencode([("txt", source)]))
result = keep_trying(lambda: urllib2.urlopen(request), urllib2.URLError, "access pastehtml.com")
s = result.read()
# There is a bug at pastehtml.com, that sometimes it returns:
# http://pastehtml.comhttp://pastehtml.com/view/1eddmnp.html
# instead of:
# http://pastehtml.com/view/1eddmnp.html
# So we check if this is the case, and correct it:
if s.find("http", 2) != -1:
s = s[s.find("http", 2):]
return s
def reviews_sympy_org_upload(data, url_base):
def _do_upload():
s = JSONRPCService(url_base + "/async")
r = s.RPC.upload_task(data["num"], data["result"],
data["interpreter"], data["testcommand"], data["log"])
if "task_url" not in r:
# This happens for example when the server is over quota, see
# https://github.com/sympy/sympy-bot/issues/110
# Note that this exact error message is checked below, in case
# something else raises a ValueError
raise urllib2.URLError("Quota")
return r
def _handler(e):
if e.message == "Quota":
print "Server appears to be over quota."
r = keep_trying(_do_upload, urllib2.URLError, "access %s" %
url_base, _handler)
return r["task_url"]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-03-10 12:27
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('config', '0002_auto_20180310_1024'),
]
operations = [
migrations.AlterField(
model_name='link',
name='created_time',
field=models.DateTimeField(auto_now_add=True, verbose_name='创建时间'),
),
migrations.AlterField(
model_name='link',
name='href',
field=models.URLField(verbose_name='链接'),
),
migrations.AlterField(
model_name='link',
name='status',
field=models.PositiveIntegerField(choices=[(1, '上线'), (2, '删除')], default=1, verbose_name='状态'),
),
migrations.AlterField(
model_name='link',
name='title',
field=models.CharField(max_length=255, verbose_name='名字'),
),
migrations.AlterField(
model_name='link',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='作者'),
),
migrations.AlterField(
model_name='link',
name='weight',
field=models.PositiveIntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], default=1, help_text='权重越高越靠前', verbose_name='权重'),
),
migrations.AlterField(
model_name='sidebar',
name='content',
field=models.CharField(max_length=255, verbose_name='内容'),
),
migrations.AlterField(
model_name='sidebar',
name='created_time',
field=models.DateTimeField(auto_now_add=True, verbose_name='创建时间'),
),
migrations.AlterField(
model_name='sidebar',
name='display_type',
field=models.PositiveIntegerField(choices=[(1, 'HTML'), (2, '最新文章'), (3, '最热文章'), (4, '最新评论')], default=1, verbose_name='展示状态'),
),
migrations.AlterField(
model_name='sidebar',
name='status',
field=models.PositiveIntegerField(choices=[(1, '上线'), (2, '删除')], default=1, verbose_name='状态'),
),
migrations.AlterField(
model_name='sidebar',
name='title',
field=models.CharField(max_length=255, verbose_name='名字'),
),
migrations.AlterField(
model_name='sidebar',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='作者'),
),
]
|
# __init__.py
# Version of the visual-automata package
__version__ = "1.1.1"
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 15:37:10 2020
@author: harinder
"""
import yaml
def convert_to_jmx():
filename = "result/jmeter.jmx"
stream = open("result/jmeter.yml","r")
    my_dicts = yaml.safe_load_all(stream)
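    # Expected YAML structure per document (a sketch based on the keys
    # read below; keys are processed in sorted order):
    #   Name:            test plan / thread group name
    #   TestConcurrency: number of threads in the thread group
    #   TestThroughput:  constant throughput timer value
    #   Threads:         list of items, each with ThreadName, Throughput
    #                    and Method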
try:
with open(filename,"w") as xmlfile:
for my_dict in my_dicts:
for k,v in sorted(my_dict.items()):
if k=="Name":
testplanName="<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\
<jmeterTestPlan version=\"1.2\" properties=\"5.0\" jmeter=\"5.3\">\n\
<hashTree>\n\
<TestPlan guiclass=\"TestPlanGui\" testclass=\"TestPlan\" testname=\""+ str(v) +"\" enabled=\"true\">\n\
<stringProp name=\"TestPlan.comments\"></stringProp>\n\
<boolProp name=\"TestPlan.functional_mode\">false</boolProp>\n\
<boolProp name=\"TestPlan.tearDown_on_shutdown\">true</boolProp>\n\
<boolProp name=\"TestPlan.serialize_threadgroups\">false</boolProp>\n\
<elementProp name=\"TestPlan.user_defined_variables\" elementType=\"Arguments\" guiclass=\"ArgumentsPanel\" testclass=\"Arguments\" testname=\"User Defined Variables\" enabled=\"true\">\n\
<collectionProp name=\"Arguments.arguments\"/>\n\
</elementProp>\n\
<stringProp name=\"TestPlan.user_define_classpath\"></stringProp>\n\
</TestPlan>\n\
<hashTree>\n\
<ThreadGroup guiclass=\"ThreadGroupGui\" testclass=\"ThreadGroup\" testname=\""+ str(v) +"\" enabled=\"true\">\n"
xmlfile.write(str(testplanName))
if k=="TestConcurrency":
concurrency="<stringProp name=\"ThreadGroup.on_sample_error\">continue</stringProp>\n\
<elementProp name=\"ThreadGroup.main_controller\" elementType=\"LoopController\" guiclass=\"LoopControlPanel\" testclass=\"LoopController\" testname=\"Loop Controller\" enabled=\"true\">\n\
<boolProp name=\"LoopController.continue_forever\">false</boolProp>\n\
<stringProp name=\"LoopController.loops\">100</stringProp>\n\
</elementProp>\n\
<stringProp name=\"ThreadGroup.num_threads\">"+ str(v) +"</stringProp>\n\
<stringProp name=\"ThreadGroup.ramp_time\">10</stringProp>\n\
<boolProp name=\"ThreadGroup.scheduler\">false</boolProp>\n\
<stringProp name=\"ThreadGroup.duration\"></stringProp>\n\
<stringProp name=\"ThreadGroup.delay\"></stringProp>\n\
<boolProp name=\"ThreadGroup.same_user_on_next_iteration\">true</boolProp>\n\
</ThreadGroup>\n\
<hashTree>\n"
xmlfile.write(str(concurrency))
if k=="TestThroughput":
TestThroughput="<ConstantThroughputTimer guiclass=\"TestBeanGUI\" testclass=\"ConstantThroughputTimer\" testname=\"Constant Throughput Timer\" enabled=\"true\">\n\
<intProp name=\"calcMode\">0</intProp>\n\
<doubleProp>\n\
<name>throughput</name>\n\
<value>"+ str(v) +"</value>\n\
<savedValue>0.0</savedValue>\n\
</doubleProp>\n\
</ConstantThroughputTimer>\n\
<hashTree/>\n\
<IncludeController guiclass=\"IncludeControllerGui\" testclass=\"IncludeController\" testname=\"Include Controller\" enabled=\"false\">\n\
<stringProp name=\"IncludeController.includepath\"></stringProp>\n\
</IncludeController>\n\
<hashTree/>\n\
<ConfigTestElement guiclass=\"HttpDefaultsGui\" testclass=\"ConfigTestElement\" testname=\"HTTP Request Defaults\" enabled=\"true\">\n\
<elementProp name=\"HTTPsampler.Arguments\" elementType=\"Arguments\" guiclass=\"HTTPArgumentsPanel\" testclass=\"Arguments\" testname=\"User Defined Variables\" enabled=\"true\">\n\
<collectionProp name=\"Arguments.arguments\"/>\n\
</elementProp>\n\
<stringProp name=\"HTTPSampler.domain\"></stringProp>\n\
<stringProp name=\"HTTPSampler.port\"></stringProp>\n\
<stringProp name=\"HTTPSampler.protocol\"></stringProp>\n\
<stringProp name=\"HTTPSampler.contentEncoding\"></stringProp>\n\
<stringProp name=\"HTTPSampler.path\"></stringProp>\n\
<stringProp name=\"HTTPSampler.concurrentPool\">6</stringProp>\n\
<stringProp name=\"HTTPSampler.connect_timeout\"></stringProp>\n\
<stringProp name=\"HTTPSampler.response_timeout\"></stringProp>\n</ConfigTestElement>\n\
<hashTree/>\n\
<CSVDataSet guiclass=\"TestBeanGUI\" testclass=\"CSVDataSet\" testname=\"CSV Data Set Config\" enabled=\"true\">\n\
<stringProp name=\"delimiter\">,</stringProp>\n\
<stringProp name=\"fileEncoding\"></stringProp>\n\
<stringProp name=\"filename\"></stringProp>\n\
<boolProp name=\"ignoreFirstLine\">false</boolProp>\n\
<boolProp name=\"quotedData\">false</boolProp>\n\
<boolProp name=\"recycle\">true</boolProp>\n\
<stringProp name=\"shareMode\">shareMode.all</stringProp>\n\
<boolProp name=\"stopThread\">false</boolProp>\n\
<stringProp name=\"variableNames\"></stringProp>\n\
</CSVDataSet>\n\
<hashTree/>\n\
<CookieManager guiclass=\"CookiePanel\" testclass=\"CookieManager\" testname=\"HTTP Cookie Manager\" enabled=\"true\">\n\
<collectionProp name=\"CookieManager.cookies\"/>\n\
<boolProp name=\"CookieManager.clearEachIteration\">false</boolProp>\n\
<boolProp name=\"CookieManager.controlledByThreadGroup\">false</boolProp>\n\
</CookieManager>\n\
<hashTree/>\n\
<CacheManager guiclass=\"CacheManagerGui\" testclass=\"CacheManager\" testname=\"HTTP Cache Manager\" enabled=\"true\">\n\
<boolProp name=\"clearEachIteration\">false</boolProp>\n\
<boolProp name=\"useExpires\">true</boolProp>\n\
<boolProp name=\"CacheManager.controlledByThread\">false</boolProp>\n\
</CacheManager>\n\
<hashTree/>\n"
xmlfile.write(str(TestThroughput))
if k=="Threads":
for thread in my_dict['Threads']:
for x in enumerate(thread.items()):
thread="<ThroughputController guiclass=\"ThroughputControllerGui\" testclass=\"ThroughputController\" testname=\""+ str(x[1][1]['ThreadName']) +" - Throughput Controller\" enabled=\"true\">\n\
<intProp name=\"ThroughputController.style\">1</intProp>\n\
<boolProp name=\"ThroughputController.perThread\">false</boolProp>\n\
<intProp name=\"ThroughputController.maxThroughput\">1</intProp>\n\
<FloatProperty>\n\
<name>ThroughputController.percentThroughput</name>\n\
<value>"+ str(x[1][1]['Throughput'])+ "</value>\n\
<savedValue>0.0</savedValue>\n\
</FloatProperty>\n\
</ThroughputController>\n\
<hashTree>\n\
<HTTPSamplerProxy guiclass=\"HttpTestSampleGui\" testclass=\"HTTPSamplerProxy\" testname=\""+ str(x[1][1]['ThreadName']) +"\" enabled=\"true\">\n\
<elementProp name=\"HTTPsampler.Arguments\" elementType=\"Arguments\" guiclass=\"HTTPArgumentsPanel\" testclass=\"Arguments\" testname=\"User Defined Variables\" enabled=\"true\">\n\
<collectionProp name=\"Arguments.arguments\"/>\n\
</elementProp>\n\
<stringProp name=\"HTTPSampler.domain\"></stringProp>\n\
<stringProp name=\"HTTPSampler.port\"></stringProp>\n\
<stringProp name=\"HTTPSampler.protocol\"></stringProp>\n\
<stringProp name=\"HTTPSampler.contentEncoding\"></stringProp>\n\
<stringProp name=\"HTTPSampler.path\"></stringProp>\n\
<stringProp name=\"HTTPSampler.method\">"+ str(x[1][1]['Method'])+ "</stringProp>\n\
<boolProp name=\"HTTPSampler.follow_redirects\">true</boolProp>\n\
<boolProp name=\"HTTPSampler.auto_redirects\">false</boolProp>\n\
<boolProp name=\"HTTPSampler.use_keepalive\">true</boolProp>\n\
<boolProp name=\"HTTPSampler.DO_MULTIPART_POST\">false</boolProp>\n\
<stringProp name=\"HTTPSampler.embedded_url_re\"></stringProp>\n\
<stringProp name=\"HTTPSampler.connect_timeout\"></stringProp>\n\
<stringProp name=\"HTTPSampler.response_timeout\"></stringProp>\n\
</HTTPSamplerProxy>\n\
<hashTree>\n\
<ResponseAssertion guiclass=\"AssertionGui\" testclass=\"ResponseAssertion\" testname=\"Response Assertion\" enabled=\"true\">\n\
<collectionProp name=\"Asserion.test_strings\">\n\
<stringProp name=\"49586\">200</stringProp>\n\
</collectionProp>\n\
<stringProp name=\"Assertion.custom_message\"></stringProp>\n\
<stringProp name=\"Assertion.test_field\">Assertion.response_code</stringProp>\n\
<boolProp name=\"Assertion.assume_success\">false</boolProp>\n\
<intProp name=\"Assertion.test_type\">16</intProp>\n\
</ResponseAssertion>\n\
<hashTree/>\n\
<HeaderManager guiclass=\"HeaderPanel\" testclass=\"HeaderManager\" testname=\"HTTP Header Manager\" enabled=\"true\">\n\
<collectionProp name=\"HeaderManager.headers\"/>\n\
</HeaderManager>\n\
<hashTree/>\n\
</hashTree>\n\
</hashTree>\n"
xmlfile.write(str(thread))
summary="<ResultCollector guiclass=\"SummaryReport\" testclass=\"ResultCollector\" testname=\"Summary Report\" enabled=\"true\">\n\
<boolProp name=\"ResultCollector.error_logging\">false</boolProp>\n\
<objProp>\n\
<name>saveConfig</name>\n\
<value class=\"SampleSaveConfiguration\">\n\
<time>true</time>\n\
<latency>true</latency>\n\
<timestamp>true</timestamp>\n\
<success>true</success>\n\
<label>true</label>\n\
<code>true</code>\n\
<message>true</message>\n\
<threadName>true</threadName>\n\
<dataType>true</dataType>\n\
<encoding>false</encoding>\n\
<assertions>true</assertions>\n\
<subresults>true</subresults>\n\
<responseData>false</responseData>\n\
<samplerData>false</samplerData>\n\
<xml>false</xml>\n\
<fieldNames>true</fieldNames>\n\
<responseHeaders>false</responseHeaders>\n\
<requestHeaders>false</requestHeaders>\n\
<responseDataOnError>false</responseDataOnError>\n\
<saveAssertionResultsFailureMessage>true</saveAssertionResultsFailureMessage>\n\
<assertionsResultsToSave>0</assertionsResultsToSave>\n\
<bytes>true</bytes>\n\
<sentBytes>true</sentBytes>\n\
<url>true</url>\n\
<threadCounts>true</threadCounts>\n\
<idleTime>true</idleTime>\n\
<connectTime>true</connectTime>\n\
</value>\n\
</objProp>\n\
<stringProp name=\"filename\"></stringProp>\n\
</ResultCollector>\n\
<hashTree/>\n\
</hashTree>\n\
<BackendListener guiclass=\"BackendListenerGui\" testclass=\"BackendListener\" testname=\"Backend Listener\" enabled=\"false\">\n\
<elementProp name=\"arguments\" elementType=\"Arguments\" guiclass=\"ArgumentsPanel\" testclass=\"Arguments\" enabled=\"true\">\n\
<collectionProp name=\"Arguments.arguments\">\n\
<elementProp name=\"graphiteMetricsSender\" elementType=\"Argument\">\n\
<stringProp name=\"Argument.name\">graphiteMetricsSender</stringProp>\n\
<stringProp name=\"Argument.value\">org.apache.jmeter.visualizers.backend.graphite.TextGraphiteMetricsSender</stringProp>\n\
<stringProp name=\"Argument.metadata\">=</stringProp>\n\
</elementProp>\n\
<elementProp name=\"graphiteHost\" elementType=\"Argument\">\n\
<stringProp name=\"Argument.name\">graphiteHost</stringProp>\n\
<stringProp name=\"Argument.value\"></stringProp>\n\
<stringProp name=\"Argument.metadata\">=</stringProp>\n\
</elementProp>\n\
<elementProp name=\"graphitePort\" elementType=\"Argument\">\n\
<stringProp name=\"Argument.name\">graphitePort</stringProp>\n\
<stringProp name=\"Argument.value\">2003</stringProp>\n\
<stringProp name=\"Argument.metadata\">=</stringProp>\n\
</elementProp>\n\
<elementProp name=\"rootMetricsPrefix\" elementType=\"Argument\">\n\
<stringProp name=\"Argument.name\">rootMetricsPrefix</stringProp>\n\
<stringProp name=\"Argument.value\">jmeter.</stringProp>\n\
<stringProp name=\"Argument.metadata\">=</stringProp>\n\
</elementProp>\n\
<elementProp name=\"summaryOnly\" elementType=\"Argument\">\n\
<stringProp name=\"Argument.name\">summaryOnly</stringProp>\n\
<stringProp name=\"Argument.value\">true</stringProp>\n\
<stringProp name=\"Argument.metadata\">=</stringProp>\n\
</elementProp>\n\
<elementProp name=\"samplersList\" elementType=\"Argument\">\n\
<stringProp name=\"Argument.name\">samplersList</stringProp>\n\
<stringProp name=\"Argument.value\"></stringProp>\n\
<stringProp name=\"Argument.metadata\">=</stringProp>\n\
</elementProp>\n\
<elementProp name=\"percentiles\" elementType=\"Argument\">\n\
<stringProp name=\"Argument.name\">percentiles</stringProp>\n\
<stringProp name=\"Argument.value\">90;95;99</stringProp>\n\
<stringProp name=\"Argument.metadata\">=</stringProp>\n\
</elementProp>\n\
</collectionProp>\n\
</elementProp>\n\
<stringProp name=\"classname\">org.apache.jmeter.visualizers.backend.graphite.GraphiteBackendListenerClient</stringProp>\n\
</BackendListener>\n\
<hashTree/>\n\
</hashTree>\n\
</hashTree>\n\
</jmeterTestPlan>\n"
xmlfile.write(summary)
    except Exception as e:
        print("exception: {}".format(e))
    finally:
        stream.close()
|
from dlapp import create_from_yaml_file
from dlapp import create_from_yaml_data
from dlapp import create_from_json_file
from dlapp import create_from_json_data
from dlapp import create_from_csv_file
from dlapp import create_from_csv_data
from os import path
test_path = path.dirname(__file__)
class TestDynamicDict:
def test_creating_dlquery_from_yaml_file(self):
"""Test creating a dlapp instance from YAML file."""
filename = path.join(test_path, 'data/sample.yaml')
query_obj = create_from_yaml_file(filename)
assert query_obj.get('a') == 'Apricot'
def test_creating_dlquery_from_yaml_data(self):
"""Test creating a dlapp instance from YAML data."""
data = '''{"a": "Apricot", "b": "Banana"}'''
query_obj = create_from_yaml_data(data)
assert query_obj.get('a') == 'Apricot'
def test_creating_dlquery_from_json_file(self):
"""Test creating a dlapp instance from JSON file."""
filename = path.join(test_path, 'data/sample.json')
# filename = self.json_filename
query_obj = create_from_json_file(filename)
assert query_obj.get('a') == 'Apricot'
def test_creating_dlquery_from_json_data(self):
"""Test creating a dlapp instance from JSON data."""
data = '''{"a": "Apricot", "b": "Banana"}'''
query_obj = create_from_json_data(data)
assert query_obj.get('a') == 'Apricot'
def test_creating_dlquery_from_csv_file(self):
"""Test creating a dlapp instance from CSV file."""
filename = path.join(test_path, 'data/sample.csv')
query_obj = create_from_csv_file(filename)
result = query_obj.find(lookup='a=_iwildcard(Ap*)')
assert result == ['Apple', 'Apricot']
        result = query_obj.find(lookup='a=_regex(Ap\\w+)', select='')
        assert result == ['Apple', 'Apricot']
        result = query_obj.find(lookup='a', select='where a match Ap\\w+')
        assert result == ['Apple', 'Apricot']
def test_creating_dlquery_from_csv_data(self):
"""Test creating a dlapp instance from CSV data."""
data = '''
a,b,c
Apple,Banana,Cherry
Apricot,Boysenberry,Cantaloupe
Avocado,Blueberry,Clementine
'''
data = '\n'.join(line.strip() for line in data.strip().splitlines())
query_obj = create_from_csv_data(data)
result = query_obj.find(lookup='b=_iregex(.+n.+)')
assert result == ['Banana', 'Boysenberry']
|
"""
Constants and functions pertaining to NFC bounding interval annotator datasets.
"""
import math
from tensorflow.data import Dataset, TFRecordDataset
from tensorflow.io import FixedLenFeature
import numpy as np
import tensorflow as tf
import vesper.util.signal_utils as signal_utils
import vesper.util.time_frequency_analysis_utils as tfa_utils
'''
Source datasets are tfrecord files.
Each source dataset is repeated, and elements from the different sources
are interleaved and parsed. Each element includes a waveform, clip start
and end indices, call start and end indices (when the element is a call),
and a clip ID.
'''
'''
* Spectrogram units should be proportional to spectral density,
or watts per hertz.
* The expected value of a spectrogram value for a white noise
signal should not change with sample rate, window size, hop size,
or DFT size.
* Changing the DFT size but not the window size is a kind of
interpolation that should leave the spectrogram magnitude
more or less unchanged.
* Sinusoidal signal power should scale with the window duration
(measured in seconds), since the window bandwidth is inversely
proportional to the duration. For example, for a bin-centered
sinusoid, the value of its bin will double if the window
duration doubles, since the same amount of signal power is
present in a bin that is only half as wide.
* Should we scale spectrogram values somehow according to window
type, i.e. as a function of the window coefficients?
* Decibel-unit-valued spectrograms should have a maximum value
(perhaps, say, for white uniformly distributed noise of maximum
amplitude) of around 100.
* The sample value range of waveforms from which spectrograms are
computed should be [-1, 1]. Signals of different bit depths
should be scaled to this common range before computing their
spectrograms.
'''
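'''
Worked example of the normalization sketched above (an illustration only,
assuming a rectangular window and a full-scale, bin-centered sinusoid;
the epsilon value stands in for `spectrogram_log_epsilon`):

    window_size = 32
    # The DFT bin magnitude of the sinusoid is window_size / 2, so its
    # squared magnitude is (window_size / 2) ** 2.
    gram = (window_size / 2) ** 2
    gram *= 1 / (window_size / 2) ** 2                         # -> 1.0
    gram = 100 + 10 / math.log(10) * math.log(gram + 1e-10)    # -> ~100 dB

See `_ExampleProcessor.compute_spectrogram` and `_test_stft` below for the
corresponding code.
'''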
_WAVEFORM_EXAMPLE_FEATURES = {
'waveform': FixedLenFeature((), tf.string),
'clip_start_index': FixedLenFeature((), tf.int64),
'clip_end_index': FixedLenFeature((), tf.int64),
'call_start_index': FixedLenFeature((), tf.int64),
'call_end_index': FixedLenFeature((), tf.int64),
'clip_id': FixedLenFeature((), tf.int64),
}
def create_waveform_dataset_from_tensors(waveforms):
# One might like to just say:
#
# dataset = tf.data.Dataset.from_tensor_slices(waveforms)
#
# here instead of using a generator, but that only works if
# the waveforms all have the same length. Using a generator
# works even if the waveform lengths differ.
def generator():
for waveform in waveforms:
yield _normalize_waveform(waveform)
return tf.data.Dataset.from_generator(generator, tf.float32)
def create_waveform_dataset_from_tfrecord_files(dir_path):
"""
Creates a dataset of waveforms and associated metadata.
Each dataset example has the form:
(waveform, clip_start_index, clip_end_index, call_start_index,
call_end_index, clip_id)
All of the waveforms of the dataset have the same length. Each
waveform contains one Vesper clip, which starts and ends at
waveform indices `clip_start_index` and `clip_end_index`. Each
clip contains a nocturnal flight call that starts and ends at
waveform indices `call_start_index` and `call_end_index`.
The `clip_id` of a dataset example is the ID of the clip included
in the waveform in the Vesper archive to which the clip belongs.
"""
file_paths = dir_path.glob('*.tfrecords')
# Convert tfrecord file paths from `Path` objects to strings.
file_paths = sorted(str(p) for p in file_paths)
# Shuffle file paths.
file_paths = np.random.permutation(file_paths)
# Create dataset of file paths.
dataset = Dataset.from_tensor_slices(file_paths)
# Create dataset of example protos, interleaving protos from the
# different tfrecord files.
dataset = dataset.interleave(
_create_repeating_tfrecords_dataset,
cycle_length=len(file_paths),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Parse example protos.
dataset = dataset.map(
_parse_example,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
def _create_repeating_tfrecords_dataset(file_path):
return TFRecordDataset([file_path]).repeat()
def _parse_example(proto):
example = tf.io.parse_single_example(proto, _WAVEFORM_EXAMPLE_FEATURES)
# Get waveform tensor.
bytes_ = example['waveform']
waveform = tf.io.decode_raw(bytes_, out_type=tf.int16, little_endian=True)
waveform = _normalize_waveform(waveform)
clip_start_index = example['clip_start_index']
clip_end_index = example['clip_end_index']
call_start_index = example['call_start_index']
call_end_index = example['call_end_index']
clip_id = example['clip_id']
return (
waveform, clip_start_index, clip_end_index,
call_start_index, call_end_index, clip_id)
def _normalize_waveform(waveform):
"""
Normalizes a waveform so it has 32-bit floating point samples in [-1, 1].
"""
return tf.cast(waveform, tf.float32) / 32768
def create_training_dataset(dir_path, settings):
"""
Creates a dataset suitable for training a neural network.
Each dataset example has the form:
(spectrogram slice, label)
All of the spectrogram slices of the dataset have the same shape,
of the form (spectrum count, bin count, 1). The exact shape depends
on the values of several `settings` attributes. The spectrogram slices
are suitable for input into a Keras convolutional neural network.
    The `label` of a dataset example is zero if the spectrogram slice
    does not contain a call starting at a certain index (though it may
    contain a call starting at another index), and one if it does
    contain a call starting at that index.
"""
dataset = create_waveform_dataset_from_tfrecord_files(dir_path)
processor = _ExampleProcessor(settings)
dataset = dataset.map(
processor.preprocess_waveform,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.map(
processor.compute_spectrogram,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.map(
processor.slice_spectrogram_along_frequency_axis_with_shift,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.map(
processor.normalize_spectrogram_background,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.map(
_diddle_example,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
def _diddle_example(gram, label, _):
# Reshape gram for input into Keras CNN.
gram = tf.expand_dims(gram, 2)
# Return only gram and label, discarding clip ID.
return gram, label
def create_validation_dataset(dir_path, settings):
dataset = create_waveform_dataset_from_tfrecord_files(dir_path)
dataset = dataset.map(
_extract_clip_waveform,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
def _extract_clip_waveform(
waveform, clip_start_index, clip_end_index, call_start_index,
call_end_index, _):
waveform = waveform[clip_start_index:clip_end_index]
call_start_index -= clip_start_index
call_end_index -= clip_start_index
return waveform, call_start_index, call_end_index
def create_inference_dataset(waveform_dataset, settings):
"""
Creates a dataset of spectrogram slice sequences.
Each dataset example is a sequence of consecutive slices of the
spectrogram of one input dataset waveform, with a hop size of one
spectrum. The slices all have the same shape. Different dataset
examples may have different numbers of slices, according to the
(possibly differing) lengths of the input waveforms.
"""
processor = _ExampleProcessor(settings)
dataset = waveform_dataset.map(
processor.compute_spectrogram,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.map(
processor.slice_spectrogram_along_frequency_axis,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.map(
processor.normalize_spectrogram_background,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.map(
processor.slice_spectrogram_along_time_axis,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
def get_spectrogram_slice_shape(settings):
spectrum_count = _get_spectrogram_slice_length(settings)
_, _, _, freq_start_index, freq_end_index = \
_get_low_level_spectrogram_settings(settings)
bin_count = freq_end_index - freq_start_index
return (spectrum_count, bin_count, 1)
def _get_spectrogram_slice_length(settings):
s = settings
slice_duration = s.waveform_slice_duration
window_size = s.spectrogram_window_size
hop_size = window_size * s.spectrogram_hop_size / 100
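    # For example (illustrative values only): a 0.6 s slice with a 0.005 s
    # window and a 50 % hop gives hop_size = 0.0025 and
    # 1 + round((0.6 - 0.005) / 0.0025) = 239 spectra per slice.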
return 1 + int(round((slice_duration - window_size) / hop_size))
def _get_low_level_spectrogram_settings(settings):
s = settings
fs = s.waveform_sample_rate
s2f = signal_utils.seconds_to_frames
# spectrogram
window_size = s2f(s.spectrogram_window_size, fs)
fraction = s.spectrogram_hop_size / 100
hop_size = s2f(s.spectrogram_window_size * fraction, fs)
dft_size = tfa_utils.get_dft_size(window_size)
# frequency slicing
f2i = tfa_utils.get_dft_bin_num
freq_start_index = f2i(s.spectrogram_start_freq, fs, dft_size)
freq_end_index = f2i(s.spectrogram_end_freq, fs, dft_size) + 1
return (window_size, hop_size, dft_size, freq_start_index, freq_end_index)
class _ExampleProcessor:
"""
Dataset example processor.
A dataset example processor prepares dataset examples for input to
a neural network during training or inference. It performs waveform
slicing, waveform modifications for dataset augmentation, and
spectrogram computation.
"""
def __init__(self, settings):
self._settings = settings
s = settings
s2f = signal_utils.seconds_to_frames
sample_rate = s.waveform_sample_rate
# Get the length of an example waveform in samples.
self._waveform_slice_length = \
s2f(s.waveform_slice_duration, sample_rate)
# Get the call start index in a positive example waveform.
self._positive_example_call_start_index = \
s2f(s.positive_example_call_start_offset, sample_rate)
# Get low-level spectrogram settings.
(self._window_size, self._hop_size, self._dft_size,
self._freq_start_index, self._freq_end_index) = \
_get_low_level_spectrogram_settings(s)
self._window_fn = tf.signal.hann_window
# Get values for slicing negative example waveforms.
self._negative_example_exclusion_window_length = self._window_size
self._negative_example_exclusion_window_start_offset = -(
self._positive_example_call_start_index +
self._negative_example_exclusion_window_length // 2)
def preprocess_waveform(
self, waveform, clip_start_index, clip_end_index,
call_start_index, call_end_index, clip_id):
"""
Preprocesses one input waveform.
Slices and applies data augmentations to the specified waveform
according to this preprocessor's settings.
"""
s = self._settings
if s.bound_type == 'End':
(waveform, clip_start_index, clip_end_index, call_start_index,
call_end_index) = _time_reverse_waveform(
waveform, clip_start_index, clip_end_index, call_start_index,
call_end_index)
waveform_slice, label = \
self._slice_waveform(waveform, call_start_index)
if s.waveform_amplitude_scaling_data_augmentation_enabled:
waveform_slice = self._scale_waveform_amplitude(waveform_slice)
return waveform_slice, label, clip_id
def _slice_waveform(self, waveform, call_start_index):
# Decide whether example is positive or negative.
positive = \
tf.random.uniform(()) <= \
self._settings.positive_example_probability
if positive:
# Slice so call starts at desired index.
slice_start_index = \
call_start_index - self._positive_example_call_start_index
else:
# negative example
# Slice so call start is outside of negative example call
# start exclusion window. The slice start index is uniformly
# distributed over the portion of the waveform from the
# beginning to the end less the waveform slice length,
# with the exception of the exclusion window.
# TODO: Perhaps we should modify datasets so waveforms
# contain clips only, without padding to make them all
# the same length?
minval = 0
maxval = tf.cast(
len(waveform) - self._waveform_slice_length -
self._negative_example_exclusion_window_length,
tf.int64)
slice_start_index = \
tf.random.uniform((), minval, maxval, dtype=tf.int64)
exclusion_window_start_index = \
call_start_index + \
self._negative_example_exclusion_window_start_offset
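            # If the draw landed at or past the start of the exclusion
            # window, shift it forward by the window length. The draw
            # range above was shortened by that same length, so the
            # distribution over the allowed positions stays uniform.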
if slice_start_index >= exclusion_window_start_index:
slice_start_index += \
self._negative_example_exclusion_window_length
slice_end_index = slice_start_index + self._waveform_slice_length
waveform_slice = waveform[slice_start_index:slice_end_index]
label = 1 if positive else 0
return waveform_slice, label
def _scale_waveform_amplitude(self, waveform):
max_abs = tf.math.reduce_max(tf.math.abs(waveform))
if max_abs == 0:
# waveform samples are all zero
return waveform
else:
# waveform samples are not all zero
# Find scale factor that would make maximum absolute waveform
# value one.
max_factor = _f32(1) / max_abs
# Find scale factor that would reduce RMS waveform value to
# 1 / 256. Yield 1 if RMS value is already less than 1 / 256.
sum_squared = tf.math.reduce_sum(waveform * waveform)
size = tf.cast(tf.size(waveform), tf.float32)
rms = tf.math.sqrt(sum_squared / size)
min_factor = tf.math.minimum(_f32(1), _f32(1 / 256) / rms)
# Choose random factor between `min_factor` and `max_factor`,
# with distribution uniform on log scale.
max_log = tf.math.log(max_factor)
min_log = tf.math.log(min_factor)
log_factor = tf.random.uniform(
(), min_log, max_log, dtype=tf.float32)
factor = tf.math.exp(log_factor)
# Scale waveform by chosen factor.
return factor * waveform
def compute_spectrogram(self, waveform, *args):
"""Computes the spectrogram of a waveform."""
s = self._settings
# Compute STFT. To use `tf.signal.stft`, we must add a leading
# unit dimension to the waveform tensor. After the call to
# `tf.signal.stft` we effectively remove the corresponding
# dimension of the resulting `stfts` tensor.
waveforms = tf.expand_dims(waveform, 0)
stfts = tf.signal.stft(
waveforms, self._window_size, self._hop_size, self._dft_size,
self._window_fn)
stft = stfts[0]
# Get spectrogram, i.e. squared magnitude of STFT.
gram = tf.math.real(stft * tf.math.conj(stft))
# gram = tf.abs(stft) ** 2
# Normalize spectrogram values so a full-scale, bin-centered
# sinusoid has a value of one with a rectangular window.
# TODO: Consider using a different normalization scheme that
# yields more consistent values (proportional to the spectral
# density, with units of watts per hertz) for noise across
# different sample rates, window sizes, and DFT sizes. This
# is what we'd like to use for spectrogram display, and it
# seems that we might as well use it here, too. It isn't
# necessary to build a working system, but the consistency
# might be helpful, for example for dataset visualization.
normalizing_scale_factor = 1 / (self._window_size / 2) ** 2
gram *= normalizing_scale_factor
# Take spectrogram log and apply affine transform to put
# full scale sinusoids at about 100 dB.
gram = tf.math.log(gram + s.spectrogram_log_epsilon)
decibel_scale_factor = 10 / math.log(10)
gram = 100 + decibel_scale_factor * gram
return (gram,) + tuple(args)
def slice_spectrogram_along_frequency_axis(self, gram, *args):
gram = gram[..., self._freq_start_index:self._freq_end_index]
return (gram,) + tuple(args)
def normalize_spectrogram_background(self, gram, *args):
s = self._settings
rank = s.spectrogram_background_normalization_percentile_rank
if rank is not None:
ranks = tf.constant([rank])
percentiles = _get_spectrogram_percentiles(gram, ranks)
percentiles = tf.reshape(percentiles, (1, tf.size(percentiles)))
gram = gram - percentiles
return (gram,) + tuple(args)
def slice_spectrogram_along_frequency_axis_with_shift(self, gram, *args):
# Get frequency shift in bins.
max_shift = self._settings.max_spectrogram_frequency_shift
shift = tf.random.uniform(
(), -max_shift, max_shift + 1, dtype=tf.int64)
gram = gram[
..., self._freq_start_index + shift:self._freq_end_index + shift]
return (gram,) + tuple(args)
def slice_spectrogram_along_time_axis(self, gram, *args):
slice_length = _get_spectrogram_slice_length(self._settings)
forward_slices = _slice_spectrogram(gram, slice_length)
reversed_gram = tf.reverse(gram, axis=(0,))
backward_slices = _slice_spectrogram(reversed_gram, slice_length)
return (forward_slices, backward_slices) + tuple(args)
def _time_reverse_waveform(
waveform, clip_start_index, clip_end_index, call_start_index,
call_end_index):
# Reverse waveform.
waveform = tf.reverse(waveform, [0])
# Get waveform length, casting to int64 for bounds swapping arithmetic.
length = tf.cast(len(waveform), tf.int64)
# Swap and complement clip bounds.
clip_start_index, clip_end_index = \
_swap_bounds(clip_start_index, clip_end_index, length)
# Swap and complement call bounds.
call_start_index, call_end_index = \
_swap_bounds(call_start_index, call_end_index, length)
return (
waveform, clip_start_index, clip_end_index, call_start_index,
call_end_index)
def _swap_bounds(start_index, end_index, length):
new_start_index = length - 1 - end_index
new_end_index = length - 1 - start_index
return new_start_index, new_end_index
def _f32(x):
return tf.cast(x, tf.float32)
_MAX_GRAM_VALUE = 120
def _get_spectrogram_percentiles(gram, percentile_ranks):
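    """
    Estimates the given percentiles of each spectrogram frequency bin.

    Spectrogram values are rounded and clipped to integers in
    [0, _MAX_GRAM_VALUE], a cumulative histogram is built per frequency
    bin, and `tf.searchsorted` locates the value at each requested
    percentile rank. The result has shape (bin count, rank count).
    """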
# Round gram values to nearest integer.
gram = tf.cast(tf.round(gram), tf.int32)
# Clip values.
gram = tf.clip_by_value(gram, 0, _MAX_GRAM_VALUE)
# Transpose gram so first dimension is frequency.
gram = tf.transpose(gram)
# print('rounded, clipped, and transposed spectrogram:')
# print(gram)
def accumulate_counts(x):
length = _MAX_GRAM_VALUE + 1
counts = tf.math.bincount(x, minlength=length, maxlength=length)
return tf.cumsum(counts)
cumulative_counts = tf.map_fn(accumulate_counts, gram)
# print()
# print('cumulative sums of rounded bin value counts:')
# print(cumulative_counts)
shape = tf.shape(gram)
bin_count = shape[0]
spectrum_count = shape[1]
percentile_ranks = tf.cast(percentile_ranks, tf.float32)
thresholds = percentile_ranks / 100. * tf.cast(spectrum_count, tf.float32)
thresholds = tf.cast(tf.round(thresholds), tf.int32)
thresholds = tf.reshape(thresholds, (1, len(thresholds)))
thresholds = tf.tile(thresholds, (bin_count, 1))
percentiles = tf.searchsorted(cumulative_counts, thresholds)
# print()
# print('percentiles:')
# print(percentiles)
return tf.cast(percentiles, tf.float32)
def _slice_spectrogram(gram, slice_length):
# Get tensor of consecutive spectrogram slices.
slices = tf.signal.frame(gram, slice_length, frame_step=1, axis=0)
# Add trailing dimension for input into Keras CNN.
slices = tf.expand_dims(slices, 3)
return slices
def _main():
_test_stft()
def _test_stft():
sample_rate = 24000
epsilon = 1e-10
for window_size in (8, 12, 16, 20, 24, 28, 32, 48, 64):
waveform = _create_sinusoid(window_size, sample_rate)
waveforms = tf.expand_dims(waveform, 0)
dft_size = tfa_utils.get_dft_size(window_size)
stft = tf.signal.stft(
waveforms, window_size, window_size, dft_size, None)
gram = tf.abs(stft) ** 2
normalizing_scale_factor = 1 / (window_size / 2) ** 2
gram *= normalizing_scale_factor
decibel_scale_factor = 10 / math.log(10)
gram = 100 + decibel_scale_factor * tf.math.log(gram + epsilon)
print(window_size, gram)
def _create_sinusoid(window_size, sample_rate):
freq = 3000
phase_factor = 2 * math.pi * freq / sample_rate
phases = phase_factor * tf.range(window_size, dtype=tf.float32)
return tf.math.cos(phases)
def _test_stft_new():
epsilon = 1e-10
bin_num = 1
trial_count = 1000
for sample_rate in (22050, 24000, 32000, 41000, 48000):
for window_dur in (.005, .010, .015):
bin_value_sum = 0
for trial_num in range(trial_count):
window_size = int(round(window_dur * sample_rate))
# waveform = _create_sinusoid(window_size, sample_rate)
waveform = _create_white_noise(window_size)
waveforms = tf.expand_dims(waveform, 0)
dft_size = tfa_utils.get_dft_size(window_size) * 4
# window_fn = tf.signal.hann_window
window_fn = None
stft = tf.signal.stft(
waveforms, window_size, window_size, dft_size, window_fn)
gram = tf.abs(stft) ** 2
bin_value_sum += gram[0, 0, bin_num]
# normalizing_scale_factor = 1 / (window_size / 2) ** 2
# gram *= normalizing_scale_factor
#
# decibel_scale_factor = 10 / math.log(10)
# gram = 100 + decibel_scale_factor * tf.math.log(gram + epsilon)
bin_value_avg = bin_value_sum / trial_count
print(
sample_rate, window_dur, window_size, dft_size,
bin_value_avg.numpy())
def _create_white_noise(window_size):
return tf.random.uniform((window_size,), minval=-1, maxval=1)
if __name__ == '__main__':
_main()
|
"""
A random distribution is a set of random numbers
that follow a certain probability density function.
Probability Density Function:
A function that describes a continuous probability,
i.e. the probability of each value in an array.
The probability is given as a number between 0 and 1,
where 0 means the value will never occur and
1 means the value will always occur.
Generate a 1-D array containing 100 values,
where each value has to be 3, 5, 7 or 9.
The probability for the value to be 3 is set to be 0.1.
The probability for the value to be 5 is set to be 0.3.
The probability for the value to be 7 is set to be 0.6.
The probability for the value to be 9 is set to be 0.
"""
from numpy import random
x = random.choice([3, 5, 7, 9], p=[0.1, 0.3, 0.6, 0], size=(100))
print(x)
"""
The probabilities must sum to 1.
Even if you run the example above 100 times, the value 9 will
never occur, because its probability is 0.
You can return arrays of any shape by specifying
the shape in the size parameter.
"""
# same example, but return a 2-D array
# from numpy import random
x = random.choice([3, 5, 7, 9], p=[0.1, 0.3, 0.6, 0.0], size=(3, 5))
print(x)
|
from behave import given, when, then, step
import sys
import pexpect
import time
import os
@when("we open the command line interface")
def step_impl(context):
os.chdir(context.basedir)
context.config_prompt = 'brewer@localhost#'
context.normal_prompt = 'brewer@localhost>'
context.cli = pexpect.spawn('bash -ci ./cli', cwd=context.basedir)
@then("we should be presented with a welcome prompt containing")
def step_impl(context):
context.cli.expect(context.text, timeout=2)
@when("we send the following command")
def step_impl(context):
for command in context.text.split('\n'):
context.cli.write("%s\n" % (command))
@then("we should be in configuration mode")
def step_impl(context):
context.cli.expect([context.config_prompt])
@then("we should be in operational mode")
def step_impl(context):
context.cli.expect([context.normal_prompt])
@then("the command line should have cleanly closed")
def step_impl(context):
time.sleep(1)
context.cli.write('sdf')
|
from setuptools import setup, find_packages
with open('README.md', 'r') as fh:
long_description = fh.read()
setup(
name='exercisecoachtools',
version='0.1.2',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
python_requires='>=3.7, <4',
url='https://github.com/chandojo/ExerciseCoachTools',
license='MIT License',
author='chandojo',
description='Tools for exercise physiologists and coaches.',
long_description=long_description,
long_description_content_type='text/markdown',
setup_requires=['pytest-runner'],
tests_require=['pytest']
)
|
from typing import Dict, Iterable, Iterator, List, Sequence, Optional, Tuple
from word_ladder.types import WordDict
from word_ladder.rung import Rung
def get_word_with_letter_missing(word: str, position: int) -> str:
"""
>>> get_word_with_letter_missing('dog', 0)
'?og'
>>> get_word_with_letter_missing('dog', 1)
'd?g'
>>> get_word_with_letter_missing('dog', 2)
'do?'
"""
if position == 0:
return f'?{word[1:]}'
if position == len(word) - 1:
return f'{word[:-1]}?'
return f'{word[:position]}?{word[position + 1:]}'
def get_neighbors(word: str, words: WordDict) -> Sequence[str]:
"""
>>> words = {'?og': ['dog', 'log', 'fog'], 'd?g': ['dog', 'dig'], 'do?': ['dog'], 'l?g': ['log'], 'lo?': ['log']}
>>> sorted(get_neighbors('dig', words))
['dig', 'dog']
>>> sorted(get_neighbors('fog', words))
['dog', 'fog', 'log']
"""
return frozenset(
neighbor
for position in range(len(word))
for neighbor in words.get(get_word_with_letter_missing(word, position), [])
)
def get_all_previous_words(rung: Rung) -> Tuple[str, ...]:
"""
>>> rung_0 = Rung(None, ['dig'], {})
>>> path = {'dog': ('log', 'fog', 'dig', 'dug', 'don', 'dob'), 'fig': ('dig', 'fog', 'fin')}
>>> words = ['dob', 'don', 'dug', 'fin', 'fog', 'log']
>>> rung_1 = Rung(rung_0, words, path)
>>> sorted(get_all_previous_words(rung_1))
['dig', 'dob', 'don', 'dug', 'fin', 'fog', 'log']
"""
return tuple(rung.words) + (get_all_previous_words(rung.previous) if rung.previous else ())
def get_next_rung(previous_rung: Rung, words: WordDict) -> Rung:
"""
>>> from word_ladder.compile_words import add_to_words_dict
>>> words = {}
>>> for w in ['dog', 'log', 'fog', 'dig', 'dug', 'dim', 'don', 'dob', 'lug', 'fin']:
... words = add_to_words_dict(words, w)
>>> rung = Rung(None, ['dog', 'fig'], {})
>>> next_rung = get_next_rung(rung, words)
>>> {k: sorted(v) for k,v in next_rung.path.items()}
{'dog': ['dig', 'dob', 'don', 'dug', 'fog', 'log'], 'fig': ['dig', 'fin', 'fog']}
>>> sorted(next_rung.words)
['dig', 'dob', 'don', 'dug', 'fin', 'fog', 'log']
"""
previous_words = get_all_previous_words(previous_rung)
path = {
source_word: tuple(w for w in get_neighbors(source_word, words) if w not in previous_words)
for source_word in previous_rung.words
}
word_soup = frozenset(w for these_words in path.values() for w in these_words)
return Rung(previous_rung, word_soup, path)
def keys_for_value(d: Dict[str, Iterable[str]], value: str) -> Iterator[str]:
"""
>>> d = {'a': ['x', 'y', 'z'], 'b': ['l', 'm', 'z'], 'c': ['t', 'u']}
>>> list(keys_for_value(d, 'y'))
['a']
>>> list(keys_for_value(d, 'u'))
['c']
>>> list(keys_for_value(d, 'z'))
['a', 'b']
"""
for key, values in d.items():
if value in values:
yield key
def get_ladders(rung: Rung, word: str) -> Sequence[List[str]]:
"""
>>> rung_0 = Rung(None, ['dig'], {})
>>> rung_1 = Rung(rung_0, ['dog', 'log', 'fig', 'din'], {'dig': ('dog', 'log', 'fig', 'din')})
>>> words = ['dig', 'dob', 'don', 'dug', 'fin', 'fog', 'log', 'din']
>>> path = {'dog': ('log', 'fog', 'dig', 'dug', 'don', 'dob'), 'fig': ('dig', 'fog', 'fin'), 'din': ('dig', 'fin')}
>>> rung_2 = Rung(rung_1, words, path)
>>> get_ladders(rung_2, 'fin')
[['dig', 'fig', 'fin'], ['dig', 'din', 'fin']]
"""
if not rung.previous:
return [[word]]
return [
ladder + [word]
for previous_word in keys_for_value(rung.path, word)
for ladder in get_ladders(rung.previous, previous_word)
]
def build_rungs(start_word, target_word, words) -> Rung:
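    """
    Builds successive rungs from `start_word` until `target_word` appears
    in a rung or no new words can be reached, printing progress per round.
    """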
rung = Rung(None, [start_word], {})
counter = 1
while target_word not in rung.words and len(rung.words) > 0:
rung = get_next_rung(rung, words)
counter += 1
if rung.words:
print(f'Round {counter}: {len(rung.words):3} possible words, eg. {", ".join(sorted(rung.words)[:6])}')
return rung
|
from ignite.engine import Events, Engine
from protozoo.tiktorch_config_keys import ModelZooEntry
def get_trainer(model_zoo_entry: ModelZooEntry) -> Engine:
def training_step(trainer: Engine, batch) -> float:
print("STEP")
ipt, tgt = batch
trainer.state.optimizer.zero_grad()
pred = trainer.state.model(ipt)
loss = trainer.state.loss_fn(pred, tgt)
loss.backward()
trainer.state.optimizer.step()
return loss.item()
trainer = Engine(training_step)
@trainer.on(Events.STARTED)
def training_setup(trainer: Engine):
trainer.state.model = model_zoo_entry.model_config.model_class(**model_zoo_entry.model_config.model_kwargs)
assert model_zoo_entry.model_config.pretrained_source is None
trainer.state.optimizer = model_zoo_entry.optimizer_config.optimizer_class(
trainer.state.model.parameters(), **model_zoo_entry.optimizer_config.optimizer_kwargs
)
trainer.state.loss_fn = model_zoo_entry.loss_config.loss_class(
trainer.state.model.parameters(), **model_zoo_entry.loss_config.loss_kwargs
)
for callback in model_zoo_entry.trainer_callbacks:
trainer.add_event_handler(callback.event, callback.function)
return trainer
|
#!/usr/bin/env python3
import argparse
import datetime
import json
import re
from copy import deepcopy
from gocddash.analysis import data_access, go_client, domain
from gocddash.util import app_config
from gocddash.console_parsers.junit_report_parser import JunitConsoleParser
from gocddash.console_parsers.determine_parser import get_log_parser
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--app-cfg', help='application config')
parser.add_argument('-f', '--file-source', help='go client file source')
return parser.parse_args()
def setup_go_client(pargs):
application_cfg_path = pargs.app_cfg
app_config.create_app_config(application_cfg_path)
file_source = pargs.file_source
if file_source:
app_config.get_app_config().cfg['GO_SERVER_URL'] = file_source
go_client.go_client(
app_config.get_app_config().cfg['GO_SERVER_URL'],
(app_config.get_app_config().cfg['GO_SERVER_USER'], app_config.get_app_config().cfg['GO_SERVER_PASSWD'])
)
def log(string):
print(str(datetime.datetime.now()) + " " + string)
class SyncController:
def __init__(self, db, go, chunk_size=10):
self.db = db
self.go = go
self.chunk_size = chunk_size
self.to_notify = []
self.max_to_sync = 500
def sync(self):
self.sync_agents()
self.sync_pipeline_list()
self.update_sync_rules()
self.sync_pipelines()
self.notify_breakers()
def sync_agents(self):
"""
Update mapping from uuid to go-agent name in database.
"""
json_text = self.go.get_agents()
for agent in json.loads(json_text)["_embedded"]["agents"]:
self.db.save_agent(agent['uuid'], agent['hostname'])
def sync_pipeline_list(self):
json_text = self.go.get_pipeline_groups()
for group in json.loads(json_text):
for pipeline in group['pipelines']:
self.db.save_pipeline(pipeline['name'], group['name'])
def update_sync_rules(self):
json_text = self.go.get_pipeline_groups()
group_for_pipeline = {}
for group in json.loads(json_text):
for pipeline in group['pipelines']:
group_copy = deepcopy(group)
group_copy['pipelines'] = [pipeline]
group_for_pipeline[pipeline['name']] = group_copy
for new_pipeline in self.db.list_new_pipelines():
self.determine_sync_attributes(
new_pipeline['pipeline_name'],
group_for_pipeline[new_pipeline['pipeline_name']]
)
def determine_sync_attributes(self, pipeline_name, pipeline_group_structure):
"""
Update new pipelines, i.e. pipelines where the sync field is NULL in
the database, if they match some rule. All rules are applied in the
order given by db.list_pipeline_sync_rules(), so the last rule wins.
"""
for rule in self.db.list_pipeline_sync_rules():
assert rule['kind'] == 're'
nodes = JsonNodes(pipeline_group_structure).nodes
for key, value in nodes:
if key == rule['pipeline_groups_field']:
if re.search(rule['pattern'], value):
kwargs = {}
for param in [
'sync',
'log_parser',
'email_notifications'
]:
if rule[param] is not None:
kwargs[param] = rule[param]
self.db.update_pipeline(pipeline_name, **kwargs)
def sync_pipelines(self):
for pipeline_name in self.db.get_pipelines_to_sync():
self.sync_pipeline(pipeline_name)
def sync_pipeline(self, pipeline_name):
max_ins = self.max_instance_for_pipeline(pipeline_name)
wanted_pipeline_instances = self.get_wanted_instances(pipeline_name, max_ins)
fetched_pipelines_history = self.get_pipeline_history(pipeline_name, wanted_pipeline_instances)
for pipeline_instance in fetched_pipelines_history:
self.store_synced_pipeline(pipeline_name, pipeline_instance)
done = self.sync_stages(pipeline_name, pipeline_instance)
self.db.store_pipeline_instance_done(pipeline_instance["id"], done)
def store_synced_pipeline(self, pipeline_name, pipeline_instance):
pipeline_counter = pipeline_instance["counter"]
print('Store synced pipeline', pipeline_name, pipeline_counter)
pipeline_id = pipeline_instance["id"]
instance = domain.PipelineInstance(
pipeline_name,
pipeline_counter,
pipeline_instance["build_cause"]["trigger_message"],
pipeline_id
)
if not self.db.pipeline_instance_exists(pipeline_name, pipeline_counter):
self.db.insert_pipeline_instance(instance)
def sync_stages(self, pipeline_name, pipeline_instance):
"""
Find all stages for a pipeline instance, and sync them.
Return whether all were done.
"""
pipeline_counter = pipeline_instance["counter"]
pipeline_id = pipeline_instance["id"]
done = True
for stage in pipeline_instance['stages']:
done &= self.sync_stage(pipeline_name, pipeline_counter, pipeline_id, stage)
return done
def sync_stage(self, pipeline_name, pipeline_counter, pipeline_id, stage):
"""
Find any new runs for a stage, and sync them.
Return whether all were done.
"""
if not stage['scheduled']:
return False
stage_name = stage['name']
current_stage_counter = int(stage['counter'])
previous_stage_counter = self.db.get_latest_synced_stage(pipeline_id, stage_name)
stage_counters = range(previous_stage_counter + 1, current_stage_counter + 1)
done = True
for stage_counter in stage_counters:
done &= self.sync_stage_occurrence(
pipeline_name,
pipeline_counter,
pipeline_id,
stage_name,
stage_counter
)
return done
def sync_stage_occurrence(self, pipeline_name, pipeline_counter, pipeline_id,
stage_name, stage_counter):
"""
Store information about stage run from go-server and sync its jobs.
Return whether we were done with the stage.
"""
stage_occurrence_json = self.go.get_stage_instance(pipeline_name, pipeline_counter,
stage_counter, stage_name)
stage_occurrence = json.loads(stage_occurrence_json)
stage_result = stage_occurrence["result"]
if stage_result == 'Unknown':
print(" Skipping stage: {} / {} - still in progress".format(
stage_name, stage_counter))
return False
print(" Fetching stage: {} / {}".format(stage_name, stage_counter))
stage_id = stage_occurrence["id"]
# Leave for now but a Stage doesn't have a scheduled_date in the API
timestamp = self.ms_timestamp_to_date(stage_occurrence["jobs"][0]["scheduled_date"])
stage = domain.Stage(stage_name, stage_occurrence["approved_by"], stage_result,
stage_counter, stage_id, timestamp)
self.db.insert_stage(pipeline_id, stage)
all_done = True
for job in stage_occurrence['jobs']:
if job.get("state") == "Completed":
self.sync_job(pipeline_name,
pipeline_counter,
stage_id,
stage_name,
stage_counter,
job)
else:
all_done = False
return all_done
def sync_job(self, pipeline_name, pipeline_counter, stage_id, stage_name, stage_counter, job):
"""
Store information about job and tests from go-server.
Remember what we should notify breakers about.
Sync failure info if failure.
"""
print('sync_job')
job_name = job['name']
agent_uuid = job['agent_uuid']
scheduled_date = self.ms_timestamp_to_date(job['scheduled_date'])
job_id = job['id']
job_result = job['result']
try:
parser = JunitConsoleParser(pipeline_name, pipeline_counter, stage_counter, stage_name, job_name)
tests_run, tests_failed, tests_skipped = parser.parse_bar_chart_info()
except LookupError as error:
print('Failed parsing test results for {}/{}/{}/{}/{}: {}'.format(
pipeline_name, pipeline_counter, stage_counter, stage_name, job_name, error
))
tests_run, tests_failed, tests_skipped = 0, 0, 0
job = domain.Job(job_id, stage_id, job_name, agent_uuid, scheduled_date,
job_result, tests_run, tests_failed, tests_skipped)
self.db.insert_job(stage_id, job)
print('job result', job_result)
if job_result != 'Passed' and self.should_notify(pipeline_name):
stage_failure_info = domain.get_pipeline_head(pipeline_name)
failure_streak = domain.get_latest_failure_streak(pipeline_name)
self.to_notify.append((stage_failure_info, failure_streak))
if job_result == 'Failed' and not self.db.is_failure_downloaded(stage_id):
self.sync_failure_info(pipeline_counter, pipeline_name,
stage_id, stage_name, stage_counter, job_name)
def should_notify(self, pipeline_name):
"""
Are email notifications enabled for this pipeline?
"""
pipeline = self.db.get_pipeline(pipeline_name)
return pipeline and pipeline['email_notifications']
def sync_failure_info(self, pipeline_counter, pipeline_name,
stage_id, stage_name, stage_counter, job_name):
"""
Store failure information from go-server for a given job,
as extracted from its log parser.
"""
try:
log_parser_class = get_log_parser(pipeline_name)
log_parser = log_parser_class(pipeline_name, pipeline_counter, stage_counter, stage_name, job_name)
failure_stage = log_parser.get_failure_stage()
self.db.insert_failure_information(stage_id, failure_stage)
log_parser.insert_info(stage_id)
except LookupError as error:
print("Failed to sync failure info for {}/{}/{}/{}/{}: {}".format(
pipeline_counter, pipeline_name, stage_name, stage_counter, job_name, error)
)
@staticmethod
def ms_timestamp_to_date(ms):
"""
        Return a datetime object from a POSIX timestamp given in milliseconds,
        truncating fractions of a second.
"""
return datetime.datetime.fromtimestamp(ms // 1000)
def max_instance_for_pipeline(self, pipeline_name):
"""
Return the highest pipeline counter in Go for the given pipeline.
"""
try:
history_json = self.go.request_pipeline_history(pipeline_name)
return json.loads(history_json)['pipelines'][0]['counter']
except LookupError:
return 0
def get_wanted_instances(self, pipeline_name, counter):
"""
Get a list of pipeline_counter indicating what to fetch for a pipeline.
Start at `counter` and go back (but not past 1).
Don't include instances we already have, don't fetch more than
self.chunk_size at a time, and never go back more than `self.max_to_sync`
from the initial value of `counter`.
"""
oldest_we_want = max(1, counter - self.max_to_sync + 1)
counters = []
while len(counters) < self.chunk_size:
if counter < oldest_we_want:
break
if not self.db.pipeline_instance_done(pipeline_name, counter):
counters.append(counter)
counter -= 1
return counters
def get_pipeline_history(self, pipeline_name, pipeline_counters):
"""
Get the history for given pipeline_name, and list of pipeline_counter.
Since we get the historical information in chunks, we store all historical
        information we get from the go-server in pipeline_cache. If we find the
        pipeline counter we're looking for in the pipeline_cache, we get it from
        there; otherwise, we fetch more history from the go-server.
"""
def add_to(some_pipeline_cache, offset=[0]):
"""
Fetch pipeline history and store in a dictionary.
Increase offset by page_size for each call.
Return whether we managed to add something or not.
"""
try:
history_json = self.go.request_pipeline_history(
pipeline_name, offset[0])
except LookupError:
return False
history = json.loads(history_json)
instances = history.get('pipelines', [])
for instance in instances:
some_pipeline_cache[instance['counter']] = instance
offset[0] += history["pagination"]["page_size"]
return len(instances) > 0
pipeline_history = []
pipeline_cache = {}
remaining_sorted_counters = sorted(pipeline_counters)
while remaining_sorted_counters:
ctr = remaining_sorted_counters[-1]
if ctr in pipeline_cache:
pipeline_history.append(pipeline_cache[ctr])
remaining_sorted_counters.remove(ctr)
elif pipeline_cache and min(pipeline_cache.keys()) < ctr:
# If the go-server had this instance, we would have
# found it by now. It's missing!
remaining_sorted_counters.remove(ctr)
else:
if not add_to(pipeline_cache):
break
return pipeline_history
def check_notification_needs(self, pipeline_instance):
pass
def notify_breakers(self):
pass
class JsonNodes:
"""
Parse a Python data structure coming from json, and build a
list of (key, value) pairs. The keys show the hierarchy using
    dot notation. E.g. {'a': {'b': 6, 'o': 0}} should put
[('a.b', 6), ('a.o', 0)] in its .nodes attribute.
"""
def __init__(self, json_structure, prefix=None):
"""
Delegate lists and dicts, and solve the trivial case
"""
if isinstance(json_structure, list):
self.nodes = self.json_nodes_list(json_structure, prefix)
elif isinstance(json_structure, dict):
self.nodes = self.json_nodes_dict(json_structure, prefix)
else:
# If this was neither a list nor a dict, it's a final value,
# and the path to it is already in the prefix.
# Return a list like the cases above would.
self.nodes = [(prefix, json_structure)]
@classmethod
def json_nodes_list(cls, json_structure, prefix=None):
result = []
for elm in json_structure:
result.extend(cls(elm, prefix).nodes)
return result
@classmethod
def json_nodes_dict(cls, json_structure, prefix=None):
result = []
for key, value in json_structure.items():
if not prefix:
new_prefix = key
else:
new_prefix = prefix + '.' + key
result.extend(cls(value, new_prefix).nodes)
return result
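# Minimal usage sketch (hypothetical inputs), matching the class docstring above:
#   JsonNodes({'a': {'b': 6, 'o': 0}}).nodes  ->  [('a.b', 6), ('a.o', 0)]
#   JsonNodes({'a': [{'b': 6}]}).nodes        ->  [('a.b', 6)]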
if __name__ == '__main__':
setup_go_client(parse_args())
go = go_client.go_client()
db = data_access.get_connection(app_config.get_app_config().cfg['DB_PATH'])
controller = SyncController(db, go)
log("Starting synchronization.")
controller.sync()
log("Synchronization finished.")
log('Done!')
|
from __future__ import annotations
import typing as ty
import aurflux
import discord
import itertools as itt
import collections as clc
import asyncio as aio
if ty.TYPE_CHECKING:
import datetime
def message2embed(message: discord.Message, embed_color: discord.Color = None):
embeds = []
# for image in message.
print("Converting!@")
print(len(message.embeds))
print(len(message.attachments))
for m, embed in itt.zip_longest([message], [*message.embeds, *message.attachments], fillvalue=None):
print("New embed!")
print(m)
print(embed)
new_embed = discord.Embed()
if isinstance(embed, discord.Embed) and (embed.title or embed.description):
new_embed = embed
new_embed.description = (str(new_embed.description) if new_embed.description != discord.Embed.Empty else "") + f"\n\n[Jump to message]({message.jump_url})"
embeds.append(new_embed)
continue
if m:
new_embed.timestamp = m.created_at
new_embed.set_author(name=m.author.name, icon_url=m.author.avatar_url, url=m.jump_url)
new_embed.description = f"{m.content[:1900] + ('...' if len(m.content) > 1900 else '')}"
new_embed.set_footer(text=f"#{m.channel.name} | Sent at {m.created_at.isoformat('@').replace('@', ' at ')[:-7]}")
if isinstance(embed, discord.Attachment):
new_embed.set_image(url=embed.url)
if isinstance(embed, discord.Embed) and embed.url:
if embed.thumbnail:
new_embed.set_image(url=embed.thumbnail.url)
else:
new_embed.set_image(url=embed.url)
#
# if embed is not None and embed.url:
# print("image!")
# print(embed.url
# new_embed.set_image(url=embed.url)
# elif embed is not None and (embed.title or embed.description):
# new_embed = embed
#
# if new_embed.description:
new_embed.description = (str(new_embed.description) if new_embed.description != discord.Embed.Empty else "") + f"\n\n[Jump to message]({message.jump_url})"
print(new_embed.to_dict())
print("_----------")
embeds.append(new_embed)
#
# for embed in message.embeds:
# print(embed.to_dict())
# if embed.description or embed.title:
# embed.description = f"{embed.description}\n\n[Jump to message]({message.jump_url})"
# embeds.append(embed)
# else:
# new_embed = discord.Embed(
# timestamp=message.created_at
# )
# new_embed.set_author(name=message.author.name, icon_url=message.author.avatar_url, url=message.jump_url)
#
# new_embed.description = f"{message.content[:1900] + ('...' if len(message.content) > 1900 else '')}\n\n[Jump to message]({message.jump_url})"
# new_embed.set_footer(text=f"#{message.channel.name} | Sent at {message.created_at.isoformat('@').replace('@', ' at ')[:-7]}")
# if embed.url:
# new_embed.set_image(url=embed.url)
# new_embed.description = f"{embed.description}\n\n[Jump to message]({message.jump_url})"
# embeds.append(embed)
#
# for attachment in message.attachments:
# new_embed = discord.Embed(
# timestamp=message.created_at
# )
# new_embed.set_author(name=message.author.name, icon_url=message.author.avatar_url, url=message.jump_url)
#
# # new_embed.description = f"{message.content[:1900] + ('...' if len(message.content) > 1900 else '')}\n\n[Jump to message]({message.jump_url})"
# new_embed.set_footer(text=f"#{message.channel.name} | Sent at {message.created_at.isoformat('@').replace('@', ' at ')[:-7]}")
# new_embed.set_image(url=attachment.url)
# new_embed.description = f"[Jump to message]({message.jump_url})"
# embeds.append(new_embed)
# print(embeds)
return embeds
# return [new_embed, *message.embeds]
# if message.embeds:
# for m_embed in message.embeds:
# extra_embed = discord.Embed()
# if m_embed.url:
# extra_embed.set_image(url=m_embed.url)
# if m_embed.image:
# extra_embed.set_image(url=m_embed.image.url)
# if m_embed.video:
# extra_embed._video = m_embed._video
#
# break
# if message.attachments:
# for attachment in message.attachments:
# if attachment.url:
# new_embed.set_image(url=attachment.url)
# break
# if embed_color:
# new_embed.colour = embed_color
class PinHandler(aurflux.AurfluxCog):
listening_channels = set()
def __init__(self, aurflux: aurflux.Aurflux):
super().__init__(aurflux)
self.locks: ty.Dict[str, aio.Lock] = clc.defaultdict(aio.Lock)
def route(self):
@self.router.endpoint("aurflux:guild_channel_pins_update", decompose=True)
async def message_update_handler(channel: discord.TextChannel, last_pin: datetime.datetime):
print("updating!")
print( self.aurflux.CONFIG.of(channel.guild.id)["pinmap"])
async with self.locks[channel.id]:
if channel.id not in (config := self.aurflux.CONFIG.of(channel.guild.id))["pinmap"]:
return
print("!")
pins: ty.List[discord.Message] = sorted(await channel.pins(), key=lambda x: x.created_at)
print(f"{len(pins)} in {channel}")
num_to_unpin = max(0, len(pins) - config["maxmap"][channel.id])
print(num_to_unpin)
for pin in pins[:num_to_unpin]:
for embed in message2embed(pin):
await self.aurflux.get_channel(config["pinmap"][channel.id]).send(
embed=embed
)
await pin.unpin()
del self.locks[channel.id]
|
import numpy as np
from numpy.random import RandomState
from qgomoku.core.board import Board
from qgomoku.learner import pexp_mind
minds = []
SIZE = 9
def run():
mind = pexp_mind.PExpMind(size=SIZE, init=False, channels=4)
mind.load_net('../models/9_4_4')
rs = RandomState(42)
for i in range(50):
board = Board(size=SIZE, win_chain_length=5, draw_point=50)
print('Game', i)
# randomize the board a bit
for j in range(rs.randint(0, 10)):
board.make_random_move()
# board.move(2, 2)
# board.move(0, 1)
# board.move(2, 3)
# board.move(2, 1)
# board.move(2, 4)
# board.move(3, 1)
# board.move(4, 3)
# board.move(3, 4)
# board.move(6, 6)
# board.move(4, 1)
# board.move(0, 0)
# board.move(1, 0)
# board.move(0, 4)
# board.move(2, 2)
# board.move(0, 7)
# board.move(3, 0)
# board.move(1, 2)
# board.move(3, 7)
# board.move(1, 6)
# board.move(4, 4)
# board.move(1, 8)
# board.move(4, 5)
# board.move(2, 0)
# board.move(4, 6)
# board.move(2, 4)
# board.move(4, 8)
# board.move(2, 8)
# board.move(5, 0)
# board.move(3, 1)
# board.move(5, 4)
# board.move(3, 6)
# board.move(5, 8)
# board.move(4, 0)
# board.move(6, 3)
# board.move(4, 3)
# board.move(7, 2)
# board.move(4, 7)
# board.move(7, 6)
# board.move(6, 6)
# board.move(8, 0)
# board.move(8, 1)
# board.move(8, 8)
# board.move(8, 7)
print(board)
current_player = board.get_player_to_move()
def expanding_p(depth, p):
return np.logical_or.reduce([
np.logical_and(depth < 2, p > -6),
np.logical_and(depth < 4, p > -4),
np.logical_and(depth < 6, p > -4),
np.logical_and(depth < np.inf, p > -3)
])
def permissive_expansion(depth):
if depth < 2:
return np.inf
if depth < 8:
return 5
return 3
mind.define_policies(expanding_p, permissive_expansion, convergence_count=5,
alpha=0.2, q_exp_batch_size=SIZE ** 2,
p_exp_batch_size=SIZE ** 3, required_depth=6, max_iters=20)
while True:
result = mind.make_move(board,
as_player=current_player,
epsilon=0.1,
consistency_check=False,
verbose=True)
print(board.pprint())
if current_player == Board.FIRST_PLAYER:
current_player = Board.SECOND_PLAYER
else:
current_player = Board.FIRST_PLAYER
if result:
break
print('done')
if __name__ == "__main__":
# run()
import cProfile, pstats
from io import StringIO
pr = cProfile.Profile()
pr.enable()
run()
pr.disable()
s = StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
|
#!/usr/bin/env python
# Copyright 2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is NOT a POX component. It's a little tool to test out the messenger.
"""
import socket
import threading
import json
class JSONDestreamer (object):
import json
decoder = json.JSONDecoder()
def __init__ (self, callback = None):
self._buf = ''
self.callback = callback if callback else self.rx
def push (self, data):
if len(self._buf) == 0:
data = data.lstrip()
self._buf += data
try:
while len(self._buf) > 0:
r,off = self.decoder.raw_decode(self._buf)
self._buf = self._buf[off:].lstrip()
self.callback(r)
except ValueError:
pass
def rx (self, data):
import json
print "Recv:", json.dumps(data, indent=4)
jd = JSONDestreamer()
done = False
def reader (socket):
global done
while True:
d = socket.recv(1024)
if d == "":
done = True
break
jd.push(d)
cur_chan = None
def channel (ch):
global cur_chan
cur_chan = ch
import readline
def main (addr = "127.0.0.1", port = 6633):
print "Connecting to %s:%i" % (addr,port)
port = int(port)
sock = socket.create_connection((addr, port))
t = threading.Thread(target=reader, args=(sock,))
t.daemon = True
t.start()
while not done:
try:
#print ">",
m = raw_input()
if len(m) == 0: continue
m = eval(m)
if not isinstance(m, dict):
continue
if cur_chan is not None and 'CHANNEL' not in m:
m['CHANNEL'] = cur_chan
m = json.dumps(m)
sock.send(m)
except EOFError:
break
except KeyboardInterrupt:
break
except:
import traceback
traceback.print_exc()
if __name__ == "__main__":
import sys
main(*sys.argv[1:])
|
import numpy as np
import cv2
import pyopengv
import networkx as nx
import logging
from opensfm import context
from opensfm import multiview
from opensfm.unionfind import UnionFind
logger = logging.getLogger(__name__)
# pairwise matches
def match_lowe(index, f2, config):
search_params = dict(checks=config.get('flann_checks', 200))
results, dists = index.knnSearch(f2, 2, params=search_params)
squared_ratio = config.get('lowes_ratio', 0.6)**2 # Flann returns squared L2 distances
good = dists[:, 0] < squared_ratio * dists[:, 1]
matches = zip(results[good, 0], good.nonzero()[0])
return np.array(matches, dtype=int)
def match_symmetric(fi, indexi, fj, indexj, config):
if config.get('matcher_type', 'FLANN') == 'FLANN':
matches_ij = [(a,b) for a,b in match_lowe(indexi, fj, config)]
matches_ji = [(b,a) for a,b in match_lowe(indexj, fi, config)]
else:
matches_ij = [(a,b) for a,b in match_lowe_bf(fi, fj, config)]
matches_ji = [(b,a) for a,b in match_lowe_bf(fj, fi, config)]
matches = set(matches_ij).intersection(set(matches_ji))
return np.array(list(matches), dtype=int)
def convert_matches_to_vector(matches):
    '''Convert a list of cv2.DMatch objects to matrix form
'''
matches_vector = np.zeros((len(matches),2),dtype=np.int)
k = 0
for mm in matches:
matches_vector[k,0] = mm.queryIdx
matches_vector[k,1] = mm.trainIdx
k = k+1
return matches_vector
def match_lowe_bf(f1, f2, config):
'''Bruteforce feature matching
'''
assert(f1.dtype.type==f2.dtype.type)
if (f1.dtype.type == np.uint8):
matcher_type = 'BruteForce-Hamming'
else:
matcher_type = 'BruteForce'
matcher = cv2.DescriptorMatcher_create(matcher_type)
matches = matcher.knnMatch(f1, f2, k=2)
ratio = config.get('lowes_ratio', 0.6)
good_matches = []
for match in matches:
if match and len(match) == 2:
m, n = match
if m.distance < ratio * n.distance:
good_matches.append(m)
good_matches = convert_matches_to_vector(good_matches)
return np.array(good_matches, dtype=int)
def robust_match_fundamental(p1, p2, matches, config):
'''Computes robust matches by estimating the Fundamental matrix via RANSAC.
'''
if len(matches) < 8:
return np.array([])
p1 = p1[matches[:, 0]][:, :2].copy()
p2 = p2[matches[:, 1]][:, :2].copy()
FM_RANSAC = cv2.FM_RANSAC if context.OPENCV3 else cv2.cv.CV_FM_RANSAC
F, mask = cv2.findFundamentalMat(p1, p2, FM_RANSAC, config.get('robust_matching_threshold', 0.006), 0.9999)
inliers = mask.ravel().nonzero()
if F[2,2] == 0.0:
return []
return matches[inliers]
def compute_inliers_bearings(b1, b2, T):
R = T[:, :3]
t = T[:, 3]
p = pyopengv.triangulation_triangulate(b1, b2, t, R)
br1 = p.copy()
br1 /= np.linalg.norm(br1, axis=1)[:, np.newaxis]
br2 = R.T.dot((p - t).T).T
br2 /= np.linalg.norm(br2, axis=1)[:, np.newaxis]
ok1 = np.linalg.norm(br1 - b1, axis=1) < 0.01 # TODO(pau): compute angular error and use proper threshold
ok2 = np.linalg.norm(br2 - b2, axis=1) < 0.01
return ok1 * ok2
def robust_match_calibrated(p1, p2, camera1, camera2, matches, config):
'''Computes robust matches by estimating the Essential matrix via RANSAC.
'''
if len(matches) < 8:
return np.array([])
p1 = p1[matches[:, 0]][:, :2].copy()
p2 = p2[matches[:, 1]][:, :2].copy()
b1 = multiview.pixel_bearings(p1, camera1)
b2 = multiview.pixel_bearings(p2, camera2)
threshold = config['robust_matching_threshold']
T = pyopengv.relative_pose_ransac(b1, b2, "STEWENIUS", 1 - np.cos(threshold), 1000)
inliers = compute_inliers_bearings(b1, b2, T)
return matches[inliers]
def robust_match(p1, p2, camera1, camera2, matches, config):
if (camera1.get('projection_type', 'perspective') == 'perspective'
and camera2.get('projection_type', 'perspective') == 'perspective'
and camera1.get('k1', 0.0) == 0.0):
return robust_match_fundamental(p1, p2, matches, config)
else:
return robust_match_calibrated(p1, p2, camera1, camera2, matches, config)
def good_track(track, min_length):
if len(track) < min_length:
return False
images = [f[0] for f in track]
if len(images) != len(set(images)):
return False
return True
def create_tracks_graph(features, colors, matches, config):
logging.debug('Merging features onto tracks')
uf = UnionFind()
for im1, im2 in matches:
for f1, f2 in matches[im1, im2]:
uf.union((im1, f1), (im2, f2))
sets = {}
for i in uf:
p = uf[i]
if p in sets:
sets[p].append(i)
else:
sets[p] = [i]
tracks = [t for t in sets.values() if good_track(t, config.get('min_track_length', 2))]
logging.debug('Good tracks: {}'.format(len(tracks)))
tracks_graph = nx.Graph()
for track_id, track in enumerate(tracks):
for image_feature in track:
image = image_feature[0]
featureid = image_feature[1]
x, y = features[image][featureid]
r, g, b = colors[image][featureid]
tracks_graph.add_node(image, bipartite=0)
tracks_graph.add_node(str(track_id), bipartite=1)
tracks_graph.add_edge(image, str(track_id), feature=(x,y), feature_id=featureid, feature_color=(float(r),float(g),float(b)))
return tracks_graph
|
import warnings as test_warnings
from unittest.mock import patch
import pytest
from rotkehlchen.assets.asset import Asset
from rotkehlchen.assets.converters import UNSUPPORTED_POLONIEX_ASSETS, asset_from_poloniex
from rotkehlchen.constants.assets import A_BTC, A_ETH
from rotkehlchen.errors import DeserializationError, UnknownAsset, UnsupportedAsset
from rotkehlchen.exchanges.data_structures import Loan, Trade, TradeType
from rotkehlchen.exchanges.poloniex import Poloniex, process_polo_loans, trade_from_poloniex
from rotkehlchen.fval import FVal
from rotkehlchen.tests.utils.constants import A_DASH
from rotkehlchen.tests.utils.exchanges import (
POLONIEX_BALANCES_RESPONSE,
POLONIEX_MOCK_DEPOSIT_WITHDRAWALS_RESPONSE,
POLONIEX_TRADES_RESPONSE,
)
from rotkehlchen.tests.utils.mock import MockResponse
from rotkehlchen.typing import AssetMovementCategory, Location, Timestamp
from rotkehlchen.user_messages import MessagesAggregator
TEST_RATE_STR = '0.00022999'
TEST_AMOUNT_STR = '613.79427133'
TEST_PERC_FEE_STR = '0.0015'
TEST_POLO_TRADE = {
'globalTradeID': 192167,
'tradeID': 3727,
'date': '2017-07-22 21:18:37',
'rate': TEST_RATE_STR,
'amount': TEST_AMOUNT_STR,
'total': '0.14116654',
'fee': TEST_PERC_FEE_STR,
'orderNumber': '2315432',
'type': 'sell',
'category': 'exchange',
}
TEST_POLO_LOAN_1 = {
'id': 3, # we don't read that in Rotkehlchen
'rate': '0.001', # we don't read that in Rotkehlchen
'duration': '0.001', # we don't read that in Rotkehlchen
'interest': '0.00000005', # we don't read that in Rotkehlchen
'open': '2017-01-24 06:05:04',
'close': '2017-01-24 10:05:04',
'currency': 'DASH', # cryptocompare hourly DASH/EUR: 13.22106438
'fee': '0.00015',
'earned': '0.003',
'amount': '2',
}
TEST_POLO_LOAN_2 = {
'id': 4, # we don't read that in Rotkehlchen
'rate': '0.001', # we don't read that in Rotkehlchen
'duration': '0.001', # we don't read that in Rotkehlchen
'interest': '0.00000005', # we don't read that in Rotkehlchen
'open': '2017-02-13 19:07:01',
'close': '2017-02-13 23:05:04',
'currency': 'DASH', # cryptocompare hourly DASH/EUR: 15.73995672
'fee': '0.00011',
'earned': '0.0035',
'amount': '2',
}
def test_name():
exchange = Poloniex('a', b'a', object(), object())
assert exchange.name == 'poloniex'
def test_trade_from_poloniex():
amount = FVal(TEST_AMOUNT_STR)
rate = FVal(TEST_RATE_STR)
perc_fee = FVal(TEST_PERC_FEE_STR)
cost = amount * rate
trade = trade_from_poloniex(TEST_POLO_TRADE, 'BTC_ETH')
assert isinstance(trade, Trade)
assert isinstance(trade.timestamp, int)
assert trade.timestamp == 1500758317
assert trade.trade_type == TradeType.SELL
assert trade.rate == rate
assert trade.amount == amount
assert trade.pair == 'ETH_BTC'
assert trade.fee == cost * perc_fee
assert trade.fee_currency == 'BTC'
assert trade.location == Location.POLONIEX
def test_poloniex_trade_deserialization_errors():
test_trade = TEST_POLO_TRADE.copy()
test_trade['date'] = '2017/07/22 1:18:37'
with pytest.raises(DeserializationError):
trade_from_poloniex(test_trade, 'BTC_ETH')
test_trade = TEST_POLO_TRADE.copy()
test_trade['type'] = 'lololol'
with pytest.raises(DeserializationError):
trade_from_poloniex(test_trade, 'BTC_ETH')
test_trade = TEST_POLO_TRADE.copy()
test_trade['amount'] = None
with pytest.raises(DeserializationError):
trade_from_poloniex(test_trade, 'BTC_ETH')
test_trade = TEST_POLO_TRADE.copy()
test_trade['rate'] = None
with pytest.raises(DeserializationError):
trade_from_poloniex(test_trade, 'BTC_ETH')
test_trade = TEST_POLO_TRADE.copy()
test_trade['fee'] = ['a']
with pytest.raises(DeserializationError):
trade_from_poloniex(test_trade, 'BTC_ETH')
test_trade = TEST_POLO_TRADE.copy()
del test_trade['rate']
with pytest.raises(DeserializationError):
trade_from_poloniex(test_trade, 'BTC_ETH')
def test_process_polo_loans():
raw_data = [TEST_POLO_LOAN_1, TEST_POLO_LOAN_2]
msg_aggregator = MessagesAggregator()
loans = process_polo_loans(msg_aggregator, raw_data, 0, 1564262858)
assert len(loans) == 2
assert isinstance(loans[0], Loan)
assert loans[0].open_time == Timestamp(1485237904)
assert loans[0].close_time == Timestamp(1485252304)
assert isinstance(loans[0].currency, Asset)
assert loans[0].currency == A_DASH
assert loans[0].fee == FVal('0.00015')
assert loans[0].earned == FVal('0.003')
assert loans[0].amount_lent == FVal('2')
assert isinstance(loans[1], Loan)
assert loans[1].open_time == Timestamp(1487012821)
assert loans[1].close_time == Timestamp(1487027104)
assert isinstance(loans[1].currency, Asset)
assert loans[1].currency == A_DASH
assert loans[1].fee == FVal('0.00011')
assert loans[1].earned == FVal('0.0035')
assert loans[1].amount_lent == FVal('2')
# Test different start/end timestamps
loans = process_polo_loans(msg_aggregator, raw_data, 1485252305, 1564262858)
assert len(loans) == 1
assert loans[0].close_time == Timestamp(1487027104)
loans = process_polo_loans(msg_aggregator, raw_data, 0, 1487012820)
assert len(loans) == 1
assert loans[0].close_time == Timestamp(1485252304)
def test_process_polo_loans_unexpected_data():
"""Test that with unexpected data the offending loan is skipped and an error generated"""
msg_aggregator = MessagesAggregator()
broken_loan = TEST_POLO_LOAN_1.copy()
broken_loan['close'] = 'xx2017-xxs07-22 21:18:37'
loans = process_polo_loans(msg_aggregator, [broken_loan, TEST_POLO_LOAN_2], 0, 1564262858)
assert len(loans) == 1
assert loans[0].close_time == Timestamp(1487027104)
assert len(msg_aggregator.consume_errors()) == 1
broken_loan = TEST_POLO_LOAN_1.copy()
broken_loan['open'] = 'xx2017-xxs07-22 21:18:37'
loans = process_polo_loans(msg_aggregator, [broken_loan, TEST_POLO_LOAN_2], 0, 1564262858)
assert len(loans) == 1
assert loans[0].close_time == Timestamp(1487027104)
assert len(msg_aggregator.consume_errors()) == 1
broken_loan = TEST_POLO_LOAN_1.copy()
broken_loan['fee'] = 'sdad'
loans = process_polo_loans(msg_aggregator, [broken_loan, TEST_POLO_LOAN_2], 0, 1564262858)
assert len(loans) == 1
assert loans[0].close_time == Timestamp(1487027104)
assert len(msg_aggregator.consume_errors()) == 1
broken_loan = TEST_POLO_LOAN_1.copy()
broken_loan['earned'] = None
loans = process_polo_loans(msg_aggregator, [broken_loan, TEST_POLO_LOAN_2], 0, 1564262858)
assert len(loans) == 1
assert loans[0].close_time == Timestamp(1487027104)
assert len(msg_aggregator.consume_errors()) == 1
broken_loan = TEST_POLO_LOAN_1.copy()
broken_loan['amount'] = ['something']
loans = process_polo_loans(msg_aggregator, [broken_loan, TEST_POLO_LOAN_2], 0, 1564262858)
assert len(loans) == 1
assert loans[0].close_time == Timestamp(1487027104)
assert len(msg_aggregator.consume_errors()) == 1
# And finally test that missing an expected entry is also handled
broken_loan = TEST_POLO_LOAN_1.copy()
del broken_loan['amount']
loans = process_polo_loans(msg_aggregator, [broken_loan, TEST_POLO_LOAN_2], 0, 1564262858)
assert len(loans) == 1
assert loans[0].close_time == Timestamp(1487027104)
assert len(msg_aggregator.consume_errors()) == 1
def test_poloniex_trade_with_asset_needing_conversion():
amount = FVal(613.79427133)
rate = FVal(0.00022999)
perc_fee = FVal(0.0015)
poloniex_trade = {
'globalTradeID': 192167,
'tradeID': FVal(3727.0),
'date': '2017-07-22 21:18:37',
'rate': rate,
'amount': amount,
'total': FVal(0.14116654),
'fee': perc_fee,
'orderNumber': FVal(2315432.0),
'type': 'sell',
'category': 'exchange',
}
trade = trade_from_poloniex(poloniex_trade, 'AIR_BTC')
assert trade.pair == 'BTC_AIR-2'
assert trade.location == Location.POLONIEX
def test_query_trade_history(function_scope_poloniex):
"""Happy path test for poloniex trade history querying"""
poloniex = function_scope_poloniex
def mock_api_return(url, req): # pylint: disable=unused-argument
return MockResponse(200, POLONIEX_TRADES_RESPONSE)
with patch.object(poloniex.session, 'post', side_effect=mock_api_return):
trades = poloniex.query_trade_history(
start_ts=0,
end_ts=1565732120,
)
assert len(trades) == 2
assert trades[0].timestamp == 1539713117
assert trades[0].location == Location.POLONIEX
assert trades[0].pair == 'BCH_BTC'
assert trades[0].trade_type == TradeType.SELL
assert trades[0].amount == FVal('1.40308443')
assert trades[0].rate == FVal('0.06935244')
assert trades[0].fee.is_close(FVal('0.00009730732'))
assert isinstance(trades[0].fee_currency, Asset)
assert trades[0].fee_currency == A_BTC
assert trades[1].timestamp == 1539709423
assert trades[1].location == Location.POLONIEX
assert trades[1].pair == 'ETH_BTC'
assert trades[1].trade_type == TradeType.BUY
assert trades[1].amount == FVal('3600.53748129')
assert trades[1].rate == FVal('0.00003432')
assert trades[1].fee.is_close(FVal('7.20107496258'))
assert isinstance(trades[1].fee_currency, Asset)
assert trades[1].fee_currency == A_ETH
def test_query_trade_history_unexpected_data(function_scope_poloniex):
"""Test that poloniex trade history querying returning unexpected data is handled gracefully"""
poloniex = function_scope_poloniex
poloniex.cache_ttl_secs = 0
def mock_poloniex_and_query(given_trades, expected_warnings_num, expected_errors_num):
def mock_api_return(url, req): # pylint: disable=unused-argument
return MockResponse(200, given_trades)
with patch.object(poloniex.session, 'post', side_effect=mock_api_return):
trades = poloniex.query_online_trade_history(
start_ts=0,
end_ts=1565732120,
)
if expected_errors_num == 0 and expected_warnings_num == 0:
assert len(trades) == 1
else:
assert len(trades) == 0
warnings = poloniex.msg_aggregator.consume_warnings()
assert len(warnings) == expected_warnings_num
errors = poloniex.msg_aggregator.consume_errors()
assert len(errors) == expected_errors_num
input_trades = """{"BTC_ETH":
[{ "globalTradeID": 394127361,
"tradeID": "13536350",
"date": "2018-10-16 17:03:43",
"rate": "0.00003432",
"amount": "3600.53748129",
"total": "0.12357044",
"fee": "0.00200000",
"orderNumber": "96238912841",
"type": "buy",
"category": "exchange"}]}"""
# First make sure it works with normal data
mock_poloniex_and_query(input_trades, expected_warnings_num=0, expected_errors_num=0)
# from here and on invalid data
# invalid timestamp
given_input = input_trades.replace('"2018-10-16 17:03:43"', '"435345"')
mock_poloniex_and_query(given_input, expected_warnings_num=0, expected_errors_num=1)
# invalid pair
given_input = input_trades.replace('"BTC_ETH"', '"0"')
mock_poloniex_and_query(given_input, expected_warnings_num=0, expected_errors_num=1)
# pair with unknown asset
given_input = input_trades.replace('"BTC_ETH"', '"BTC_SDSDSD"')
mock_poloniex_and_query(given_input, expected_warnings_num=1, expected_errors_num=0)
# pair with unsupported asset
given_input = input_trades.replace('"BTC_ETH"', '"BTC_BALLS"')
mock_poloniex_and_query(given_input, expected_warnings_num=1, expected_errors_num=0)
# invalid rate
given_input = input_trades.replace('"0.00003432"', 'null')
mock_poloniex_and_query(given_input, expected_warnings_num=0, expected_errors_num=1)
# invalid amount
given_input = input_trades.replace('"3600.53748129"', '"dsadsd"')
mock_poloniex_and_query(given_input, expected_warnings_num=0, expected_errors_num=1)
# invalid fee
given_input = input_trades.replace('"0.00200000"', '"dasdsad"')
mock_poloniex_and_query(given_input, expected_warnings_num=0, expected_errors_num=1)
# invalid trade type
given_input = input_trades.replace('"buy"', '"dasdsdad"')
mock_poloniex_and_query(given_input, expected_warnings_num=0, expected_errors_num=1)
# invalid category
given_input = input_trades.replace('"exchange"', '"dsadsdsadd"')
mock_poloniex_and_query(given_input, expected_warnings_num=0, expected_errors_num=1)
def test_poloniex_assets_are_known(poloniex):
currencies = poloniex.return_currencies()
for poloniex_asset in currencies.keys():
try:
_ = asset_from_poloniex(poloniex_asset)
except UnsupportedAsset:
assert poloniex_asset in UNSUPPORTED_POLONIEX_ASSETS
except UnknownAsset as e:
test_warnings.warn(UserWarning(
f'Found unknown asset {e.asset_name} in Poloniex. Support for it has to be added',
))
@pytest.mark.parametrize('use_clean_caching_directory', [True])
def test_poloniex_query_balances_unknown_asset(function_scope_poloniex):
"""Test that if a poloniex balance query returns unknown asset no exception
is raised and a warning is generated. Same for unsupported assets"""
poloniex = function_scope_poloniex
def mock_unknown_asset_return(url, req): # pylint: disable=unused-argument
return MockResponse(200, POLONIEX_BALANCES_RESPONSE)
with patch.object(poloniex.session, 'post', side_effect=mock_unknown_asset_return):
# Test that after querying the assets only ETH and BTC are there
balances, msg = poloniex.query_balances()
assert msg == ''
assert len(balances) == 2
assert balances[A_BTC]['amount'] == FVal('5.5')
assert balances[A_ETH]['amount'] == FVal('11.0')
warnings = poloniex.msg_aggregator.consume_warnings()
assert len(warnings) == 2
assert 'unknown poloniex asset IDONTEXIST' in warnings[0]
assert 'unsupported poloniex asset CNOTE' in warnings[1]
@pytest.mark.parametrize('use_clean_caching_directory', [True])
def test_poloniex_deposits_withdrawal_unknown_asset(function_scope_poloniex):
"""Test that if a poloniex asset movement query returns unknown asset no exception
is raised and a warning is generated. Same for unsupported assets"""
poloniex = function_scope_poloniex
def mock_api_return(url, req): # pylint: disable=unused-argument
response = MockResponse(
200,
POLONIEX_MOCK_DEPOSIT_WITHDRAWALS_RESPONSE,
)
return response
with patch.object(poloniex.session, 'post', side_effect=mock_api_return):
# Test that after querying the api only ETH and BTC assets are there
asset_movements = poloniex.query_online_deposits_withdrawals(
start_ts=0,
end_ts=1488994442,
)
assert len(asset_movements) == 4
assert asset_movements[0].category == AssetMovementCategory.WITHDRAWAL
assert asset_movements[0].timestamp == 1458994442
assert asset_movements[0].asset == A_BTC
assert asset_movements[0].amount == FVal('5.0')
assert asset_movements[0].fee == FVal('0.5')
assert asset_movements[1].category == AssetMovementCategory.WITHDRAWAL
assert asset_movements[1].timestamp == 1468994442
assert asset_movements[1].asset == A_ETH
assert asset_movements[1].amount == FVal('10.0')
assert asset_movements[1].fee == FVal('0.1')
assert asset_movements[2].category == AssetMovementCategory.DEPOSIT
assert asset_movements[2].timestamp == 1448994442
assert asset_movements[2].asset == A_BTC
assert asset_movements[2].amount == FVal('50.0')
assert asset_movements[3].category == AssetMovementCategory.DEPOSIT
assert asset_movements[3].timestamp == 1438994442
assert asset_movements[3].asset == A_ETH
assert asset_movements[3].amount == FVal('100.0')
warnings = poloniex.msg_aggregator.consume_warnings()
assert len(warnings) == 4
assert 'Found withdrawal of unknown poloniex asset IDONTEXIST' in warnings[0]
assert 'Found withdrawal of unsupported poloniex asset DIS' in warnings[1]
assert 'Found deposit of unknown poloniex asset IDONTEXIST' in warnings[2]
assert 'Found deposit of unsupported poloniex asset EBT' in warnings[3]
@pytest.mark.parametrize('use_clean_caching_directory', [True])
def test_poloniex_deposits_withdrawal_null_fee(function_scope_poloniex):
"""
Test that if a poloniex asset movement query returns null for fee we don't crash.
Regression test for issue #76
"""
poloniex = function_scope_poloniex
def mock_api_return(url, req): # pylint: disable=unused-argument
response = MockResponse(
200,
'{"withdrawals": [{"currency": "FAC", "timestamp": 1478994442, '
'"amount": "100.5", "fee": null, "withdrawalNumber": 1}], "deposits": []}',
)
return response
with patch.object(poloniex.session, 'post', side_effect=mock_api_return):
asset_movements = poloniex.query_online_deposits_withdrawals(
start_ts=0,
end_ts=1488994442,
)
assert len(asset_movements) == 1
assert asset_movements[0].category == AssetMovementCategory.WITHDRAWAL
assert asset_movements[0].timestamp == 1478994442
assert asset_movements[0].asset == Asset('FAIR')
assert asset_movements[0].amount == FVal('100.5')
assert asset_movements[0].fee == FVal('0')
warnings = poloniex.msg_aggregator.consume_warnings()
assert len(warnings) == 0
@pytest.mark.parametrize('use_clean_caching_directory', [True])
def test_poloniex_deposits_withdrawal_unexpected_data(function_scope_poloniex):
"""
Test that if a poloniex asset movement query returns unexpected data we handle it gracefully
"""
poloniex = function_scope_poloniex
poloniex.cache_ttl_secs = 0
def mock_poloniex_and_query(given_movements, expected_warnings_num, expected_errors_num):
def mock_api_return(url, req): # pylint: disable=unused-argument
return MockResponse(200, given_movements)
with patch.object(poloniex.session, 'post', side_effect=mock_api_return):
asset_movements = poloniex.query_online_deposits_withdrawals(
start_ts=0,
end_ts=1488994442,
)
if expected_errors_num == 0 and expected_warnings_num == 0:
assert len(asset_movements) == 1
else:
assert len(asset_movements) == 0
warnings = poloniex.msg_aggregator.consume_warnings()
assert len(warnings) == expected_warnings_num
errors = poloniex.msg_aggregator.consume_errors()
assert len(errors) == expected_errors_num
def check_permutations_of_input_invalid_data(given_input):
# First make sure it works with normal data
mock_poloniex_and_query(given_input, expected_warnings_num=0, expected_errors_num=0)
# From here and on test unexpected data
# invalid timestamp
movements = given_input.replace('1478994442', '"dasdsd"')
mock_poloniex_and_query(movements, expected_warnings_num=0, expected_errors_num=1)
# invalid amount
movements = given_input.replace('"100.5"', 'null')
mock_poloniex_and_query(movements, expected_warnings_num=0, expected_errors_num=1)
# invalid fee
if 'fee' in given_input:
movements = given_input.replace('"0.1"', '"dasdsdsad"')
mock_poloniex_and_query(movements, expected_warnings_num=0, expected_errors_num=1)
# invalid currency type
movements = given_input.replace('"FAC"', '[]')
mock_poloniex_and_query(movements, expected_warnings_num=0, expected_errors_num=1)
# unknown currency
movements = given_input.replace('"FAC"', '"DSDSDSD"')
mock_poloniex_and_query(movements, expected_warnings_num=1, expected_errors_num=0)
# missing key error
movements = given_input.replace('"timestamp": 1478994442,', '')
mock_poloniex_and_query(movements, expected_warnings_num=0, expected_errors_num=1)
input_withdrawals = """
{"withdrawals": [{"currency": "FAC", "timestamp": 1478994442,
"amount": "100.5", "fee": "0.1", "withdrawalNumber": 1}], "deposits": []}"""
check_permutations_of_input_invalid_data(input_withdrawals)
input_deposits = """
{"deposits": [{"currency": "FAC", "timestamp": 1478994442,
"amount": "100.5", "depositNumber": 1}], "withdrawals": []}"""
check_permutations_of_input_invalid_data(input_deposits)
|
'''
This file is part of GFLIB toolbox
First Version Sept. 2018
Cite this project as:
Mezher M., Abbod M. (2011) Genetic Folding: A New Class of Evolutionary Algorithms.
In: Bramer M., Petridis M., Hopgood A. (eds) Research and Development in Intelligent Systems XXVII.
SGAI 2010. Springer, London
Copyright (C) 2011-2018 Mohd A. Mezher (mohabedalgani@gmail.com)
'''
# This script generates logic synthesis data
# 6-multiplexer, odd-3-parity, odd-7-parity
import numpy as np
# 6-multiplexer
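# Illustrative note: columns 0-1 are the address bits; they select which of the
# four data columns (2-5) is copied to the output, as encoded in the branches below.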
inputs = np.random.randint(0, 2, 64*6).reshape((64,6))
outputs = []
for i in range(len(inputs)):
if (inputs[i, 0] == 0) & (inputs[i, 1] == 0):
outputs.append(inputs[i, 2])
elif (inputs[i, 0] == 0) & (inputs[i, 1] == 1):
outputs.append(inputs[i, 3])
elif (inputs[i, 0] == 1) & (inputs[i, 1] == 0):
outputs.append(inputs[i, 4])
elif (inputs[i, 0] == 1) & (inputs[i, 1] == 1):
outputs.append(inputs[i, 5])
outputs = np.array(outputs).reshape(-1,1)
data = np.hstack([inputs, outputs])
np.savetxt('data/binary/logic_6_multiplexer.txt', data, delimiter=',', fmt='%i')
# odd-3-parity
data = np.empty((8,4))
for i in range(0, 8):
b = np.binary_repr(i, 3)
n = 0
for j in range(3):
if b[j] == '1':
n += 1
data[i, :-1] = list(b)
if (n == 1) | (n == 3):
data[i, -1] = 1
else:
data[i, -1] = 0
np.savetxt('data/binary/odd_3_parity.txt', data, delimiter=',', fmt='%i')
# odd-7-parity
data = np.empty((128,8))
for i in range(0, 128):
b = np.binary_repr(i, 7)
n = 0
for j in range(7):
if b[j] == '1':
n += 1
data[i, :-1] = list(b)
if (n == 1) | (n == 3) | (n == 5) | (n == 7):
data[i, -1] = 1
else:
data[i, -1] = 0
np.savetxt('data/binary/odd_7_parity.txt', data, delimiter=',', fmt='%i')
|
#Skill : Ceil, list
#MS Excel column titles have the following pattern: A, B, C, ..., Z, AA, AB, ..., AZ, BA, BB, ..., ZZ, AAA, AAB, ... etc.
#In other words, column 1 is named "A", column 2 is "B", column 26 is "Z", column 27 is "AA" and so forth.
#Given a positive integer, find its corresponding column name.
#Examples:
#Input: 26
#Output: Z
#Input: 51
#Output: AY
#Input: 52
#Output: AZ
#Input: 676
#Output: YZ
#Input: 702
#Output: ZZ
#Input: 704
#Output: AAB
#Here is a starting point:
#Analysis
#It is a number base-26 conversion problem
#Iteratively divide the number by 26
# the remainder translates to a character via chr(rem + 65)
# carry the quotient into the next loop until the number reaches 0
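# Worked example for 704 ("AAB"), tracing the loop below:
#   704 % 26 = 2 -> 'B',  num = ceil(704/26) - 1 = 27
#   27  % 26 = 1 -> 'A',  num = ceil(27/26)  - 1 = 1
#   1   % 26 = 1 -> 'A',  num = ceil(1/26)   - 1 = 0   => "AAB"
# Edge case: 26 % 26 = 0 is mapped to 26 -> 'Z', num = ceil(26/26) - 1 = 0 => "Z"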
from collections import deque
import math
class Solution:
def convertToTitle(self, n):
# Fill this in.
num = n
result = deque()
while num > 0:
rem = num % 26
rem = 64 + ( 26 if rem == 0 else rem )
ch = chr (rem)
result.appendleft( ch )
num = max(math.ceil(num/26)-1,0)
return "".join(result)
if __name__ == "__main__":
print(Solution().convertToTitle(51))
input1 = 1
input2 = 456976
input3 = 28
print(Solution().convertToTitle(input1))
# A
print(Solution().convertToTitle(input2))
# YYYZ
print(Solution().convertToTitle(input3))
# AB
|
import logging
from unittest.mock import patch
from rich.logging import RichHandler
import app
@patch("app.LOG_LEVEL", "DEBUG")
def test_logging(capsys):
assert app.LOG_LEVEL == "DEBUG"
assert isinstance(app.logging.root.handlers[0], RichHandler)
app.logging.root.setLevel(logging.DEBUG)
assert app.logging.root.getEffectiveLevel() == logging.DEBUG
for name in logging.root.manager.loggerDict.keys():
assert logging.getLogger(name).handlers == []
assert logging.getLogger(name).propagate is True
logging.debug("This is a debug message.")
logging.info("This is an info message.")
logging.warning("This is a warning message.")
logging.error("This is an error message.")
logging.critical("This is a critical message.")
out, err = capsys.readouterr()
assert err == ""
for message in ("debug", "info", "error", "critical"):
assert message in out
|
# demo temperature sensor DHT22
# Configuration:
# - temperature/humidity sensor DHT22 on GPIO12
# - LED on GPIO14
# 2017-1003 PePo OLED output, Huzzah
import machine,time
import dht
import ssd1306
# create DHT22 sensor object
sensor = dht.DHT22(machine.Pin(12))
# create LED object on pin GPIO14
led = machine.Pin(14, machine.Pin.OUT)
#led.value(0)
# create i2c object: SCL=pin 5, SDA=pin 4
i2c = machine.I2C(scl=machine.Pin(5), sda=machine.Pin(4))
i2c.scan() # test -> [60] means the OLED is connected correctly
# create I2C OLED object: 128 * 32 pixels
oled = ssd1306.SSD1306_I2C(128, 32, i2c)
oled.fill(0) # blank OLED
oled.show()
# demo - LED on when humidity > threshold
def run(threshold = 80, dt=2.5):
# get sensor data
sensor.measure()
t = sensor.temperature()
h = sensor.humidity()
#console: print('T:{0} Celsius, H:{1} %'.format(t, h ))
# display sensor data on OLED
oled.text('Sensor DHT22',0, 0)
oled.text('T {0:0.1f} Celsius'.format(t),0, 10)
oled.text('H {0:0.1f} %'.format(h),0, 20)
oled.show()
# check threshold
if h > threshold:
led.value(1)
print('Alert!')
else:
led.value(0)
# wait between measurements
# time delay must be at least 2 seconds for sensor
time.sleep(dt)
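# Usage sketch (values are assumptions): run(threshold=70, dt=5.0) lights the
# LED when humidity exceeds 70 % and waits 5 seconds between measurements.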
#execute
try:
while True:
        # blank OLED
oled.fill(0)
oled.show()
run()
except:
    # blank OLED
oled.fill(0)
oled.show()
print('demo done!')
|
import sqlite3
import pandas as pd
def save_dataframe_to_sql(data_frame, table_name):
"""
    Save the dataframe as an SQL table (name provided as table_name) in starbucks.db
INPUT:
data_frame: DataFrame
table_name: String
OUTPUT:
None
"""
sql_connect = sqlite3.connect('starbucks.db')
data_frame.to_sql(table_name, sql_connect, if_exists='replace')
def read_dataframe_from_sql(query):
"""
    Read a table from starbucks.db into a dataframe
INPUT:
query: String
OUTPUT:
DataFrame
"""
sql_connect = sqlite3.connect('starbucks.db')
    return pd.read_sql_query(query, sql_connect)
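# Usage sketch (hypothetical table and query names):
#   save_dataframe_to_sql(portfolio_df, 'portfolio')
#   df = read_dataframe_from_sql('SELECT * FROM portfolio')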
|
"""
Subspace Product API
# Introduction The Subspace API is based on REST, has resource-oriented URLs, returns JSON-encoded responses, and returns standard HTTP response codes. The base URL for the API is: `https://api.subspace.com/` # Naming Convention * Version name currently in use is: *v1* * Example: `https://api.subspace.com/v1` # Authentication ## API Tokens Subspace authenticates your API requests using JWT Bearer tokens. To use any Subspace API, you must pass a Bearer token with each request. If you do not include your Bearer token when making an API request, or use one that is incorrect or disabled, Subspace returns an error. Bearer tokens are granted by requesting one (as noted below) and presenting your publishable (client_id) and secret (client_secret) tokens. Subspace provides two types of API tokens: publishable (client_id) and secret (client_secret). These are available in the Subspace console. * **Publishable** API tokens (client_id) are meant solely to identify your account with Subspace, they aren’t secret. They can be published in places like your website JavaScript code, or in an iPhone or Android app. * **Secret** API tokens (client_secret) should be kept confidential and only stored on your own servers. Your account’s secret API token will allow you to acquire a valid JWT token authorized to perform any API request to Subspace. ## Getting a JWT Bearer Token Subspace uses auth0 for JWT token management. You can acquire a JWT token by utilizing `https://id.subspace.com` and following the instructions in the curl example below. ## Protecting Your API Tokens * **JWT tokens have a expiration time of 24 hours.** Once expired, you will have to use your Subspace private API and public token to request a new one. * The Subspace private token can be rotated from within the Subspace console. Rotation may take up to 10 minutes for all systems to update state to invalidate the older token and enable the new one. * **Keep your secret token safe.** Your secret token can make any API call on behalf of your account, including changes that may impact billing such as enabling pay-as-you-go charges. Do not store your secret token in your version control system. Do not use your secret token outside your web server, such as a browser, mobile app, or distributed file. * **You may use the Subspace console to acquire an API token.** * **You may use the Subspace console to disable pay-as-you-go.** This may prevent unexpected charges due to unauthorized or abnormal usage. * **Do not embed API keys directly in code.** Instead of directly embedding API keys in your application’s code, put them in environment variables, or within include files that are stored separately from the bulk of your code—outside the source repository of your application. Then, if you share your code, the API keys will not be included in the shared files. * **Do not store API tokens inside your application’s source control.** If you store API tokens in files, keep the files outside your application’s source control system. This is particularly important if you use a public source code management system such as GitHub. * **Limit access with restricted tokens.** The Subspace console will allow you to specify the IP addresses or referrer URLs associated with each token, reducing the impact of a compromised API token. * **Use independent API tokens for different apps.** This limits the scope of each token. If an API token is compromised, you can rotate the impacted token without impacting other API tokens. 
# Error Codes Subspace uses HTTP response codes to indicate the success or failure of an API request. General HTML status codes: * 2xx Success. * 4xx Errors based on information provided in the request. * 5xx Errors on Subspace servers. # Security We provide a valid, signed certificate for our API methods. Be sure your connection library supports HTTPS with the SNI extension. # REST How-To Making your first REST API call is easy and can be done from your browser. You will need: * Your **secret** token and public client token, both found in the Console. * The URL for the type of data you would like to request. First, acquire a JWT Bearer Token. Command line example: curl --request POST \\ --url \"https://id.subspace.com/oauth/token\" \\ --header 'content-type: application/json' \\ --data '{ \"client_id\": \"YOURCLIENTID\", \"client_secret\": \"YOURCLIENTSECRET\", \"audience\": \"https://api.subspace.com/\", \"grant_type\": \"client_credentials\" }' REST calls are made up of: * Base url: Example: `https://api.subspace.com` * Version: Example: `v1` * The API Endpoint and any parameters: `accelerator/acc_NDA3MUI5QzUtOTY4MC00Nz` where `acc_NDA3MUI5QzUtOTY4MC00Nz` is a valid accelerator ID * Accelerator ids are always of the format `acc_NDA3MUI5QzUtOTY4MC00Nz`, with a \"acc_\" prefix followed by 22 characters. * Token header: All REST requests require a valid JWT Bearer token which should be added as an “Authorization” header to the request: `Authorization: Bearer YOUR_TOKEN_HERE` ## Authorization header example If your API token was “my_api_token”, you would add... Authorization: Bearer my_api_token ...to the header. ## Command line examples To list your current open packet_accelerators using the token “my_api_token”: curl -H “Authorization: Bearer my_api_token” https://api.subspace.com/v1/accelerator Alternately, to get the details of a specific accelerator whose id is 'abcd-ef01-2345': curl -H “Authorization: Bearer my_api_token” https://api.subspace.com/v1/accelerator/abcd-ef01-2345 # API Versioning Subspace will release new versions when we make backwards-incompatible changes to the API. We will give advance notice before releasing a new version or retiring an old version. Backwards compatible changes: * Adding new response attributes * Adding new endpoints * Adding new methods to an existing endpoint * Adding new query string parameters * Adding new path parameters * Adding new webhook events * Adding new streaming endpoints * Changing the order of existing response attributes Versions are added to the base url, for example: * `https://api.subspace.com/v1` Current Version is **v1:** `https://api.subspace.com/v1` # noqa: E501
The version of the OpenAPI document: 1.0
Contact: sales@subspace.com
Generated by: https://openapi-generator.tech
"""
import io
import json
import logging
import re
import ssl
from urllib.parse import urlencode
import urllib3
from subspace_openapi_client.exceptions import ApiException, UnauthorizedException, ForbiddenException, NotFoundException, ServiceException, ApiValueError
logger = logging.getLogger(__name__)
class RESTResponse(io.IOBase):
def __init__(self, resp):
self.urllib3_response = resp
self.status = resp.status
self.reason = resp.reason
self.data = resp.data
def getheaders(self):
"""Returns a dictionary of the response headers."""
return self.urllib3_response.getheaders()
def getheader(self, name, default=None):
"""Returns a given response header."""
return self.urllib3_response.getheader(name, default)
class RESTClientObject(object):
def __init__(self, configuration, pools_size=4, maxsize=None):
# urllib3.PoolManager will pass all kw parameters to connectionpool
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501
# maxsize is the number of requests to host that are allowed in parallel # noqa: E501
# Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501
# cert_reqs
if configuration.verify_ssl:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
addition_pool_args = {}
if configuration.assert_hostname is not None:
addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501
if configuration.retries is not None:
addition_pool_args['retries'] = configuration.retries
if configuration.socket_options is not None:
addition_pool_args['socket_options'] = configuration.socket_options
if maxsize is None:
if configuration.connection_pool_maxsize is not None:
maxsize = configuration.connection_pool_maxsize
else:
maxsize = 4
# https pool manager
if configuration.proxy:
self.pool_manager = urllib3.ProxyManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=configuration.ssl_ca_cert,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
proxy_url=configuration.proxy,
proxy_headers=configuration.proxy_headers,
**addition_pool_args
)
else:
self.pool_manager = urllib3.PoolManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=configuration.ssl_ca_cert,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
**addition_pool_args
)
def request(self, method, url, query_params=None, headers=None,
body=None, post_params=None, _preload_content=True,
_request_timeout=None):
"""Perform requests.
:param method: http request method
:param url: http request url
:param query_params: query parameters in the url
:param headers: http request headers
:param body: request json body, for `application/json`
:param post_params: request post parameters,
`application/x-www-form-urlencoded`
and `multipart/form-data`
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
"""
method = method.upper()
assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
'PATCH', 'OPTIONS']
if post_params and body:
raise ApiValueError(
"body parameter cannot be used with post_params parameter."
)
post_params = post_params or {}
headers = headers or {}
timeout = None
if _request_timeout:
if isinstance(_request_timeout, (int, float)): # noqa: E501,F821
timeout = urllib3.Timeout(total=_request_timeout)
elif (isinstance(_request_timeout, tuple) and
len(_request_timeout) == 2):
timeout = urllib3.Timeout(
connect=_request_timeout[0], read=_request_timeout[1])
if 'Content-Type' not in headers:
headers['Content-Type'] = 'application/json'
try:
# For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
if query_params:
url += '?' + urlencode(query_params)
if re.search('json', headers['Content-Type'], re.IGNORECASE):
request_body = None
if body is not None:
request_body = json.dumps(body)
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=False,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'multipart/form-data':
# must del headers['Content-Type'], or the correct
# Content-Type which generated by urllib3 will be
# overwritten.
del headers['Content-Type']
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=True,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
# Pass a `string` parameter directly in the body to support
# other content types than Json when `body` argument is
# provided in serialized form
elif isinstance(body, str) or isinstance(body, bytes):
request_body = body
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
else:
# Cannot generate the request from given parameters
msg = """Cannot prepare a request message for provided
arguments. Please check that your arguments match
declared content type."""
raise ApiException(status=0, reason=msg)
# For `GET`, `HEAD`
else:
r = self.pool_manager.request(method, url,
fields=query_params,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
except urllib3.exceptions.SSLError as e:
msg = "{0}\n{1}".format(type(e).__name__, str(e))
raise ApiException(status=0, reason=msg)
if _preload_content:
r = RESTResponse(r)
# log response body
logger.debug("response body: %s", r.data)
if not 200 <= r.status <= 299:
if r.status == 401:
raise UnauthorizedException(http_resp=r)
if r.status == 403:
raise ForbiddenException(http_resp=r)
if r.status == 404:
raise NotFoundException(http_resp=r)
if 500 <= r.status <= 599:
raise ServiceException(http_resp=r)
raise ApiException(http_resp=r)
return r
def GET(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("GET", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def HEAD(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("HEAD", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def OPTIONS(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("OPTIONS", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def DELETE(self, url, headers=None, query_params=None, body=None,
_preload_content=True, _request_timeout=None):
return self.request("DELETE", url,
headers=headers,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def POST(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("POST", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PUT(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PUT", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PATCH(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PATCH", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
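# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated client). It mirrors the
# REST how-to from the module docstring: first exchange client credentials for
# a JWT bearer token, then call a v1 endpoint through RESTClientObject. The
# client id/secret, the token field name and the _StubConfiguration class are
# assumptions for this example; normally the package's own Configuration and
# ApiClient classes supply these values.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    token_response = urllib3.PoolManager().request(
        "POST",
        "https://id.subspace.com/oauth/token",
        body=json.dumps({
            "client_id": "YOURCLIENTID",
            "client_secret": "YOURCLIENTSECRET",
            "audience": "https://api.subspace.com/",
            "grant_type": "client_credentials",
        }),
        headers={"content-type": "application/json"},
    )
    access_token = json.loads(token_response.data).get("access_token", "")

    class _StubConfiguration(object):
        # minimal stand-in carrying only the attributes RESTClientObject reads
        verify_ssl = True
        assert_hostname = None
        retries = None
        socket_options = None
        connection_pool_maxsize = None
        proxy = None
        proxy_headers = None
        ssl_ca_cert = None
        cert_file = None
        key_file = None

    client = RESTClientObject(_StubConfiguration())
    accelerators = client.GET(
        "https://api.subspace.com/v1/accelerator",
        headers={"Authorization": "Bearer " + access_token},
    )
    print(accelerators.status, accelerators.data)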
|
import pytest
from app.calculation import add, sub, mul, div, BankAccount, InsufficientFund
## Creating a Fixture for our Bank account class
@pytest.fixture
def zero_bank_account():
return BankAccount()
@pytest.fixture
def bank_account():
return BankAccount(50)
@pytest.mark.parametrize(
"num1, num2, result",
[ (3,3,6),
(4,2,6),
(4,5,9),
(1,3,4) ]
)
def test_add(num1, num2, result):
assert add(num1, num2) == result
def test_sub():
assert sub(9,5) == 4
def test_mul():
assert mul(1,2) == 2
def test_div():
assert div(4,2) == 2
def test_bank_set_init_amount(bank_account):
assert bank_account.balance == 50
def test_bank_default_amount(zero_bank_account):
assert zero_bank_account.balance == 0
def test_bank_withdraw_amount(bank_account):
bank_account.withdraw(50)
assert bank_account.balance == 0
def test_bank_deposit_amount(bank_account):
bank_account.deposit(10)
assert bank_account.balance == 60
def test_bank_interest_amount(bank_account):
bank_account.collect_interest()
assert round(bank_account.balance, 2) == 55.00
@pytest.mark.parametrize(
"deposited, withdraw, result",
[ (500,300,200),
(400,200,200),
(400,400,0),
(1000,300,700) ]
)
def test_bank_transaction(zero_bank_account, deposited, withdraw, result):
zero_bank_account.deposit(deposited)
zero_bank_account.withdraw(withdraw)
assert zero_bank_account.balance == result
def test_insufficient_funds(bank_account):
with pytest.raises(InsufficientFund):
bank_account.withdraw(200)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-04-12 18:04
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('pfb_analysis', '0014_auto_20170412_1611'),
('pfb_analysis', '0015_add_neighborhood_geoms'),
]
operations = [
]
|
"""
Tool to generate prices from all sources
"""
import json
import logging
import multiprocessing
import pathlib
from typing import Any, Dict, List, Tuple, Union
import mtgjson4
from mtgjson4.provider import cardhoader, tcgplayer
LOGGER = logging.getLogger(__name__)
def build_price_data(card: Dict[str, Any]) -> Tuple[str, Dict[str, Dict[str, str]]]:
"""
Build up price info for a single card and add it to global index
:param card: Card to get price data of
    :return: Tuple of the card's uuid and its price data
"""
LOGGER.info(f"Building price for {card['name']}")
return card["uuid"], {"prices": cardhoader.get_card_data(card["uuid"])}
class MtgjsonPrice:
"""
Class to construct MTGJSON Pricing data for additional download files.
"""
def __init__(self, all_printings_path: Union[str, pathlib.Path]) -> None:
"""
Initializer to load in cards and establish pricing database
:param all_printings_path: Path to AllPrintings, without ending (needs JSON and SQLITE there)
"""
self.mtgjson_cards: List[Dict[str, Any]] = []
self.prices_output: Dict[str, Dict[str, Dict[str, str]]] = {}
self.all_printings_path = (
pathlib.Path(all_printings_path).expanduser().with_suffix(".json")
)
if not self.all_printings_path.exists():
LOGGER.error(
f"Pricing can't find AllPrintings.json at {self.all_printings_path}"
)
return
self.__load_mtgjson_cards_from_file()
self.__collate_pricing()
def __bool__(self) -> bool:
"""
See if the class has been properly initialized
:return: Class initialization status
"""
return bool(self.prices_output)
def get_price_database(self) -> str:
"""
Get price data dumps for output files
:return: Price database
"""
return json.dumps(
self.prices_output, sort_keys=True, indent=mtgjson4.PRETTY_OUTPUT.get()
)
def __load_mtgjson_cards_from_file(self) -> None:
"""
Load in all MTGJSON cards from AllPrintings.json file
"""
with self.all_printings_path.expanduser().open() as file:
all_sets = json.load(file)
for set_content in all_sets.values():
self.mtgjson_cards.extend(set_content.get("cards", []))
def __prime_databases(self) -> None:
"""
Prime price databases before multiprocessing iterations
This adds values from _now_ to the database
"""
tcgplayer.generate_and_store_tcgplayer_prices(self.all_printings_path)
cardhoader.get_card_data("")
def __collate_pricing(self) -> None:
"""
Build up price databases in parallel
"""
LOGGER.info("Priming Database")
self.__prime_databases()
LOGGER.info("Starting Pool")
with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
futures = pool.map(build_price_data, self.mtgjson_cards)
for card_price in futures:
self.prices_output[card_price[0]] = card_price[1]
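# ---------------------------------------------------------------------------
# Hedged usage sketch based on the docstrings above; the AllPrintings path is
# a placeholder (the matching .json file is expected to exist there), and
# building real data requires the cardhoader/tcgplayer providers to be
# reachable.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    price_builder = MtgjsonPrice("~/mtgjson/AllPrintings")
    if price_builder:  # __bool__ is True only when pricing data was collated
        print(price_builder.get_price_database())
    else:
        LOGGER.warning("No pricing data was built")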
|
import os
from msg_resolver import RESOLVER
from storage_engine import STORAGE_ENGINE
from cache import CACHE
from .util.config import Config
from .util.logging import logging_config
def setup_config():
config_path = os.getenv('config_path') or "./cfg/server.yml"
config = Config(config_path=config_path)
config.load_config()
def setup_logging():
logging_config(os.getenv("log.log_dir"))
def setup_msg_resolver(model="default"):
try:
resolver = RESOLVER[model]()
    except KeyError:
resolver = RESOLVER["default"]()
return resolver
def setup_storage_engine(model="default"):
try:
storage_engine = STORAGE_ENGINE[model]()
    except KeyError:
storage_engine = STORAGE_ENGINE["default"]()
storage_engine.init_storage()
return storage_engine
def setup_operation(model="default"):
import pkgutil
from importlib import import_module
import operation
for importer, modname, ispkg in pkgutil.iter_modules(operation.__path__):
import_module("operation.{}".format(modname))
def setup_cache():
cache = CACHE["default"]()
return cache
|
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.constants import e, m_p
def gaussian_generator(eps_geo, phase_space_tuple=('x', 'xp'), alpha=0, beta=1):
sigma = np.sqrt(eps_geo)
def generate(bunch):
n_macroparticles = bunch.n_macroparticles
x = np.random.normal(scale=sigma, size=n_macroparticles)
xp = np.random.normal(scale=sigma, size=n_macroparticles)
M = np.array([[np.sqrt(beta), 0],
[-alpha/np.sqrt(beta), 1./np.sqrt(beta)]])
x, xp = M[0,0]*x + M[0,1]*xp, M[1,0]*x + M[1,1]*xp
setattr(bunch, phase_space_tuple[0], x)
setattr(bunch, phase_space_tuple[1], xp)
return generate
class Bunch(object):
def __init__(self, n_macroparticles,
weight=1, charge=e, mass=m_p, gamma=1,
*phase_space_generators):
self.n_macroparticles = n_macroparticles
self.weight = weight
self.charge = charge
self.mass = mass
self.gamma = gamma
[generate(self) for generate in phase_space_generators]
def emittance_normalised(self, x, xp):
return np.sqrt(self.gamma**2 - 1) * \
np.sqrt( np.std(x**2)*np.std(xp**2) - np.std(x*xp)**2 )
    def epsn_x(self):
        return self.emittance_normalised(self.x, self.xp)
    def epsn_y(self):
        return self.emittance_normalised(self.y, self.yp)
    def epsn_z(self):
        return self.emittance_normalised(self.z, self.dp)
class Beam(object):
def __init__(self, bunches_list):
self.n_macroparticles = sum([b.n_macroparticles for b in bunches_list])
        # np.concatenate needs a sequence of arrays, not a bare generator;
        # per-bunch scalars (weight, charge, mass, gamma) are promoted with
        # np.atleast_1d so they concatenate cleanly
        self.weight = np.concatenate([np.atleast_1d(b.weight) for b in bunches_list])
        self.charge = np.concatenate([np.atleast_1d(b.charge) for b in bunches_list])
        self.mass = np.concatenate([np.atleast_1d(b.mass) for b in bunches_list])
        self.gamma = np.concatenate([np.atleast_1d(b.gamma) for b in bunches_list])
        self.x = np.concatenate([b.x for b in bunches_list])
        self.xp = np.concatenate([b.xp for b in bunches_list])
        self.y = np.concatenate([b.y for b in bunches_list])
        self.yp = np.concatenate([b.yp for b in bunches_list])
        self.z = np.concatenate([b.z for b in bunches_list])
        self.dp = np.concatenate([b.dp for b in bunches_list])
class MachineElement(object):
__metaclass__ = ABCMeta
@abstractmethod
def kick(self, beam):
pass
class TwissMap(MachineElement):
def __init__(self, plane='x',
alpha_0=0, beta_0=100, alpha_1=0, beta_1=100, dmu=0,
*detuners):
B = np.array([[1./np.sqrt(beta_0), 0],
[alpha_0/np.sqrt(beta_0), np.sqrt(beta_0)]])
R = np.array([[np.cos(dmu), np.sin(dmu)],
[-np.sin(dmu), np.cos(dmu)]])
B_inv = np.array([[np.sqrt(beta_1), 0],
[-alpha_1/np.sqrt(beta_1), 1./np.sqrt(beta_1)]])
I = np.array([[1, 0],
[0, 1]])
S = np.array([[0, 1],
[-1, 0]])
        self.plane = plane
        self.dmu = dmu
        self.M = np.dot(B_inv, np.dot(R, B))
        self.C = np.dot(B_inv, np.dot(I, B))
        self.S = np.dot(B_inv, np.dot(S, B))
    def kick(self, beam):
        if self.plane == 'x':
            beam.x, beam.xp = \
                (self.C[0,0]*np.cos(self.dmu) + self.S[0,0]*np.sin(self.dmu)) * beam.x \
                + (self.C[0,1]*np.cos(self.dmu) + self.S[0,1]*np.sin(self.dmu)) * beam.xp, \
                (self.C[1,0]*np.cos(self.dmu) + self.S[1,0]*np.sin(self.dmu)) * beam.x \
                + (self.C[1,1]*np.cos(self.dmu) + self.S[1,1]*np.sin(self.dmu)) * beam.xp
        if self.plane == 'y':
            beam.y, beam.yp = \
                (self.C[0,0]*np.cos(self.dmu) + self.S[0,0]*np.sin(self.dmu)) * beam.y \
                + (self.C[0,1]*np.cos(self.dmu) + self.S[0,1]*np.sin(self.dmu)) * beam.yp, \
                (self.C[1,0]*np.cos(self.dmu) + self.S[1,0]*np.sin(self.dmu)) * beam.y \
                + (self.C[1,1]*np.cos(self.dmu) + self.S[1,1]*np.sin(self.dmu)) * beam.yp
        if self.plane == 'z':
            beam.z, beam.dp = \
                (self.C[0,0]*np.cos(self.dmu) + self.S[0,0]*np.sin(self.dmu)) * beam.z \
                + (self.C[0,1]*np.cos(self.dmu) + self.S[0,1]*np.sin(self.dmu)) * beam.dp, \
                (self.C[1,0]*np.cos(self.dmu) + self.S[1,0]*np.sin(self.dmu)) * beam.z \
                + (self.C[1,1]*np.cos(self.dmu) + self.S[1,1]*np.sin(self.dmu)) * beam.dp
class RFMap(MachineElement):
def __init__(self, V, h, dphi):
pass
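# ---------------------------------------------------------------------------
# Illustrative sketch with assumed numbers: gaussian_generator returns a
# closure that fills one phase-space plane of a Bunch with a Gaussian
# distribution shaped by the Twiss parameters (alpha, beta), so a bunch can be
# built and its normalised emittances inspected like this.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    make_x = gaussian_generator(2e-6, ('x', 'xp'), alpha=0.5, beta=100.)
    make_y = gaussian_generator(2e-6, ('y', 'yp'), alpha=-0.5, beta=50.)
    bunch = Bunch(100000, 1, e, m_p, 7000., make_x, make_y)
    print("epsn_x:", bunch.epsn_x())
    print("epsn_y:", bunch.epsn_y())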
|
# -------------------------------------------------------------------------
# Copyright (c) 2020 Supun Nakandala. All Rights Reserved.
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Base class for SV implementation.
"""
import numpy as np
import scipy
import torch
from ._physical_operator import PhysicalOperator
class SVC(PhysicalOperator, torch.nn.Module):
def __init__(self, logical_operator, kernel, degree, sv, nv, a, b, gamma, coef0, classes, device):
super(SVC, self).__init__(logical_operator, classification=True)
self.kernel = kernel
self.degree = degree
self.gamma = gamma
self.regression = False
sv = sv.toarray() if type(sv) == scipy.sparse.csr.csr_matrix else sv
self.sv = torch.nn.Parameter(torch.from_numpy(sv).double(), requires_grad=False)
self.sv_t = torch.nn.Parameter(torch.transpose(self.sv, 0, 1), requires_grad=False)
self.sv_norm = torch.nn.Parameter(-self.gamma * (self.sv ** 2).sum(1).view(1, -1), requires_grad=False)
self.coef0 = coef0
self.n_features = sv.shape[1]
self.a = a
self.b = torch.nn.Parameter(torch.from_numpy(b.reshape(1, -1)).double(), requires_grad=False)
self.start = [sum(nv[:i]) for i in range(len(nv))]
self.end = [self.start[i] + nv[i] for i in range(len(nv))]
self.len_nv = len(nv)
true_classes, false_classes = zip(*[(i, j) for i in range(self.len_nv) for j in range(i + 1, self.len_nv)])
self.true_classes = torch.nn.Parameter(torch.IntTensor([true_classes]), requires_grad=False)
self.false_classes = torch.nn.Parameter(torch.IntTensor([false_classes]), requires_grad=False)
self.classes = torch.nn.Parameter(torch.IntTensor(classes), requires_grad=False)
self.perform_class_select = False
if min(classes) != 0 or max(classes) != len(classes) - 1:
self.perform_class_select = True
self.n_classes = len(classes)
def forward(self, x):
x = x.double()
if self.kernel == "linear":
k = torch.mm(x, self.sv_t)
elif self.kernel == "rbf":
            # using quadratic expansion -- susceptible to round-off errors
# http://www.robots.ox.ac.uk/~albanie/notes/Euclidean_distance_trick.pdf
x_norm = -self.gamma * (x ** 2).sum(1).view(-1, 1)
k = torch.exp(x_norm + self.sv_norm + 2.0 * self.gamma * torch.mm(x, self.sv_t).double())
elif self.kernel == "sigmoid":
k = torch.sigmoid(self.gamma * torch.mm(x, self.sv_t) + self.coef0)
else: # poly kernel
k = torch.pow(self.gamma * torch.mm(x, self.sv_t) + self.coef0, self.degree)
c = [
sum(self.a[i, p] * k[:, p : p + 1] for p in range(self.start[j], self.end[j]))
+ sum(self.a[j - 1, p] * k[:, p : p + 1] for p in range(self.start[i], self.end[i]))
for i in range(self.len_nv)
for j in range(i + 1, self.len_nv)
]
c = torch.cat(c, dim=1) + self.b
if self.n_classes == 2:
class_ids = torch.gt(c, 0.0).int().flatten()
else:
votes = torch.where(c > 0, self.true_classes, self.false_classes)
# TODO mode is still not implemented for GPU backend.
votes = votes.data.cpu()
class_ids, _ = torch.mode(votes, dim=1)
# No class probabilities in SVC.
if self.perform_class_select:
temp = torch.index_select(self.classes, 0, class_ids.long())
return temp, temp
else:
return class_ids, class_ids
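# ---------------------------------------------------------------------------
# Illustrative sketch of the quadratic-expansion trick used in the "rbf"
# branch of SVC.forward above: ||x - s||^2 = ||x||^2 + ||s||^2 - 2*x.s, so all
# pairwise squared distances (and the RBF kernel) come from one matrix multiply
# plus two row-norm vectors. The helper name is an assumption for the example.
# ---------------------------------------------------------------------------
def _rbf_kernel_via_quadratic_expansion(x, sv, gamma):
    """Return exp(-gamma * ||x_i - sv_j||^2) without an explicit pairwise loop."""
    x_norm = (x ** 2).sum(1).view(-1, 1)    # ||x_i||^2, shape (n, 1)
    sv_norm = (sv ** 2).sum(1).view(1, -1)  # ||sv_j||^2, shape (1, m)
    sq_dists = x_norm + sv_norm - 2.0 * torch.mm(x, sv.t())
    return torch.exp(-gamma * sq_dists)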
|
import unittest
import json
from app import app
class TestAPI(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
self.app.testing = True
def test_cypher_request(self):
url = \
'/api/?url=https://en.wikipedia.org/' + \
'wiki/ROT13&q=To%20get%20to%20the%20other%20side'
expected_message = \
"Warning: String 'To get to the other side' " + \
"was found on website https://en.wikipedia.org/wiki/ROT13 " + \
"within the body tag"
response = self.app.get(url)
rv = json.loads(response.data)
self.assertTrue(response.status_code == 200)
self.assertEqual(rv['message'], expected_message)
if __name__ == '__main__':
unittest.main()
|
# Copyright (c) 2021-2022 Dai HBG
"""
The classes defined in this module compute summary statistics for a signal, such as its mean IC.
Changelog
2021-08-30
- Added: mean IC, signal autocorrelation, IC_IR, and the fraction of positive ICs
2021-09-08
- Added: average returns of the top 1, 5 and 10 ranked stocks, to gauge the long-only performance of a signal
2021-10-09
- Updated: the average-return statistics should also include excess returns
2021-10-10
- Updated: stats should store the full excess-return series of the top 50 ranks, so any desired rank can be selected later
2021-10-15
- Updated: for weekly tests, the fix_weekday field can be used to compute the mean IC for a given weekday
2022-01-10
- Updated: added alternative IC evaluation methods
- Updated: limit-up/limit-down filtering now takes an extra indicator matrix to decide which entries to drop
"""
import numpy as np
class IcCalculator:
def __init__(self):
pass
def cal(self, signal: np.array, ret: np.array, top: np.array, method: str = 'IC', zdt: bool = False,
zdt_top: np.array = None, param: dict = None) -> np.array:
"""
        :param signal: signal matrix
        :param ret: return matrix
        :param top: validity (universe) mask matrix
        :param method: IC calculation method
        :param zdt: whether to filter out limit-up/limit-down entries
        :param zdt_top: limit-up/limit-down (zdt) indicator matrix used for filtering
        :param param: extra parameters
        :return:
"""
ics = np.zeros(len(signal))
for i in range(len(signal)):
sig = signal[i, top[i]]
r = ret[i, top[i]]
if np.sum(~np.isnan(sig)) < 2 or np.sum(~np.isnan(r)) < 2:
continue
            if method == 'IC':  # plain cross-sectional IC
                se = (~np.isnan(sig)) & (~np.isnan(r))  # keep entries where neither value is NaN
                if zdt:
                    se = se & (zdt_top[i, top[i]])  # filter out limit-up/limit-down entries
if np.sum(se) >= 2:
ics[i] = np.corrcoef(sig[se], r[se])[0, 1]
else:
ics[i] = 0
            elif method == 'long_IC':  # long-side IC
sig -= np.nanmean(sig)
r -= np.nanmean(r)
sig[np.isnan(sig)] = 0
r[np.isnan(r)] = 0
if i > 0:
                    se = abs(ret[i - 1, top[i]]) < 0.099  # filter out limit-up/limit-down
if np.sum(se & (sig > 0)) >= 2:
cov = np.sum(sig[se & (sig > 0)] * r[se & (sig > 0)])
ics[i] = cov / (np.std(r[se]) * np.std(sig[se]))
else:
ics[i] = 0
else:
cov = np.sum(sig[sig > 0] * r[sig > 0])
ics[i] = cov / (np.std(r) * np.std(sig))
            elif method == 'poly_IC':  # give different weights to different parts of the cross-section
if param is None:
degree = 2
print('no param, set degree to 2')
else:
try:
degree = param['degree']
except KeyError:
degree = 2
print('no key \'degree\', set degree to 2')
sig -= np.nanmean(sig)
r -= np.nanmean(r)
sig[np.isnan(sig)] = 0
r[np.isnan(r)] = 0
sig[sig > 0] = sig[sig > 0] ** degree
sig[sig < 0] = -(-sig[sig < 0]) ** degree
if i > 0:
                    se = abs(ret[i - 1, top[i]]) < 0.099  # filter out limit-up/limit-down
if np.sum(se) >= 2:
ics[i] = np.corrcoef(sig[se], r[se])[0, 1]
else:
ics[i] = 0
else:
ics[i] = np.corrcoef(sig, r)[0, 1]
elif method == 'long_poly_IC':
if param is None:
degree = 2
print('no param, set degree to 2')
else:
try:
degree = param['degree']
except KeyError:
degree = 2
print('no key \'degree\', set degree to 2')
sig -= np.nanmean(sig)
r -= np.nanmean(r)
sig[np.isnan(sig)] = 0
r[np.isnan(r)] = 0
sig[sig > 0] = sig[sig > 0] ** degree
sig[sig < 0] = -(-sig[sig < 0]) ** degree
if i > 0:
                    se = abs(ret[i - 1, top[i]]) < 0.099  # filter out limit-up/limit-down
if np.sum(se & (sig > 0)) >= 2:
cov = np.sum(sig[se & (sig > 0)] * r[se & (sig > 0)])
ics[i] = cov / (np.std(r[se]) * np.std(sig[se]))
else:
ics[i] = 0
else:
cov = np.sum(sig[sig > 0] * r[sig > 0])
ics[i] = cov / (np.std(r) * np.std(sig))
            elif 'long_top_' in method:  # average return of the top-ranked long positions
                se = (~np.isnan(sig)) & (~np.isnan(r))  # keep entries where neither value is NaN
n = int(method.split('_')[-1])
# sig[np.isnan(sig)] = 0
r -= np.nanmean(r)
# r[np.isnan(r)] = 0
if i > 0:
                    se = se & (abs(ret[i - 1, top[i]]) < 0.099)  # filter out limit-up/limit-down
if np.sum(se) >= 2:
arg_sig = np.argsort(sig[se])
ics[i] = np.mean(r[se][arg_sig[-n:]])
else:
ics[i] = 0
else:
arg_sig = np.argsort(sig[se])
ics[i] = np.mean(r[se][arg_sig[-n:]])
return ics
class Stats:
def __init__(self):
self.ICs = []
self.mean_IC = 0
# self.auto_corr = 0
self.IC_IR = 0
self.positive_IC_rate = 0
        # self.top_n_ret = {i - 1: [] for i in range(1, 51)}  # stores long-side excess returns
        # self.top_n_raw_ret = {i - 1: [] for i in range(1, 51)}  # stores long-side raw returns
class AutoTester:
def __init__(self):
self.icc = IcCalculator()
def test(self, signal: np.array, ret: np.array, top: np.array = None, method: str = 'IC',
param: dict = None, zdt: bool = True, zdt_top: np.array = None) -> Stats:
"""
        :param signal: signal matrix
        :param ret: return matrix with the same shape as the signal matrix, i.e. already delayed along the time axis
        :param top: positions of the stocks entering the cross-section at each time step
        :param zdt: whether to filter out limit-up/limit-down (zdt) entries
        :param zdt_top: the return/indicator matrix corresponding to zdt
        :return: an instance of the Stats class
"""
if top is None:
top = signal != 0
if zdt_top is None:
            zdt_top = np.ones(top.shape, dtype=bool)  # boolean mask so it can be combined with "&"
ics = []
auto_corr = []
assert len(signal) == len(ret)
assert len(signal) == len(top)
stats = Stats()
ics = self.icc.cal(signal=signal, ret=ret, top=top, method=method, param=param, zdt=zdt, zdt_top=zdt_top)
# print(ics)
# ics = np.array(ics)
ics[np.isnan(ics)] = 0
# auto_corr = np.array(auto_corr)
# auto_corr[np.isnan(auto_corr)] = 0
stats.ICs = ics
stats.mean_IC = np.mean(ics)
# stats.auto_corr = np.mean(auto_corr)
if len(ics) > 1:
stats.IC_IR = np.mean(ics) / np.std(ics)
stats.positive_IC_rate = np.sum(ics > 0) / len(ics)
return stats
@staticmethod
def cal_bin_ret(signal, ret, top=None, cell=20, zdt: bool = True, zdt_top: np.array = None):
signal[np.isnan(signal)] = 0
if top is None:
top = signal != 0
z = [[] for _ in range(cell)]
r = [[] for _ in range(cell)]
if zdt:
if zdt_top is None:
                zdt_top = np.ones(top.shape, dtype=bool)  # boolean mask so it can be combined with "&"
for i in range(len(signal)):
sig = signal[i, top[i]].copy()
rr = ret[i, top[i]].copy()
se = (~np.isnan(sig)) & (~np.isnan(rr))
if zdt:
se = se & zdt_top[i, top[i]]
sig[se] = sig[se] - np.mean(sig[se])
sig[se] = sig[se] / np.std(sig[se])
rr[se] -= np.mean(rr[se])
sig = sig[se]
rr = rr[se]
            # assign to quantile bins
signal_ret = []
arg_sig = np.argsort(sig)
pos = 0
while pos < cell:
if pos < cell - 1:
z[pos] += list(sig[arg_sig[int(len(sig) / cell * pos): int(len(sig) / cell * (pos + 1))]])
r[pos] += list(rr[arg_sig[int(len(sig) / cell * pos): int(len(sig) / cell * (pos + 1))]])
else:
z[pos] += list(sig[arg_sig[int(len(sig) / cell * pos):]])
r[pos] += list(rr[arg_sig[int(len(sig) / cell * pos):]])
pos += 1
return z, r
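# ---------------------------------------------------------------------------
# Hedged usage sketch with random data; the shapes are assumptions: signal,
# ret and top are (T, N) matrices over T dates and N stocks, with top marking
# which stocks are in the cross-section on each date.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    np.random.seed(0)
    T, N = 250, 500
    signal = np.random.randn(T, N)
    ret = 0.05 * signal + np.random.randn(T, N)  # returns weakly driven by the signal
    top = np.ones((T, N), dtype=bool)
    tester = AutoTester()
    stats = tester.test(signal, ret, top=top, method='IC', zdt=False)
    print('mean IC: %.4f, IC_IR: %.4f, positive IC rate: %.2f'
          % (stats.mean_IC, stats.IC_IR, stats.positive_IC_rate))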
|
from __future__ import print_function
from __future__ import absolute_import
import datetime
import io
import csv
import json
from sqlalchemy import Column, Integer, String, DateTime, Float, Text, func
from .db import Base
from .psiturk_config import PsiturkConfig
from itertools import groupby
config = PsiturkConfig()
config.load_config()
TABLENAME = config.get('Database Parameters', 'table_name')
CODE_VERSION = config.get('Task Parameters', 'experiment_code_version')
class Participant(Base):
"""
Object representation of a participant in the database.
"""
__tablename__ = TABLENAME
uniqueid = Column(String(128), primary_key=True)
assignmentid = Column(String(128), nullable=False)
workerid = Column(String(128), nullable=False)
hitid = Column(String(128), nullable=False)
ipaddress = Column(String(128))
browser = Column(String(128))
platform = Column(String(128))
language = Column(String(128))
cond = Column(Integer)
counterbalance = Column(Integer)
codeversion = Column(String(128))
beginhit = Column(DateTime)
beginexp = Column(DateTime)
endhit = Column(DateTime)
bonus = Column(Float, default=0)
status = Column(Integer, default=1)
mode = Column(String(128))
if 'postgres://' in config.get('Database Parameters', 'database_url').lower():
datastring = Column(Text)
else:
datastring = Column(Text(4294967295))
def __init__(self, **kwargs):
self.uniqueid = "{workerid}:{assignmentid}".format(**kwargs)
self.status = 1
self.codeversion = CODE_VERSION
self.beginhit = datetime.datetime.now()
for key in kwargs:
setattr(self, key, kwargs[key])
def __repr__(self):
return "Subject(uniqueid|%s, condition|%s, status|%s, codeversion|%s)" % (
self.uniqueid,
self.cond,
self.status,
self.codeversion)
def get_trial_data(self):
try:
trialdata = json.loads(self.datastring)["data"]
except (TypeError, ValueError):
# There was no data to return.
print(("No trial data found in record:", self))
return("")
try:
ret = []
with io.BytesIO() as outstring:
csvwriter = csv.writer(outstring)
for trial in trialdata:
csvwriter.writerow((
self.uniqueid,
trial["current_trial"],
trial["dateTime"],
json.dumps(trial["trialdata"])))
ret = outstring.getvalue()
return ret
except:
print(("Error reading record:", self))
return("")
def get_event_data(self):
try:
eventdata = json.loads(self.datastring)["eventdata"]
except (ValueError, TypeError):
# There was no data to return.
print(("No event data found in record:", self))
return("")
try:
ret = []
with io.BytesIO() as outstring:
csvwriter = csv.writer(outstring)
for event in eventdata:
csvwriter.writerow(
(self.uniqueid, event["eventtype"], event["interval"], event["value"], event["timestamp"]))
ret = outstring.getvalue()
return ret
except:
print(("Error reading record:", self))
return("")
def get_question_data(self):
try:
questiondata = json.loads(self.datastring)["questiondata"]
except (TypeError, ValueError):
# There was no data to return.
print(("No question data found in record:", self))
return("")
try:
ret = []
with io.BytesIO() as outstring:
csvwriter = csv.writer(outstring)
for question in questiondata:
csvwriter.writerow(
(self.uniqueid, question, questiondata[question]))
ret = outstring.getvalue()
return ret
except:
print(("Error reading record:", self))
return("")
@classmethod
def count_workers(cls, query=None, group_bys=['codeversion','mode','status']):
group_by_labels = group_bys + ['count']
group_bys = [getattr(cls, group_by) for group_by in group_bys]
if not query:
query = cls.query
for group_by in group_bys:
query = query.group_by(group_by).order_by(group_by.desc())
entities = group_bys + [func.count()]
query = query.with_entities(*entities)
results = query.all()
def list_to_grouped_dicts(results):
parsed_results = {}
for k, group in groupby(results, lambda row: row[0]): # k will be codeversion
group = list(group)
if len(group[0]) > 2:
parsed_results[k] = list_to_grouped_dicts([row[1:] for row in group])
else:
parsed_results.update({k:v for k,v in group})
return parsed_results
parsed_results = list_to_grouped_dicts(results)
zipped_results = [dict(zip(group_by_labels, row)) for row in results]
return zipped_results
@classmethod
def all_but_datastring(cls):
query = cls.query
query = query.with_entities(*[c for c in cls.__table__.c if c.name != 'datastring'])
return query.all()
class Hit(Base):
'''
'''
__tablename__ = 'amt_hit'
hitid = Column(String(128), primary_key=True)
|
# A VGG-like convolutional neural network
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.optimizers import SGD
# Generate dummy data
x_train = np.random.random((100, 100, 100, 3))
y_train = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
x_test = np.random.random((20, 100, 100, 3))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(20, 1)), num_classes=10)
model = Sequential()
# input 100x100 images with 3 channels -> (100, 100, 3) tensors.
# this applies 32 convolution filters of size 3x3 each.
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=(100, 100, 3)))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
model.fit(x_train, y_train, batch_size=32, epochs=10)
score = model.evaluate(x_test, y_test, batch_size=32)
|
# LINK FOR PROBLEM: http://codeforces.com/problemset/problem/742/A
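# The task asks for the last digit of 1378**n; powers of 8 end in the
# repeating cycle 8, 4, 2, 6 and 1378**0 == 1, so only n % 4 matters.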
n = int(raw_input())
if n == 0:
print '1'
else:
factor = n % 4
if factor == 1:
print '8'
elif factor == 2:
print '4'
elif factor == 3:
print '2'
else:
print '6'
|
# -*- coding: utf-8 -*-
"""
http://www.astroml.org/sklearn_tutorial/dimensionality_reduction.html
"""
print (__doc__)
import numpy as np
import copy
import cPickle as pickle
import matplotlib
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib import gridspec
import nslkdd.preprocessing as preprocessing
import nslkdd.data.model as model
from nslkdd.get_kdd_dataframe import attack_types
from nslkdd.get_kdd_dataframe import df_by_attack_type
import colorhex
import util
import logger
today = util.make_today_folder('./results')
today = "./results/2015-02-09"
plot_lim_max = 21
plot_lim_min = -21
def plot_true_labels(ax, data_per_true_labels, title="", highlight_point = None):
ax.set_title("True labels")
for i, p in enumerate(data_per_true_labels) :
x = np.array([t[0] for t in p])
y = np.array([t[1] for t in p])
if i == model.attack_normal:
colors = ['g'] * len(x)
ax.scatter(x, y, c=colors)
elif i != model.attack_normal and i != highlight_point:
colors = ['r'] * len(x)
ax.scatter(x, y, c=colors)
if highlight_point != None :
p = data_per_true_labels[highlight_point]
x = np.array([t[0] for t in p])
y = np.array([t[1] for t in p])
colors = ['y'] * len(x)
ax.scatter(x, y, c=colors)
def plot_normal_label(ax, data_per_true_labels, title=""):
ax.set_title(title)
for i, p in enumerate(data_per_true_labels) :
x = [t[0] for t in p]
y = [t[1] for t in p]
x = np.array(x)
y = np.array(y)
if i == model.attack_normal:
ax.scatter(x, y, c='g')
logger.debug("* mean/std of normal")
logger.debug(len(x))
logger.debug(np.mean(x))
logger.debug(np.mean(y))
logger.debug(np.std(x))
logger.debug(np.std(y))
def plot_abnormal_label(ax, data_per_true_labels, title=""):
ax.set_title(title)
for i, p in enumerate(data_per_true_labels) :
x = [t[0] for t in p]
y = [t[1] for t in p]
x = np.array(x)
y = np.array(y)
if i != model.attack_normal:
ax.scatter(x, y, c='r')
def get_data(title):
with open(today+'/'+title+'_cproj.pkl','rb') as input:
cproj = pickle.load(input)
with open(today+'/'+title+'_res.pkl','rb') as input:
res = pickle.load(input)
with open(today+'/'+title+'_df.pkl','rb') as input:
df = pickle.load(input)
with open(today+'/'+title+'_highlight_point.pkl','rb') as input:
highlight_point = pickle.load(input)
return cproj, res, df, highlight_point
def gen_plot(cproj, res, df, highlight_point, title):
_, attacks = preprocessing.get_header_data()
# figure setting
fig, axarr = plt.subplots(4, 4, sharex='col', sharey='row')
plt.subplots_adjust(wspace=0.4, hspace=0.4)
plt.xlim(plot_lim_min, plot_lim_max)
plt.ylim(plot_lim_min, plot_lim_max)
# plt.xlabel('interval')
# plt.ylabel('log(probability) + k')
# plt.title('Convergence plot')
# plt.grid(True)
data_per_true_labels = []
for i in range( len(attacks) ):
data_per_true_labels.append([])
true_attack_types = df["attack"].values.tolist()
for i, d in enumerate(cproj):
data_per_true_labels[true_attack_types[i]].append(d)
k = int( len(cproj) * 12/500.0)
clusters = [0] * k
cluster_xs = []
cluster_ys = []
for i in range(k):
cluster_xs.append([])
cluster_ys.append([])
cluster_xmeans = [0] * k
cluster_ymeans = [0] * k
cluster_xstds = [0] * k
cluster_ystds = [0] * k
for i, p in enumerate(cproj):
true_label = true_attack_types[i]
if true_label == model.attack_normal :
clusters[ res[i] ] = clusters[ res[i] ] + 1
else :
clusters[ res[i] ] = clusters[ res[i] ] - 1
cluster_xs[ res[i] ].append(p[0])
cluster_ys[ res[i] ].append(p[1])
logger.debug("* mean/std of cluster")
for i, cluster in enumerate(clusters) :
cluster_xmeans[i] = np.mean(cluster_xs[i])
cluster_ymeans[i] = np.mean(cluster_ys[i])
cluster_xstds[i] = np.std(cluster_xs[i])
cluster_ystds[i] = np.std(cluster_ys[i])
logger.debug("cluster : " + str(i))
logger.debug("- size [" + str(len(cluster_xs[i])) + "]")
logger.debug("- xmin [" + str(cluster_xmeans[i]) + "]")
logger.debug("- ymin [" + str(cluster_ymeans[i]) + "]")
logger.debug("- xstd [" + str(cluster_xstds[i]) + "]")
logger.debug("- ystd [" + str(cluster_ystds[i]) + "]")
ax1 = axarr[0, 0]
ax2 = axarr[0, 1]
ax3 = axarr[0, 2]
ax4 = axarr[0, 3]
ax5 = axarr[1, 0]
ax6 = axarr[1, 1]
ax7 = axarr[1, 2]
ax8 = axarr[1, 3]
ax9 = axarr[2, 0]
ax10 = axarr[2, 1]
ax11 = axarr[2, 2]
ax12 = axarr[2, 3]
ax13 = axarr[3, 0]
ax14 = axarr[3, 1]
ax15 = axarr[3, 2]
ax16 = axarr[3, 3]
plot_true_labels(ax1, data_per_true_labels, "True labels", highlight_point)
plot_normal_label(ax2, data_per_true_labels, "True normals")
plot_abnormal_label(ax3, data_per_true_labels, "True abnormal")
ax4.set_title("k-means")
for i, p in enumerate(cproj):
ax4.scatter(p[0], p[1], c=colorhex.codes[ res[i] ])
##############################################################
ax5.set_title("Normal res")
for i, p in enumerate(cproj):
if clusters[ res[i] ] >= 0 :
ax5.scatter(p[0], p[1], c='g')
##############################################################
ax6.set_title("Abnormal res")
for i, p in enumerate(cproj):
if clusters[ res[i] ] < 0 :
ax6.scatter(p[0], p[1], c='r')
##############################################################
ax7.set_title("Cluster 1")
for i, p in enumerate(cproj):
if res[i] == 0 :
ax7.scatter(p[0], p[1], c='g')
##############################################################
ax8.set_title("Cluster 2")
for i, p in enumerate(cproj):
if res[i] == 1 :
ax8.scatter(p[0], p[1], c='g')
##############################################################
# ax9.set_title("kmeans")
# kmean_plot(title, ax9)
##############################################################
ax9.set_title("Cluster 3")
for i, p in enumerate(cproj):
if res[i] == 2 :
ax9.scatter(p[0], p[1], c='g')
##############################################################
ax10.set_title("Cluster 4")
for i, p in enumerate(cproj):
if res[i] == 3 :
ax10.scatter(p[0], p[1], c='g')
##############################################################
ax11.set_title("Cluster 5")
for i, p in enumerate(cproj):
if res[i] == 4 :
ax11.scatter(p[0], p[1], c='g')
##############################################################
ax12.set_title("Cluster 6")
for i, p in enumerate(cproj):
if res[i] == 5 :
ax12.scatter(p[0], p[1], c='g')
##############################################################
ax13.set_title("Cluster 7")
for i, p in enumerate(cproj):
if res[i] == 6 :
ax13.scatter(p[0], p[1], c='g')
##############################################################
ax14.set_title("Cluster 8")
for i, p in enumerate(cproj):
if res[i] == 7 :
ax14.scatter(p[0], p[1], c='g')
##############################################################
ax15.set_title("Cluster 9")
for i, p in enumerate(cproj):
if res[i] == 8 :
ax15.scatter(p[0], p[1], c='g')
##############################################################
ax16.set_title("Cluster 10")
for i, p in enumerate(cproj):
if res[i] == 9 :
ax16.scatter(p[0], p[1], c='g')
##############################################################
print title + " has been saved"
fig.savefig(today + "/" + title + ".png")
plt.close()
fig, ax = plt.subplots(1, 1, sharex='col', sharey='row')
plt.subplots_adjust(wspace=0.4, hspace=0.4)
plt.xlim(plot_lim_min, plot_lim_max)
plt.ylim(plot_lim_min, plot_lim_max)
for i, p in enumerate(data_per_true_labels) :
x = np.array([t[0] for t in p])
y = np.array([t[1] for t in p])
if i == model.attack_normal:
colors = ['g'] * len(x)
ax.scatter(x, y, c=colors)
elif i != model.attack_normal and i != highlight_point:
colors = ['r'] * len(x)
ax.scatter(x, y, c=colors)
if highlight_point != None :
p = data_per_true_labels[highlight_point]
x = np.array([t[0] for t in p])
y = np.array([t[1] for t in p])
colors = ['y'] * len(x)
ax.scatter(x, y, c=colors)
plt.xlabel('Similarity score to normal')
plt.ylabel('Similarity score to abnormal')
plt.title('True labels')
plt.grid(True)
fig.savefig(today + "/" + title + "_true_.png")
plt.close()
fig, ax = plt.subplots(1, 1, sharex='col', sharey='row')
plt.subplots_adjust(wspace=0.4, hspace=0.4)
plt.xlim(plot_lim_min, plot_lim_max)
plt.ylim(plot_lim_min, plot_lim_max)
for i, p in enumerate(cproj):
if clusters[ res[i] ] >= 0 :
ax.scatter(p[0], p[1], c='g')
else :
ax.scatter(p[0], p[1], c='r')
plt.xlabel('Similarity score to normal')
plt.ylabel('Similarity score to abnormal')
plt.title('Prediected labels')
plt.grid(True)
fig.savefig(today + "/" + title + "_prediction_.png")
plt.close()
def gen_plots():
dataset_description = "training20_only"
title = dataset_description
cproj, res, df, highlight_point = get_data(title)
gen_plot(cproj, res, df, highlight_point, title)
dataset_description = "training20_test20"
for attack_type_index, attack_type in enumerate(model.attack_types) :
if attack_type_index == model.attack_normal : # why <= instead of !=
continue
title = dataset_description + "_" + attack_type
cproj, res, df, highlight_point = get_data(title)
gen_plot(cproj, res, df, highlight_point, title)
def gen_one_plot():
dataset_description = "training20_test20_guess_passwd"
title = dataset_description
cproj, res, df, highlight_point = get_data(title)
gen_plot(cproj, res, df, highlight_point, title)
def kmean_plot(title, ax):
_, attacks = preprocessing.get_header_data()
cproj, res, df, highlight_point = get_data(title)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
# plt.xlim(plot_lim_min, plot_lim_max)
# plt.ylim(plot_lim_min, plot_lim_max)
# ax = axarr
# ax.set_title("plot")
data_per_true_labels = []
for i in range( len(attacks) ):
data_per_true_labels.append([])
true_attack_types = df["attack"].values.tolist()
for i, d in enumerate(cproj):
data_per_true_labels[true_attack_types[i]].append(d)
k = 10
clusters = [0] * k
for i, p in enumerate(cproj):
true_label = true_attack_types[i]
if true_label == model.attack_normal :
clusters[ res[i] ] = clusters[ res[i] ] + 1
else :
clusters[ res[i] ] = clusters[ res[i] ] - 1
x = []
y = []
p = []
for ii, pp in enumerate(cproj):
if clusters[ res[ii] ] > 0 :
x.append(pp[0])
y.append(pp[1])
p.append(pp)
from sklearn.cluster import KMeans
data = p
h = .02
estimator = KMeans(init='k-means++', n_clusters=3)
estimator.fit(data)
centroids = estimator.cluster_centers_
x_min, x_max = min(x) + 1, max(x) - 1
y_min, y_max = min(y) + 1, max(y) - 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = estimator.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
    plt.imshow(Z, interpolation='nearest',
               extent=(xx.min(), xx.max(), yy.min(), yy.max()),
               cmap=plt.cm.Paired,
               aspect='auto', origin='lower')
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
colors = ['g'] * len(x)
ax.scatter(x, y, c=colors)
ax.scatter(np.mean(x), np.mean(y), c='r')
ax.scatter(np.median(x), np.median(y), c='b')
delta = 0.025
X = np.arange(plot_lim_min, plot_lim_max, delta)
Y = np.arange(plot_lim_min, plot_lim_max, delta)
X,Y = np.meshgrid(X,Y)
Z = mlab.bivariate_normal(X, Y, np.std(x), np.std(y), np.mean(x), np.mean(y))
plt.contour(X,Y,Z)
def test():
_, attacks = preprocessing.get_header_data()
dataset_description = "training20_only"
title = dataset_description
cproj, res, df, highlight_point = get_data(title)
fig, axarr = plt.subplots(1, 1, sharex='col', sharey='row')
plt.subplots_adjust(wspace=0.4, hspace=0.4)
plt.xlim(plot_lim_min, plot_lim_max)
plt.ylim(plot_lim_min, plot_lim_max)
ax = axarr
ax.set_title("plot")
data_per_true_labels = []
for i in range( len(attacks) ):
data_per_true_labels.append([])
true_attack_types = df["attack"].values.tolist()
for i, d in enumerate(cproj):
data_per_true_labels[true_attack_types[i]].append(d)
for i, p in enumerate(data_per_true_labels) :
x = np.array([t[0] for t in p])
y = np.array([t[1] for t in p])
if i == model.attack_normal:
from sklearn.cluster import KMeans
data = p
h = .02
estimator = KMeans(init='k-means++', n_clusters=3)
estimator.fit(data)
centroids = estimator.cluster_centers_
x_min, x_max = min(x) + 1, max(x) - 1
y_min, y_max = min(y) + 1, max(y) - 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = estimator.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plt.figure(1)
# plt.clf()
            plt.imshow(Z, interpolation='nearest',
                       extent=(xx.min(), xx.max(), yy.min(), yy.max()),
                       cmap=plt.cm.Paired,
                       aspect='auto', origin='lower')
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
colors = ['g'] * len(x)
ax.scatter(x, y, c=colors)
ax.scatter(np.mean(x), np.mean(y), c='r')
ax.scatter(np.median(x), np.median(y), c='b')
delta = 0.025
X = np.arange(plot_lim_min, plot_lim_max, delta)
Y = np.arange(plot_lim_min, plot_lim_max, delta)
X,Y = np.meshgrid(X,Y)
Z = mlab.bivariate_normal(X, Y, np.std(x), np.std(y), np.mean(x), np.mean(y))
plt.contour(X,Y,Z)
# for i, r in df.iterrows() :
# if r['attack']
# for i, p in enumerate(cproj):
# if res[i] == 8 :
# ax1.scatter(p[0], p[1], c='g')
# plt.xticks(())
# plt.yticks(())
plt.show()
plt.close()
if __name__ == '__main__':
""" Anomaly detection with spectral clustering algorithm.
First training set only, to see what would happen with only known classes
Next with test set, to see what would happen with only unknown classes
"""
import time
start = time.time()
logger.set_file(today + "/log_plots.txt")
gen_plots()
# gen_one_plot()
# test()
elapsed = (time.time() - start)
print "done in %s seconds" % (elapsed)
|
#!/usr/local/bin/python
import socket
import time
import exceptions as e
from config import *
def change_nick(irc):
    irc.send("NICK " + cfg['botnick'] + "\r\n")
    x = "_"
    while("Nickname is already in use" in irc.recv(4096)):
        irc.send("NICK " + cfg['botnick'] + x + "\r\n")
        x += "_"
        time.sleep(1)
def check_config():
for x,y in cfg.iteritems():
if not y:
if x not in cfg['optional_cfg']:
raise e.ConfigError("Found an error in config.py file")
def connect():
'''Connects to the server through a socket,
identifies the bot to the server and to NickServ'''
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #defines the socket
irc.connect((cfg['server'],cfg['port']))
irc.send("USER " + cfg['botnick'] + " 8 * :" + cfg['realname'] + "\r\n")
    name_bot(irc)
    irc.send("JOIN "+ cfg['channels'] +"\r\n")
    return irc
def name_bot(irc):
nick = change_nick(irc)
irc.send("PRIVMSG nickserv :identify %s %s\r\n" % (cfg['botnick'], cfg['password']))
def parse_cmd(text):
    resp = text.split()
    return resp
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 14 13:40:02 2021
@author: default
"""
import torch
from torch import nn, einsum
from einops import rearrange
def pair(x):
return (x, x) if not isinstance(x, tuple) else x
def expand_dim(t, dim, k):
t = t.unsqueeze(dim = dim)
expand_shape = [-1] * len(t.shape)
expand_shape[dim] = k
return t.expand(*expand_shape)
def rel_to_abs(x):
b, h, l, _, device, dtype = *x.shape, x.device, x.dtype
dd = {'device': device, 'dtype': dtype}
col_pad = torch.zeros((b, h, l, 1), **dd)
x = torch.cat((x, col_pad), dim=3)
flat_x = rearrange(x, 'b h l c -> b h (l c)')
flat_pad = torch.zeros((b, h, l - 1), **dd)
flat_x_padded = torch.cat((flat_x, flat_pad), dim=2)
final_x = flat_x_padded.reshape(b, h, l + 1, 2 * l - 1)
final_x = final_x[:, :, :l, (l - 1):]
return final_x
# the output shape of rel_to_abs: [batch_size, heads * height, width, width]
def relative_logits_1d(q, rel_k):
b, heads, h, w, dim = q.shape
logits = einsum('bhxyd,rd->bhxyr', q, rel_k)
logits = rearrange(logits, 'b h x y r -> b (h x) y r')
logits = rel_to_abs(logits)
logits = logits.reshape(b, heads, h, w, w)
logits = expand_dim(logits, dim=3, k=h)
return logits
# the output shape of relative_logits_1d: [batch_size, heads, height, height, width, width]
# dim_head = channels // heads
class RelPosEmb(nn.Module):
def __init__(self, fmap_size, dim_head):
super().__init__()
height, width = pair(fmap_size)
scale = dim_head ** -0.5
self.fmap_size = fmap_size
self.rel_height = nn.Parameter(torch.randn(2 * height - 1, dim_head) * scale)
self.rel_width = nn.Parameter(torch.randn(2 * width - 1, dim_head) * scale)
def forward(self, q):
h, w = self.fmap_size
q = rearrange(q, 'b h (x y) d -> b h x y d', x = h, y = w)
rel_logits_w = relative_logits_1d(q, self.rel_width)
rel_logits_w = rearrange(rel_logits_w, 'b h x i y j -> b h (x y) (i j)')
q = rearrange(q, 'b h x y d -> b h y x d')
rel_logits_h = relative_logits_1d(q, self.rel_height)
rel_logits_h = rearrange(rel_logits_h, 'b h x i y j -> b h (y x) (j i)')
return rel_logits_w + rel_logits_h
# the output shape of RelPosEmb: [batch_size, heads, height * width, height * width]
class AbsPosEmb(nn.Module):
def __init__(self, fmap_size, dim_head):
super().__init__()
height, width = pair(fmap_size)
scale = dim_head ** -0.5
self.height = nn.Parameter(torch.randn(height, dim_head) * scale)
self.width = nn.Parameter(torch.randn(width, dim_head) * scale)
def forward(self, q):
emb = rearrange(self.height, 'h d -> h () d') + rearrange(self.width, 'w d -> () w d')
emb = rearrange(emb, 'h w d -> (h w) d')
logits = einsum('bhid,jd->bhij', q, emb)
return logits
# the output shape of AbsPosEmb: [batch_size, heads, height * width, height * width]
class Attention(nn.Module):
def __init__(self, *,
dim,
fmap_size,
heads,
dim_head,
rel_pos_emb=True):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = heads * dim_head
self.to_qkv = nn.Conv2d(dim, inner_dim * 3, 1, bias=False)
rel_pos_class = RelPosEmb if rel_pos_emb else AbsPosEmb
self.pos_emb = rel_pos_class(fmap_size, dim_head)
self.softmax = nn.Softmax(dim=-1)
def forward(self, fmap):
heads, b, c, h, w = self.heads, *fmap.shape
q, k, v = self.to_qkv(fmap).chunk(3, dim=1)
q, k, v = map(lambda t: rearrange(t, 'b (h d) x y -> b h (x y) d', h = heads), (q, k, v))
q *= self.scale
sim = einsum('bhid,bhjd->bhij', q, k)
sim += self.pos_emb(q)
attn = self.softmax(sim)
out = einsum('bhij,bhjd->bhid', attn, v)
out = rearrange(out, 'b h (x y) d -> b (h d) x y', x=h, y=w)
return out
class BottleBlock(nn.Module):
def __init__(self, *,
dim,
fmap_size,
dim_out,
proj_factor,
stride,
heads,
dim_head,
rel_pos_emb,
activation=nn.ReLU()):
super().__init__()
if stride != 1 or dim != dim_out:
self.shortcut = nn.Sequential(nn.Conv2d(dim, dim_out, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(dim_out),
activation)
else:
self.shortcut = nn.Identity()
attn_dim_in = dim_out // proj_factor
attn_dim_out = heads * dim_head
self.net = nn.Sequential(nn.Conv2d(dim, attn_dim_in, 1, bias=False),
nn.BatchNorm2d(attn_dim_in),
activation,
Attention(dim=attn_dim_in,
fmap_size=fmap_size,
heads=heads,
dim_head=dim_head,
rel_pos_emb=rel_pos_emb),
nn.AvgPool2d((2, 2), (2, 2)) if stride != 1 else nn.Identity(),
nn.BatchNorm2d(attn_dim_out),
activation,
nn.Conv2d(attn_dim_out, dim_out, 1, bias=False),
                                 nn.BatchNorm2d(dim_out))
nn.init.zeros_(self.net[-1].weight)
self.activation = activation
def forward(self, x):
shortcut = self.shortcut(x)
x = self.net(x)
x += shortcut
return self.activation(x)
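# ---------------------------------------------------------------------------
# Hedged shape-check sketch; the numbers are assumptions. A BottleBlock
# configured like this maps a (2, 256, 8, 8) feature map to (2, 512, 8, 8),
# with self-attention running over the 8x8 grid using the relative position
# embedding defined above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    block = BottleBlock(dim=256, fmap_size=(8, 8), dim_out=512, proj_factor=4,
                        stride=1, heads=4, dim_head=64, rel_pos_emb=True)
    fmap = torch.randn(2, 256, 8, 8)
    print(block(fmap).shape)  # expected: torch.Size([2, 512, 8, 8])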
|
from django.contrib.gis.geos import GEOSGeometry
from rest_framework import serializers as rest_serializers
from geotrek.feedback import models as feedback_models
class ReportSerializer(rest_serializers.ModelSerializer):
class Meta:
model = feedback_models.Report
geo_field = 'geom'
id_field = 'id'
def validate_geom(self, attrs, source):
if source not in attrs:
return attrs
geom = attrs[source]
point = GEOSGeometry(geom, srid=4326)
attrs[source] = point
return attrs
|
TEST_ORDER_DATA = 100
class WithDoNothing:
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
|
#! /usr/bin/env python
import cv2 as cv
import numpy as np
import math
import rospy
from yaml_parser import Parser
from iros_vision.msg import ObjectCoords
MAIN_WINDOW_NAME = 'camera'
DEBUG = 'debug'
class Color_object:
def __init__(self, config):
self.debug = None
self.config = config
self.name = self.config['name']
self.min_size = self.config['min_size']
self.max_size = self.config['max_size']
self.color = self.config['colour']
self.hsv = None
self.hsv_vals = {}
self.pub = rospy.Publisher(
'/iros_vision_node/' + self.name, ObjectCoords, queue_size=1)
def publish_coords(self, x, y, width, height, area, angle):
message = ObjectCoords()
message.x = x
message.y = y
message.width = width
message.height = height
message.area = area
message.angle = angle
self.pub.publish(message)
# print('{0}: x = {1}\ty = {2}\tarea = {3}\tangle = {4}'.format(self.name, x, y, area, angle))
class Vision:
def __init__(self, file_path):
self.parser = Parser(file_path)
data = self.parser.data
self.color_objects = []
for d in data['object']:
new_color = Color_object(d)
col_lb = new_color.config['lower_bound']
col_ub = new_color.config['upper_bound']
new_color.hsv_vals = {'HL': col_lb[0], 'HU': col_ub[0], 'SL': col_lb[1], 'SU': col_ub[1], 'VL': col_lb[2], 'VU': col_ub[2]}
self.color_objects.append(new_color)
self.selected_object = self.color_objects[0]
self.frame = None
self.cap = cv.VideoCapture(0)
cv.namedWindow(MAIN_WINDOW_NAME)
cv.namedWindow(DEBUG)
if len(self.color_objects) > 1:
cv.createTrackbar('Key', DEBUG, 0, len(self.color_objects)-1, self.pick_object)
cv.createTrackbar('H (lower)', DEBUG, 0, 255, lambda val: self.update_from_trackbars(val, 'HL'))
cv.createTrackbar('H (upper)', DEBUG, 0, 255, lambda val: self.update_from_trackbars(val, 'HU'))
cv.createTrackbar('S (lower)', DEBUG, 0, 255, lambda val: self.update_from_trackbars(val, 'SL'))
cv.createTrackbar('S (upper)', DEBUG, 0, 255, lambda val: self.update_from_trackbars(val, 'SU'))
cv.createTrackbar('V (lower)', DEBUG, 0, 255, lambda val: self.update_from_trackbars(val, 'VL'))
cv.createTrackbar('V (upper)', DEBUG, 0, 255, lambda val: self.update_from_trackbars(val, 'VU'))
col_lb = self.selected_object.config['lower_bound']
col_ub = self.selected_object.config['upper_bound']
self.update_trackbars(col_lb[0], col_ub[0], \
col_lb[1], col_ub[1], \
col_lb[2], col_ub[2])
cv.setMouseCallback(MAIN_WINDOW_NAME, self.mouse_cb)
def pick_object(self, val):
self.selected_object = self.color_objects[val]
lower_b = self.selected_object.config['lower_bound']
upper_b = self.selected_object.config['upper_bound']
self.update_trackbars(lower_b[0], upper_b[0], lower_b[1], upper_b[1], lower_b[2], upper_b[2])
def update_from_trackbars(self, val, var):
self.selected_object.hsv_vals[var] = val
bound = None
index = -1
if var[0] == 'H':
index = 0
elif var[0] == 'S':
index = 1
elif var[0] == 'V':
index = 2
if var[1] == 'U':
bound = 'upper_bound'
elif var[1] == 'L':
bound = 'lower_bound'
self.parser.write_values(self.selected_object.config, bound, index, val)
def update_trackbars(self, hl, hu, sl, su, vl, vu):
cv.setTrackbarPos('H (lower)', DEBUG, hl)
cv.setTrackbarPos('H (upper)', DEBUG, hu)
cv.setTrackbarPos('S (lower)', DEBUG, sl)
cv.setTrackbarPos('S (upper)', DEBUG, su)
cv.setTrackbarPos('V (lower)', DEBUG, vl)
cv.setTrackbarPos('V (upper)', DEBUG, vu)
def create_hsv(self):
blurred = cv.GaussianBlur(self.frame, (11,11),0)
for color_obj in self.color_objects:
color_obj.hsv = cv.cvtColor(blurred, cv.COLOR_BGR2HSV)
lower_bound = (color_obj.hsv_vals['HL'], color_obj.hsv_vals['SL'], color_obj.hsv_vals['VL'])
upper_bound = (color_obj.hsv_vals['HU'], color_obj.hsv_vals['SU'], color_obj.hsv_vals['VU'])
color_obj.debug = cv.inRange(color_obj.hsv, lower_bound, upper_bound) # get binary
color_obj.debug = cv.erode(color_obj.debug, None, iterations=2)
color_obj.debug = cv.dilate(color_obj.debug, None, iterations=2)
def read_camera(self):
_, self.frame = self.cap.read()
if not self.cap.isOpened():
print('Cannot open camera')
exit()
self.frame = cv.resize(self.frame, (320, 240))
def mouse_cb(self, event, x, y, flags, param):
if event == cv.EVENT_LBUTTONDOWN:
self.click_reg = [(x,y)]
elif event == cv.EVENT_LBUTTONUP:
self.click_reg.append((x,y))
mean = cv.mean(self.selected_object.hsv[self.click_reg[0][1]:y, self.click_reg[0][0]:x])
h_mean = int(math.floor(mean[0]))
s_mean = int(math.floor(mean[1]))
v_mean = int(math.floor(mean[2]))
init_bound = 20
self.update_trackbars(h_mean - init_bound, h_mean + init_bound, \
s_mean - init_bound, s_mean + init_bound, \
v_mean - init_bound, v_mean + init_bound)
|
from cobald.controller.linear import LinearController
from cobald.controller.relative_supply import RelativeSupplyController
from cobald.interfaces import Pool
from usim import time
class SimulatedLinearController(LinearController):
def __init__(
self, target: Pool, low_utilisation=0.5, high_allocation=0.5, rate=1, interval=1
):
super(SimulatedLinearController, self).__init__(
target, low_utilisation, high_allocation, rate, interval
)
async def run(self):
while True:
self.regulate(interval=self.interval)
await (time + self.interval)
class SimulatedRelativeSupplyController(RelativeSupplyController):
def __init__(
self,
target: Pool,
low_utilisation=0.5,
high_allocation=0.5,
low_scale=0.9,
high_scale=1.1,
interval=1,
):
super(SimulatedRelativeSupplyController, self).__init__(
target=target,
low_utilisation=low_utilisation,
high_allocation=high_allocation,
low_scale=low_scale,
high_scale=high_scale,
interval=interval,
)
async def run(self):
while True:
self.regulate(interval=self.interval)
await (time + self.interval)
class SimulatedCostController(SimulatedLinearController):
def __init__(
self, target: Pool, low_utilisation=0.5, high_allocation=0.5, rate=1, interval=1
):
self.current_cost = 1
super(SimulatedCostController, self).__init__(
target, low_utilisation, high_allocation, rate, interval
)
def regulate(self, interval):
allocation = 0
for drone in self.target.drones:
allocation += drone.allocation
if self.target.supply - allocation <= 1:
if self.target.utilisation >= 0.8:
self.target.demand = int(allocation + self.current_cost)
self.current_cost += 1
else:
self.target.demand = allocation
if self.current_cost > 1:
self.current_cost -= 1
# self.target.demand = allocation + self.current_cost
# else:
# if self.current_cost > 1:
# self.current_cost -= 1
# self.target.demand = allocation + self.current_cost
|
import pandas as pd
import numpy
import datetime
print 'Stats for events'
df_events = pd.read_csv('data_v2/events.csv', parse_dates=['start', 'end', 'event_created'], date_parser=pd.core.tools.datetimes.to_datetime)
print 'Num events: %s' % len(df_events.index)
num_test = numpy.asscalar(df_events[df_events.name.str.contains('test', case=False)]['name'].count())
print 'contains test: %s' % num_test
df_events_notest = df_events[~df_events.name.str.contains('test', case=False)].copy()
df_events_notest['start'] = pd.to_datetime(df_events_notest['start'])
df_events_notest['end'] = pd.to_datetime(df_events_notest['end'])
df_events_notest['event_created'] = pd.to_datetime(df_events_notest['event_created'])
print df_events_notest.head()
print df_events_notest['event_created'].min(), df_events_notest['event_created'].max()
first = pd.Timestamp(datetime.date(2010, 1, 1))
last = pd.Timestamp(datetime.date(2018, 1, 1))
print df_events_notest[(df_events_notest['start'] < first) | (df_events_notest['start'] > last)]
print df_events_notest[(df_events_notest['end'] < first) | (df_events_notest['end'] > last)]
# set the duration of the event
df_events_notest['duration'] = df_events_notest.apply(lambda row: row['end'] - row['start'], axis=1)
print df_events_notest.head()
tickets = pd.read_csv('data_v2/tickets.csv')
purchases = pd.read_csv('data_v2/ticket_purchases.csv')
def tickets_for_event(row):
event_id = row['event_id']
purchase_ids = tickets.loc[tickets['TICKET_FOR_EVENT_ID'] == event_id]['PURCHASE_ID']
    return ','.join(str(v) for v in purchase_ids.tolist())
df_events_notest['num_tickets'] = df_events_notest.apply(lambda row: tickets_for_event(row), axis=1, reduce=True)
print df_events_notest.fillna('undefined').groupby(['num_tickets']).size().reset_index(name='count')
|
import cv2
cap = cv2.VideoCapture(0)  # can also be -1
print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
cap.set(3, 3000)
cap.set(4, 3000)
print(cap.get(3))
print(cap.get(4))
while (cap.isOpened()):
ret, frame = cap.read()
if ret == True:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imshow('frame', frame)
#cv2.imshow('frame', gray)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
cap.release()
cv2.destroyAllWindows()
|
#!/usr/bin/env python
# http://www.pythonchallenge.com/pc/return/5808.html
import Image,ImageEnhance, urllib2, StringIO
auth_handler = urllib2.HTTPBasicAuthHandler()
auth_handler.add_password(realm='inflate', uri='http://www.pythonchallenge.com/pc/return/', user='huge', passwd='file')
opener = urllib2.build_opener(auth_handler)
urllib2.install_opener(opener)
pic = urllib2.urlopen('http://www.pythonchallenge.com/pc/return/cave.jpg').read()
img = Image.open(StringIO.StringIO(pic))
img.show()
im = ImageEnhance.Brightness(img)
im.enhance(10).show()
|
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import numpy as np
import tensorflow as tf
import argparse
import os
def train(args):
    # Use the hyperparameters passed on the command line instead of hardcoding them
    batch_size = args.batch_size
    num_classes = args.num_classes
    epochs = args.epochs
# input image dimensions
img_rows, img_cols = 28, 28
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
save(model, args.model_dir)
def save(model, model_dir):
sess = K.get_session()
tf.saved_model.simple_save(
sess,
os.path.join(model_dir, 'model/1'),
inputs={'inputs': model.input},
outputs={t.name: t for t in model.outputs})
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# hyperparameters sent by the client are passed as command-line arguments to the script
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument('--num-classes', type=int, default=10)
parser.add_argument('--epochs', type=int, default=12)
# input data and model directories
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAINING'])
args, _ = parser.parse_known_args()
train(args)
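# Local-run sketch (an assumption: this script follows the SageMaker script-mode
# contract, which supplies SM_MODEL_DIR and SM_CHANNEL_TRAINING). Outside SageMaker,
# export both variables before launching, e.g.:
#   SM_MODEL_DIR=/tmp/model SM_CHANNEL_TRAINING=/tmp/data python train.py --epochs 1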
|
import sublime, sublime_plugin, subprocess, threading, time
class Bdo(sublime_plugin.TextCommand):
def run(self, cmd):
sublime.active_window().show_input_panel("bdo ", "update", self.execute, None, None)
def execute(self, cmd):
output = subprocess.Popen(
"echo " + cmd + " | nc -w 10 localhost 9090",
shell=True, stdout=subprocess.PIPE).stdout.read()
if len(output) > 0 and output != "Sending link update ...\n":
view = sublime.active_window().new_file()
edit = view.begin_edit()
view.insert(edit, 0, output)
view.end_edit(edit)
|
from typing import Tuple
from typing import List
from typing import Union
from .sprite import Sprite, Texture
from arcade.text import Text
from arcade.color import BLACK
import PIL
# Import the submodules explicitly so PIL.Image / PIL.ImageDraw / PIL.ImageFont resolve below
from PIL import Image, ImageDraw, ImageFont, ImageFilter
RGB = Union[Tuple[int, int, int], List[int]]
RGBA = Union[Tuple[int, int, int, int], List[int]]
Color = Union[RGB, RGBA]
Point = Union[Tuple[float, float], List[float]]
class PILText:
cache = {}
@staticmethod
def render_text(text: str,
start_x: float, start_y: float,
color: Color,
font_size: float = 12,
width: int = 0,
align: str = "left",
font_name=('calibri', 'arial'),
bold: bool = False,
italic: bool = False,
anchor_x: str = "left",
anchor_y: str = "baseline",
rotation: float = 0) -> Sprite:
font_size *= 1.25
scale_up = 5
scale_down = 5
font_size *= scale_up
key = f"{text}{color}{font_size}{width}{align}{font_name}{bold}{italic}"
label = Text()
# Figure out the font to use
font = None
# Font was specified with a string
if isinstance(font_name, str):
try:
font = PIL.ImageFont.truetype(font_name, int(font_size))
except OSError:
# print(f"1 Can't find font: {font_name}")
pass
if font is None:
try:
temp_font_name = f"{font_name}.ttf"
font = PIL.ImageFont.truetype(temp_font_name, int(font_size))
except OSError:
# print(f"2 Can't find font: {temp_font_name}")
pass
# We were instead given a list of font names, in order of preference
else:
for font_string_name in font_name:
try:
font = PIL.ImageFont.truetype(font_string_name, int(font_size))
# print(f"3 Found font: {font_string_name}")
except OSError:
# print(f"3 Can't find font: {font_string_name}")
pass
if font is None:
try:
temp_font_name = f"{font_name}.ttf"
font = PIL.ImageFont.truetype(temp_font_name, int(font_size))
except OSError:
# print(f"4 Can't find font: {temp_font_name}")
pass
if font is not None:
break
# Default font if no font
if font is None:
font_names = ("arial.ttf",
"/usr/share/fonts/truetype/freefont/FreeMono.ttf",
'/System/Library/Fonts/SFNSDisplay.ttf')
for font_string_name in font_names:
try:
font = PIL.ImageFont.truetype(font_string_name, int(font_size))
break
except OSError:
# print(f"5 Can't find font: {font_string_name}")
pass
# This is stupid. We have to have an image to figure out what size
# the text will be when we draw it. Of course, we don't know how big
# to make the image. Catch-22. So we just make a small image we'll trash
text_image_size = (10, 10)
image = PIL.Image.new("RGBA", text_image_size)
draw = PIL.ImageDraw.Draw(image)
# Get size the text will be
text_image_size = draw.multiline_textsize(text, font=font)
# Create image of proper size
text_height = text_image_size[1]
text_width = text_image_size[0]
if text_width == 0:
return None
image_start_x = 0
if width == 0:
width = text_image_size[0]
else:
# Wait! We were given a field width.
if align == "center":
# Center text on given field width
field_width = width * scale_up
text_image_size = field_width, text_height
image_start_x = (field_width - text_width) // 2
width = field_width
else:
image_start_x = 0
# If we draw a y at 0, then the text is drawn with a baseline of 0,
# cutting off letters that drop below the baseline. This shoves it
# up a bit.
image_start_y = - font_size * scale_up * 0.02
image = PIL.Image.new("RGBA", text_image_size)
draw = PIL.ImageDraw.Draw(image)
# Convert to tuple if needed, because the multiline_text does not take a
# list for a color
if isinstance(color, list):
color = tuple(color)
draw.multiline_text((image_start_x, image_start_y), text, color, align=align, font=font)
image = image.resize((width // scale_down, text_height // scale_down), resample=PIL.Image.LANCZOS)
text_sprite = Sprite()
text_sprite._texture = Texture(key)
text_sprite._texture.image = image
text_sprite.image = image
text_sprite.texture_name = key
text_sprite.width = image.width
text_sprite.height = image.height
if anchor_x == "left":
text_sprite.center_x = start_x + text_sprite.width / 2
elif anchor_x == "center":
text_sprite.center_x = start_x
elif anchor_x == "right":
text_sprite.right = start_x
else:
raise ValueError(f"anchor_x should be 'left', 'center', or 'right'. Not '{anchor_x}'")
if anchor_y == "top":
text_sprite.center_y = start_y + text_sprite.height / 2
elif anchor_y == "center":
text_sprite.center_y = start_y
elif anchor_y == "bottom" or anchor_y == "baseline":
text_sprite.bottom = start_y
else:
            raise ValueError(f"anchor_y should be 'top', 'center', 'bottom', or 'baseline'. Not '{anchor_y}'")
text_sprite.angle = rotation
return text_sprite
@staticmethod
def determine_dimensions(control, text: str,
font_size: float = 12,
width: int = 0,
align: str = "left",
font_name=('calibri', 'arial'),
bold: bool = False,
italic: bool = False,
anchor_x: str = "left",
anchor_y: str = "baseline",
rotation: float = 0
):
font_size *= 1.25
scale_up = 5
scale_down = 5
font_size *= scale_up
label = Text()
# Figure out the font to use
font = None
# Font was specified with a string
if isinstance(font_name, str):
try:
font = PIL.ImageFont.truetype(font_name, int(font_size))
except OSError:
# print(f"1 Can't find font: {font_name}")
pass
if font is None:
try:
temp_font_name = f"{font_name}.ttf"
font = PIL.ImageFont.truetype(temp_font_name, int(font_size))
except OSError:
# print(f"2 Can't find font: {temp_font_name}")
pass
# We were instead given a list of font names, in order of preference
else:
for font_string_name in font_name:
try:
font = PIL.ImageFont.truetype(font_string_name, int(font_size))
# print(f"3 Found font: {font_string_name}")
except OSError:
# print(f"3 Can't find font: {font_string_name}")
pass
if font is None:
try:
temp_font_name = f"{font_name}.ttf"
font = PIL.ImageFont.truetype(temp_font_name, int(font_size))
except OSError:
# print(f"4 Can't find font: {temp_font_name}")
pass
if font is not None:
break
# Default font if no font
if font is None:
font_names = ("arial.ttf",
"/usr/share/fonts/truetype/freefont/FreeMono.ttf",
'/System/Library/Fonts/SFNSDisplay.ttf')
for font_string_name in font_names:
try:
font = PIL.ImageFont.truetype(font_string_name, int(font_size))
break
except OSError:
# print(f"5 Can't find font: {font_string_name}")
pass
# This is stupid. We have to have an image to figure out what size
# the text will be when we draw it. Of course, we don't know how big
# to make the image. Catch-22. So we just make a small image we'll trash
text_image_size = (10, 10)
image = PIL.Image.new("RGBA", text_image_size)
draw = PIL.ImageDraw.Draw(image)
# Get size the text will be
text_image_size = draw.multiline_textsize(text, font=font)
# Create image of proper size
text_height = text_image_size[1]
text_width = text_image_size[0]
if text_width == 0:
return control.width, text_height
image_start_x = 0
if width == 0:
width = text_image_size[0]
else:
# Wait! We were given a field width.
if align == "center":
# Center text on given field width
field_width = width * scale_up
text_image_size = field_width, text_height
image_start_x = (field_width - text_width) // 2
width = field_width
else:
image_start_x = 0
# If we draw a y at 0, then the text is drawn with a baseline of 0,
# cutting off letters that drop below the baseline. This shoves it
# up a bit.
image_start_y = - font_size * scale_up * 0.02
image = PIL.Image.new("RGBA", text_image_size)
draw = PIL.ImageDraw.Draw(image)
draw.multiline_text((image_start_x, image_start_y), text, BLACK, align=align, font=font)
image = image.resize((width // scale_down, text_height // scale_down), resample=PIL.Image.LANCZOS)
text_sprite = Sprite()
text_sprite.image = image
text_sprite.width = image.width
text_sprite.height = image.height
return text_sprite.width, text_sprite.height
@staticmethod
def draw_text(control, text: str,
start_x: float, start_y: float,
color: Color,
font_size: float = 12,
width: int = 0,
align: str = "left",
font_name=('calibri', 'arial'),
bold: bool = False,
italic: bool = False,
anchor_x: str = "left",
anchor_y: str = "baseline",
rotation: float = 0,
blur_factor=0
):
# Scale the font up, so it matches with the sizes of the old code back
# when Pyglet drew the text.
font_size *= 1.25
# Text isn't anti-aliased, so we'll draw big, and then shrink
scale_up = 5
scale_down = 5
font_size *= scale_up
# If the cache gets too large, dump it and start over.
if len(PILText.cache) > 5000:
PILText.cache = {}
key = f"{text}{color}{font_size}{width}{align}{font_name}{bold}{italic}"
if key in PILText.cache:
label = PILText.cache[key]
text_sprite = label.text_sprite_list[0]
if anchor_x == "left":
text_sprite.center_x = start_x + text_sprite.width / 2
elif anchor_x == "center":
text_sprite.center_x = start_x
elif anchor_x == "right":
text_sprite.right = start_x
else:
raise ValueError(f"anchor_x should be 'left', 'center', or 'right'. Not '{anchor_x}'")
if anchor_y == "top":
text_sprite.center_y = start_y - text_sprite.height / 2
elif anchor_y == "center":
text_sprite.center_y = start_y
elif anchor_y == "bottom" or anchor_y == "baseline":
text_sprite.bottom = start_y
else:
                raise ValueError(f"anchor_y should be 'top', 'center', 'bottom', or 'baseline'. Not '{anchor_y}'")
text_sprite.angle = rotation
else:
label = Text()
# Figure out the font to use
font = None
# Font was specified with a string
if isinstance(font_name, str):
try:
font = PIL.ImageFont.truetype(font_name, int(font_size))
except OSError:
# print(f"1 Can't find font: {font_name}")
pass
if font is None:
try:
temp_font_name = f"{font_name}.ttf"
font = PIL.ImageFont.truetype(temp_font_name, int(font_size))
except OSError:
# print(f"2 Can't find font: {temp_font_name}")
pass
# We were instead given a list of font names, in order of preference
else:
for font_string_name in font_name:
try:
font = PIL.ImageFont.truetype(font_string_name, int(font_size))
# print(f"3 Found font: {font_string_name}")
except OSError:
# print(f"3 Can't find font: {font_string_name}")
pass
if font is None:
try:
temp_font_name = f"{font_name}.ttf"
font = PIL.ImageFont.truetype(temp_font_name, int(font_size))
except OSError:
# print(f"4 Can't find font: {temp_font_name}")
pass
if font is not None:
break
# Default font if no font
if font is None:
font_names = ("arial.ttf",
"/usr/share/fonts/truetype/freefont/FreeMono.ttf",
'/System/Library/Fonts/SFNSDisplay.ttf')
for font_string_name in font_names:
try:
font = PIL.ImageFont.truetype(font_string_name, int(font_size))
break
except OSError:
# print(f"5 Can't find font: {font_string_name}")
pass
# This is stupid. We have to have an image to figure out what size
# the text will be when we draw it. Of course, we don't know how big
# to make the image. Catch-22. So we just make a small image we'll trash
text_image_size = (10, 10)
image = PIL.Image.new("RGBA", text_image_size)
draw = PIL.ImageDraw.Draw(image)
# Get size the text will be
text_image_size = draw.multiline_textsize(text, font=font)
# Create image of proper size
text_height = text_image_size[1]
text_width = text_image_size[0]
image_start_x = 0
if width == 0:
width = text_image_size[0]
else:
# Wait! We were given a field width.
if align == "center":
# Center text on given field width
field_width = width * scale_up
text_image_size = field_width, text_height
image_start_x = (field_width - text_width) // 2
width = field_width
else:
image_start_x = 0
# If we draw a y at 0, then the text is drawn with a baseline of 0,
# cutting off letters that drop below the baseline. This shoves it
# up a bit.
image_start_y = - font_size * scale_up * 0.02
image = PIL.Image.new("RGBA", text_image_size)
draw = PIL.ImageDraw.Draw(image)
# Convert to tuple if needed, because the multiline_text does not take a
# list for a color
if isinstance(color, list):
color = tuple(color)
draw.multiline_text((image_start_x, image_start_y), text, color, align=align, font=font)
image = image.resize((width // scale_down, text_height // scale_down), resample=PIL.Image.LANCZOS)
if blur_factor > 0:
image = image.filter(ImageFilter.GaussianBlur(blur_factor))
text_sprite = Sprite()
text_sprite._texture = Texture(key)
text_sprite._texture.image = image
text_sprite.image = image
text_sprite.texture_name = key
text_sprite.width = image.width
text_sprite.height = image.height
if anchor_x == "left":
text_sprite.center_x = start_x + text_sprite.width / 2
elif anchor_x == "center":
text_sprite.center_x = start_x
elif anchor_x == "right":
text_sprite.right = start_x
else:
raise ValueError(f"anchor_x should be 'left', 'center', or 'right'. Not '{anchor_x}'")
if anchor_y == "top":
text_sprite.center_y = start_y + text_sprite.height / 2
elif anchor_y == "center":
text_sprite.center_y = start_y
elif anchor_y == "bottom" or anchor_y == "baseline":
text_sprite.bottom = start_y
else:
                raise ValueError(f"anchor_y should be 'top', 'center', 'bottom', or 'baseline'. Not '{anchor_y}'")
text_sprite.angle = rotation
from arcade.sprite_list import SpriteList
label.text_sprite_list = SpriteList()
label.text_sprite_list.append(text_sprite)
PILText.cache[key] = label
control.width = text_sprite.width
label.text_sprite_list.draw()
|
#
# PySNMP MIB module AVICI-FABRIC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/AVICI-FABRIC-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:16:31 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint")
aviciBayIndex, aviciSlotIndex = mibBuilder.importSymbols("AVICI-BAY-MIB", "aviciBayIndex", "aviciSlotIndex")
aviciMibs, = mibBuilder.importSymbols("AVICI-SMI", "aviciMibs")
AviciFabricLinkType, AviciSlotType, AviciBayType, AviciModuleName, AviciRevisionType = mibBuilder.importSymbols("AVICI-TC", "AviciFabricLinkType", "AviciSlotType", "AviciBayType", "AviciModuleName", "AviciRevisionType")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
Counter32, Unsigned32, TimeTicks, Gauge32, ObjectIdentity, iso, IpAddress, ModuleIdentity, MibIdentifier, NotificationType, Counter64, Bits, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Unsigned32", "TimeTicks", "Gauge32", "ObjectIdentity", "iso", "IpAddress", "ModuleIdentity", "MibIdentifier", "NotificationType", "Counter64", "Bits", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
DisplayString, TruthValue, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TruthValue", "TextualConvention")
aviciFabricMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2474, 1, 8))
aviciFabricMIB.setRevisions(('0009-05-13 00:00',))
if mibBuilder.loadTexts: aviciFabricMIB.setLastUpdated('000905130000Z')
if mibBuilder.loadTexts: aviciFabricMIB.setOrganization('Avici Systems Inc.')
aviciFabricObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1))
aviciFabricGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 2474, 1, 8, 2))
aviciFabricCounters = MibIdentifier((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 1))
aviciFabricConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 2))
aviciFabricRouting = MibIdentifier((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3))
aviciFabricHardwareVersion = MibScalar((1, 3, 6, 1, 4, 1, 2474, 1, 8, 2, 1), AviciRevisionType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricHardwareVersion.setStatus('current')
aviciFabricRoutingProtocolVersion = MibScalar((1, 3, 6, 1, 4, 1, 2474, 1, 8, 2, 2), AviciRevisionType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricRoutingProtocolVersion.setStatus('current')
aviciFabricCountersTable = MibTable((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 1, 1), )
if mibBuilder.loadTexts: aviciFabricCountersTable.setStatus('current')
aviciFabricCountersEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 1, 1, 1), ).setIndexNames((0, "AVICI-BAY-MIB", "aviciBayIndex"), (0, "AVICI-BAY-MIB", "aviciSlotIndex"), (0, "AVICI-FABRIC-MIB", "aviciFabricLinkIndex"))
if mibBuilder.loadTexts: aviciFabricCountersEntry.setStatus('current')
aviciFabricHiPriPktsInserted = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 1, 1, 1, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricHiPriPktsInserted.setStatus('current')
aviciFabricLoPriPktsInserted = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 1, 1, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricLoPriPktsInserted.setStatus('current')
aviciFabricHiPriPktsForwarded = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 1, 1, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricHiPriPktsForwarded.setStatus('current')
aviciFabricLoPriPktsForwarded = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 1, 1, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricLoPriPktsForwarded.setStatus('current')
aviciFabricHiPriFlitsForwarded = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 1, 1, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricHiPriFlitsForwarded.setStatus('current')
aviciFabricLoPriFlitsForwarded = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 1, 1, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricLoPriFlitsForwarded.setStatus('current')
aviciFabricExtractedCountersTable = MibTable((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 1, 2), )
if mibBuilder.loadTexts: aviciFabricExtractedCountersTable.setStatus('current')
aviciFabricExtractedCountersEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 1, 2, 1), ).setIndexNames((0, "AVICI-BAY-MIB", "aviciBayIndex"), (0, "AVICI-BAY-MIB", "aviciSlotIndex"), (0, "AVICI-FABRIC-MIB", "aviciFabricLinkIndex"))
if mibBuilder.loadTexts: aviciFabricExtractedCountersEntry.setStatus('current')
aviciFabricExtractedHiPriPktsSelf = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 1, 2, 1, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricExtractedHiPriPktsSelf.setStatus('current')
aviciFabricExtractedLoPriPktsSelf = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 1, 2, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricExtractedLoPriPktsSelf.setStatus('current')
aviciFabricExtractedHiPriPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 1, 2, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricExtractedHiPriPkts.setStatus('current')
aviciFabricExtractedLoPriPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 1, 2, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricExtractedLoPriPkts.setStatus('current')
aviciFabricExtractedHiPriFlits = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 1, 2, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricExtractedHiPriFlits.setStatus('current')
aviciFabricExtractedLoPriFlits = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 1, 2, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricExtractedLoPriFlits.setStatus('current')
aviciFabricLinkTable = MibTable((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 2, 1), )
if mibBuilder.loadTexts: aviciFabricLinkTable.setStatus('current')
aviciFabricLinkEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 2, 1, 1), ).setIndexNames((0, "AVICI-BAY-MIB", "aviciBayIndex"), (0, "AVICI-BAY-MIB", "aviciSlotIndex"), (0, "AVICI-FABRIC-MIB", "aviciFabricLinkIndex"))
if mibBuilder.loadTexts: aviciFabricLinkEntry.setStatus('current')
aviciFabricLinkIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 2, 1, 1, 1), AviciFabricLinkType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricLinkIndex.setStatus('current')
aviciFabricLinkDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 2, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 48))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricLinkDescr.setStatus('current')
aviciFabricLinkAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aviciFabricLinkAdminStatus.setStatus('current')
aviciFabricLinkOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("admin-down", 3), ("failed", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricLinkOperStatus.setStatus('current')
aviciFabricLinkSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 2, 1, 1, 5), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricLinkSpeed.setStatus('current')
aviciFabricLinkCRCErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 2, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricLinkCRCErrors.setStatus('current')
aviciFabricLinkDownTransitions = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 2, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricLinkDownTransitions.setStatus('current')
aviciFabricLinkLastChange = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 2, 1, 1, 8), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricLinkLastChange.setStatus('current')
aviciFabricChannelTable = MibTable((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 2, 2), )
if mibBuilder.loadTexts: aviciFabricChannelTable.setStatus('current')
aviciFabricChannelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 2, 2, 1), ).setIndexNames((0, "AVICI-BAY-MIB", "aviciBayIndex"), (0, "AVICI-BAY-MIB", "aviciSlotIndex"), (0, "AVICI-FABRIC-MIB", "aviciFabricChannelIndex"))
if mibBuilder.loadTexts: aviciFabricChannelEntry.setStatus('current')
aviciFabricChannelIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricChannelIndex.setStatus('current')
aviciFabricChannelHiPriScrubEvents = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 2, 2, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricChannelHiPriScrubEvents.setStatus('current')
aviciFabricChannelLoPriScrubEvents = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 2, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricChannelLoPriScrubEvents.setStatus('current')
aviciFabricChannelHiPriLastScrubTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 2, 2, 1, 4), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricChannelHiPriLastScrubTime.setStatus('current')
aviciFabricChannelLoPriLastScrubTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 2, 2, 1, 5), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricChannelLoPriLastScrubTime.setStatus('current')
aviciFabricLsaTable = MibTable((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 1), )
if mibBuilder.loadTexts: aviciFabricLsaTable.setStatus('current')
aviciFabricLsaEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 1, 1), ).setIndexNames((0, "AVICI-BAY-MIB", "aviciBayIndex"), (0, "AVICI-BAY-MIB", "aviciSlotIndex"), (0, "AVICI-FABRIC-MIB", "aviciFabricLsaOriginBay"), (0, "AVICI-FABRIC-MIB", "aviciFabricLsaOriginSlot"))
if mibBuilder.loadTexts: aviciFabricLsaEntry.setStatus('current')
aviciFabricLsaOriginBay = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 1, 1, 1), AviciBayType())
if mibBuilder.loadTexts: aviciFabricLsaOriginBay.setStatus('current')
aviciFabricLsaOriginSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 1, 1, 2), AviciSlotType())
if mibBuilder.loadTexts: aviciFabricLsaOriginSlot.setStatus('current')
aviciFabricLsaSequenceNum = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 1, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricLsaSequenceNum.setStatus('current')
aviciFabricLsaCreationTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 1, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricLsaCreationTime.setStatus('current')
aviciFabricLsaModuleReachable = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 1, 1, 5), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricLsaModuleReachable.setStatus('current')
aviciFabricLsaChannels = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 1, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricLsaChannels.setStatus('current')
aviciFabricLsaModulePlusX = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 1, 1, 7), AviciModuleName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricLsaModulePlusX.setStatus('current')
aviciFabricLsaModuleMinusX = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 1, 1, 8), AviciModuleName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricLsaModuleMinusX.setStatus('current')
aviciFabricLsaModulePlusY = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 1, 1, 9), AviciModuleName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricLsaModulePlusY.setStatus('current')
aviciFabricLsaModuleMinusY = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 1, 1, 10), AviciModuleName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricLsaModuleMinusY.setStatus('current')
aviciFabricLsaModulePlusZ = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 1, 1, 11), AviciModuleName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricLsaModulePlusZ.setStatus('current')
aviciFabricLsaModuleMinusZ = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 1, 1, 12), AviciModuleName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricLsaModuleMinusZ.setStatus('current')
aviciFabricLsaSummaryTable = MibTable((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 2), )
if mibBuilder.loadTexts: aviciFabricLsaSummaryTable.setStatus('current')
aviciFabricLsaSummaryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 2, 1), ).setIndexNames((0, "AVICI-BAY-MIB", "aviciBayIndex"), (0, "AVICI-BAY-MIB", "aviciSlotIndex"))
if mibBuilder.loadTexts: aviciFabricLsaSummaryEntry.setStatus('current')
aviciFabricLsaSummaryChecksum = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 2, 1, 1), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricLsaSummaryChecksum.setStatus('current')
aviciFabricPathTable = MibTable((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 3), )
if mibBuilder.loadTexts: aviciFabricPathTable.setStatus('current')
aviciFabricPathEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 3, 1), ).setIndexNames((0, "AVICI-BAY-MIB", "aviciBayIndex"), (0, "AVICI-BAY-MIB", "aviciSlotIndex"), (0, "AVICI-FABRIC-MIB", "aviciFabricPathDestinationBay"), (0, "AVICI-FABRIC-MIB", "aviciFabricPathDestinationSlot"), (0, "AVICI-FABRIC-MIB", "aviciFabricPathIndex"))
if mibBuilder.loadTexts: aviciFabricPathEntry.setStatus('current')
aviciFabricPathIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 24))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricPathIndex.setStatus('current')
aviciFabricPathDestinationBay = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 3, 1, 2), AviciBayType())
if mibBuilder.loadTexts: aviciFabricPathDestinationBay.setStatus('current')
aviciFabricPathDestinationSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 3, 1, 3), AviciSlotType())
if mibBuilder.loadTexts: aviciFabricPathDestinationSlot.setStatus('current')
aviciFabricPathVector = MibTableColumn((1, 3, 6, 1, 4, 1, 2474, 1, 8, 1, 3, 3, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 185))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aviciFabricPathVector.setStatus('current')
aviciFabricMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 2474, 1, 8, 3))
aviciFabricMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 2474, 1, 8, 3, 1))
aviciFabricMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 2474, 1, 8, 3, 2))
aviciFabricMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 2474, 1, 8, 3, 1, 1)).setObjects(("AVICI-FABRIC-MIB", "aviciFabricGroup"), ("AVICI-FABRIC-MIB", "aviciFabricCountersGroup"), ("AVICI-FABRIC-MIB", "aviciFabricGeneralGroup"), ("AVICI-FABRIC-MIB", "aviciFabricRoutingGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
aviciFabricMIBCompliance = aviciFabricMIBCompliance.setStatus('current')
aviciFabricCountersGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2474, 1, 8, 3, 2, 1)).setObjects(("AVICI-FABRIC-MIB", "aviciFabricHiPriPktsInserted"), ("AVICI-FABRIC-MIB", "aviciFabricLoPriPktsInserted"), ("AVICI-FABRIC-MIB", "aviciFabricHiPriPktsForwarded"), ("AVICI-FABRIC-MIB", "aviciFabricLoPriPktsForwarded"), ("AVICI-FABRIC-MIB", "aviciFabricHiPriFlitsForwarded"), ("AVICI-FABRIC-MIB", "aviciFabricLoPriFlitsForwarded"), ("AVICI-FABRIC-MIB", "aviciFabricExtractedHiPriPktsSelf"), ("AVICI-FABRIC-MIB", "aviciFabricExtractedLoPriPktsSelf"), ("AVICI-FABRIC-MIB", "aviciFabricExtractedHiPriPkts"), ("AVICI-FABRIC-MIB", "aviciFabricExtractedLoPriPkts"), ("AVICI-FABRIC-MIB", "aviciFabricExtractedHiPriFlits"), ("AVICI-FABRIC-MIB", "aviciFabricExtractedLoPriFlits"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
aviciFabricCountersGroup = aviciFabricCountersGroup.setStatus('current')
aviciFabricGeneralGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2474, 1, 8, 3, 2, 2)).setObjects(("AVICI-FABRIC-MIB", "aviciFabricLinkIndex"), ("AVICI-FABRIC-MIB", "aviciFabricLinkDescr"), ("AVICI-FABRIC-MIB", "aviciFabricLinkAdminStatus"), ("AVICI-FABRIC-MIB", "aviciFabricLinkOperStatus"), ("AVICI-FABRIC-MIB", "aviciFabricLinkSpeed"), ("AVICI-FABRIC-MIB", "aviciFabricLinkCRCErrors"), ("AVICI-FABRIC-MIB", "aviciFabricLinkDownTransitions"), ("AVICI-FABRIC-MIB", "aviciFabricLinkLastChange"), ("AVICI-FABRIC-MIB", "aviciFabricChannelIndex"), ("AVICI-FABRIC-MIB", "aviciFabricChannelHiPriScrubEvents"), ("AVICI-FABRIC-MIB", "aviciFabricChannelLoPriScrubEvents"), ("AVICI-FABRIC-MIB", "aviciFabricChannelHiPriLastScrubTime"), ("AVICI-FABRIC-MIB", "aviciFabricChannelLoPriLastScrubTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
aviciFabricGeneralGroup = aviciFabricGeneralGroup.setStatus('current')
aviciFabricRoutingGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2474, 1, 8, 3, 2, 3)).setObjects(("AVICI-FABRIC-MIB", "aviciFabricLsaSequenceNum"), ("AVICI-FABRIC-MIB", "aviciFabricLsaCreationTime"), ("AVICI-FABRIC-MIB", "aviciFabricLsaModuleReachable"), ("AVICI-FABRIC-MIB", "aviciFabricLsaChannels"), ("AVICI-FABRIC-MIB", "aviciFabricLsaModulePlusX"), ("AVICI-FABRIC-MIB", "aviciFabricLsaModuleMinusX"), ("AVICI-FABRIC-MIB", "aviciFabricLsaModulePlusY"), ("AVICI-FABRIC-MIB", "aviciFabricLsaModuleMinusY"), ("AVICI-FABRIC-MIB", "aviciFabricLsaModulePlusZ"), ("AVICI-FABRIC-MIB", "aviciFabricLsaModuleMinusZ"), ("AVICI-FABRIC-MIB", "aviciFabricLsaSummaryChecksum"), ("AVICI-FABRIC-MIB", "aviciFabricPathIndex"), ("AVICI-FABRIC-MIB", "aviciFabricPathVector"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
aviciFabricRoutingGroup = aviciFabricRoutingGroup.setStatus('current')
mibBuilder.exportSymbols("AVICI-FABRIC-MIB", aviciFabricLsaModulePlusY=aviciFabricLsaModulePlusY, aviciFabricLsaEntry=aviciFabricLsaEntry, aviciFabricGeneralGroup=aviciFabricGeneralGroup, aviciFabricRoutingProtocolVersion=aviciFabricRoutingProtocolVersion, aviciFabricLinkAdminStatus=aviciFabricLinkAdminStatus, aviciFabricMIBGroups=aviciFabricMIBGroups, aviciFabricPathVector=aviciFabricPathVector, aviciFabricExtractedCountersTable=aviciFabricExtractedCountersTable, aviciFabricLinkDownTransitions=aviciFabricLinkDownTransitions, aviciFabricHiPriPktsForwarded=aviciFabricHiPriPktsForwarded, aviciFabricMIB=aviciFabricMIB, aviciFabricLsaModuleMinusZ=aviciFabricLsaModuleMinusZ, aviciFabricLsaSummaryEntry=aviciFabricLsaSummaryEntry, aviciFabricLinkCRCErrors=aviciFabricLinkCRCErrors, aviciFabricPathIndex=aviciFabricPathIndex, aviciFabricPathDestinationBay=aviciFabricPathDestinationBay, aviciFabricLinkEntry=aviciFabricLinkEntry, aviciFabricExtractedHiPriPktsSelf=aviciFabricExtractedHiPriPktsSelf, aviciFabricConfig=aviciFabricConfig, aviciFabricChannelHiPriLastScrubTime=aviciFabricChannelHiPriLastScrubTime, aviciFabricPathEntry=aviciFabricPathEntry, aviciFabricPathTable=aviciFabricPathTable, aviciFabricChannelLoPriScrubEvents=aviciFabricChannelLoPriScrubEvents, aviciFabricHiPriPktsInserted=aviciFabricHiPriPktsInserted, aviciFabricLsaTable=aviciFabricLsaTable, aviciFabricChannelTable=aviciFabricChannelTable, aviciFabricChannelIndex=aviciFabricChannelIndex, aviciFabricCounters=aviciFabricCounters, aviciFabricRouting=aviciFabricRouting, aviciFabricLoPriFlitsForwarded=aviciFabricLoPriFlitsForwarded, aviciFabricChannelLoPriLastScrubTime=aviciFabricChannelLoPriLastScrubTime, aviciFabricMIBConformance=aviciFabricMIBConformance, aviciFabricLsaOriginBay=aviciFabricLsaOriginBay, aviciFabricExtractedLoPriFlits=aviciFabricExtractedLoPriFlits, aviciFabricExtractedCountersEntry=aviciFabricExtractedCountersEntry, aviciFabricLsaCreationTime=aviciFabricLsaCreationTime, aviciFabricMIBCompliances=aviciFabricMIBCompliances, aviciFabricLinkTable=aviciFabricLinkTable, PYSNMP_MODULE_ID=aviciFabricMIB, aviciFabricLsaSequenceNum=aviciFabricLsaSequenceNum, aviciFabricLsaSummaryChecksum=aviciFabricLsaSummaryChecksum, aviciFabricLoPriPktsInserted=aviciFabricLoPriPktsInserted, aviciFabricLsaModulePlusZ=aviciFabricLsaModulePlusZ, aviciFabricMIBCompliance=aviciFabricMIBCompliance, aviciFabricLinkOperStatus=aviciFabricLinkOperStatus, aviciFabricLsaSummaryTable=aviciFabricLsaSummaryTable, aviciFabricLinkSpeed=aviciFabricLinkSpeed, aviciFabricPathDestinationSlot=aviciFabricPathDestinationSlot, aviciFabricGroup=aviciFabricGroup, aviciFabricLinkIndex=aviciFabricLinkIndex, aviciFabricHardwareVersion=aviciFabricHardwareVersion, aviciFabricLsaModuleMinusY=aviciFabricLsaModuleMinusY, aviciFabricCountersEntry=aviciFabricCountersEntry, aviciFabricHiPriFlitsForwarded=aviciFabricHiPriFlitsForwarded, aviciFabricExtractedLoPriPktsSelf=aviciFabricExtractedLoPriPktsSelf, aviciFabricChannelHiPriScrubEvents=aviciFabricChannelHiPriScrubEvents, aviciFabricLsaOriginSlot=aviciFabricLsaOriginSlot, aviciFabricLsaChannels=aviciFabricLsaChannels, aviciFabricLsaModuleMinusX=aviciFabricLsaModuleMinusX, aviciFabricCountersTable=aviciFabricCountersTable, aviciFabricObjects=aviciFabricObjects, aviciFabricLinkLastChange=aviciFabricLinkLastChange, aviciFabricLoPriPktsForwarded=aviciFabricLoPriPktsForwarded, aviciFabricLsaModulePlusX=aviciFabricLsaModulePlusX, aviciFabricRoutingGroup=aviciFabricRoutingGroup, 
aviciFabricExtractedHiPriPkts=aviciFabricExtractedHiPriPkts, aviciFabricExtractedHiPriFlits=aviciFabricExtractedHiPriFlits, aviciFabricCountersGroup=aviciFabricCountersGroup, aviciFabricLsaModuleReachable=aviciFabricLsaModuleReachable, aviciFabricExtractedLoPriPkts=aviciFabricExtractedLoPriPkts, aviciFabricLinkDescr=aviciFabricLinkDescr, aviciFabricChannelEntry=aviciFabricChannelEntry)
|
"""
Abstract Object Encoder/Decoder
Object schema is specified in JSON Abstract Data Notation (JADN) format.
Codec currently supports three JSON concrete message formats (verbose,
concise, and minified) but can be extended to support XML or binary formats.
Copyright 2016, 2021 David Kemp
Licensed under the Apache License, Version 2.0
http://www.apache.org/licenses/LICENSE-2.0
"""
from typing import Any, Callable, Dict, List, Optional
from .codec import SymbolTableField, SymbolTableFieldDefinition, enctab, _decode_maprec, _encode_maprec
from .format_serialize_json import json_format_codecs, get_format_encode_function, get_format_decode_function
from .format_validate import format_validators, get_format_validate_function
from ..utils import ftopts_s2d, get_config, object_types, raise_error, topts_s2d
from ..definitions import (
# Field Indexes
BaseType, FieldID, FieldName,
# Const values
PRIMITIVE_TYPES, CORE_TYPES,
# Dataclass
TypeDefinition, GenFieldDefinition
)
from ..transform import unfold_extensions
class Codec:
"""
Serialize (encode) and De-serialize (decode) values, validate against JADN syntax.
verbose_rec - True: Record types encoded as maps
False: Record types encoded as arrays
verbose_str - True: Identifiers encoded as strings
False: Identifiers encoded as integer tags
"""
schema: dict # better typing??
config: dict # better typing??
format_validators: Dict[str, Dict[str, Callable[[Any], Any]]]
format_codec: dict # better typing??
types: Dict[str, TypeDefinition]
    symtab: Dict[str, SymbolTableField]
verbose_rec: bool
verbose_str: bool
def __init__(self, schema: dict, verbose_rec=False, verbose_str=False, config: dict = None):
assert set(enctab) == set(CORE_TYPES)
self.schema = unfold_extensions(schema) # Convert extensions to core definitions
conf = config if config else schema
self.config = get_config(conf['info'] if 'info' in conf else None)
self.format_validate = format_validators() # Initialize format validation functions
self.format_codec = json_format_codecs() # Initialize format serialization functions
# pre-index types to allow symtab forward refs
self.types = {t.TypeName: t for t in object_types(self.schema['types'])}
self.symtab = {} # Symbol table - pre-computed values for all datatypes
self.set_mode(verbose_rec, verbose_str) # Create symbol table based on encoding mode
def decode(self, datatype: str, sval: Any) -> Any: # Decode serialized value into API value
try:
ts = self.symtab[datatype]
except KeyError:
raise_error(f'Validation Error: Decode: datatype "{datatype}" is not defined')
return ts.Decode(ts, sval, self) # Dispatch to type-specific decoder
def encode(self, datatype: str, aval: Any) -> Any: # Encode API value into serialized value
try:
ts = self.symtab[datatype]
except KeyError:
raise_error(f'Validation Error: Encode: datatype "{datatype}" is not defined')
return ts.Encode(ts, aval, self) # Dispatch to type-specific encoder
def set_mode(self, verbose_rec=False, verbose_str=False):
# Build symbol table field entries
def symf(fld: GenFieldDefinition, fa: int, fnames: dict) -> SymbolTableFieldDefinition:
fo, to = ftopts_s2d(fld.FieldOptions)
if to:
raise_error(f'Validation Error: {fld.FieldName}: internal error: unexpected type options: {to}')
fopts = {'minc': 1, 'maxc': 1, **fo}
assert fopts['minc'] in (0, 1) and fopts['maxc'] == 1 # Other cardinalities have been simplified
ctag: Optional[int] = None
if 'tagid' in fopts:
ctag = fopts['tagid'] if fa == FieldID else fnames[fopts['tagid']]
return SymbolTableFieldDefinition(
fld, # SF_DEF: JADN field definition
fopts, # SF_OPT: Field options (dict)
ctag # SF_CTAG: tagid option
)
# Generate TypeRef pattern - concatenate NSID: and TypeName patterns
def make_typeref_pattern(nsid: str, typename: str) -> dict:
ns = nsid.lstrip('^').rstrip('$')
tn = typename.lstrip('^').rstrip('$')
return {'pattern': fr'^({ns}:)?{tn}$'}
# Set configurable option values
def config_opts(opts: List[str]) -> dict:
op = [(v[0] + self.config[v[1:]]) if len(v) > 1 and v[1] == '$' else v for v in opts]
return topts_s2d(op)
def sym(t: TypeDefinition) -> SymbolTableField: # Build symbol table based on encoding modes
symval = SymbolTableField(
t, # 0: S_TDEF: JADN type definition
enctab[t.BaseType].Enc, # 1: S_ENCODE: Encoder for this type
enctab[t.BaseType].Dec, # 2: S_DECODE: Decoder for this type
enctab[t.BaseType].eType, # 3: S_ENCTYPE: Encoded value type
config_opts(t.TypeOptions), # 4: S_TOPTS: Type Options (dict)
)
if t.BaseType == 'Record':
symval.Encode = _encode_maprec # if self.verbose_rec else _encode_array
symval.Decode = _decode_maprec # if self.verbose_rec else _decode_array
symval.EncType = dict if self.verbose_rec else list
if t.BaseType in ('Enumerated', 'Array', 'Choice', 'Map', 'Record'):
fx = FieldName if 'id' not in symval.TypeOpts and t.BaseType != 'Array' and verbose_str else FieldID
fa = FieldName if 'id' not in symval.TypeOpts else FieldID
try:
symval.dMap = {f[fx]: f[fa] for f in t.Fields}
symval.eMap = {f[fa]: f[fx] for f in t.Fields}
fnames = {f[FieldID]: f[FieldName] for f in t.Fields}
except IndexError as e:
raise IndexError(f'symval index error: {e}')
if t.BaseType != 'Enumerated':
symval.Fld = {f[fx]: symf(f, fa, fnames) for f in t.Fields}
if t.BaseType in ('Binary', 'String', 'Array', 'ArrayOf', 'Map', 'MapOf', 'Record'):
minv = symval.TypeOpts.get('minv', 0)
maxv = symval.TypeOpts.get('maxv', 0)
if minv < 0 or maxv < 0:
raise_error(f'Validation Error: {t.TypeName}: length cannot be negative: {minv}..{maxv}')
if maxv == 0:
maxv = self.config['$MaxElements']
if t.BaseType in ('Binary', 'String'):
maxv = self.config[f'$Max{t.BaseType}']
symval.TypeOpts.update({'minv': minv, 'maxv': maxv})
fmt = symval.TypeOpts.get('format', '')
symval.FormatValidate = get_format_validate_function(self.format_validate, t.BaseType, fmt)
symval.FormatEncode = get_format_encode_function(self.format_codec, t.BaseType, fmt)
symval.FormatDecode = get_format_decode_function(self.format_codec, t.BaseType, fmt)
return symval
self.verbose_rec = verbose_rec
self.verbose_str = verbose_str
self.symtab = {t.TypeName: sym(t) for t in object_types(self.schema['types'])}
if 'TypeRef' in self.types:
self.symtab['TypeRef'].TypeOpts = make_typeref_pattern(self.config['$NSID'], self.config['$TypeName'])
for t in PRIMITIVE_TYPES:
self.symtab[t] = SymbolTableField(
TypeDef=TypeDefinition('', t),
Encode=enctab[t].Enc,
Decode=enctab[t].Dec,
EncType=enctab[t].eType,
TypeOpts={},
                # Primitive type names double as their own base-type string, so pass the name directly
                FormatValidate=get_format_validate_function(self.format_validate, t, ''),
                FormatEncode=get_format_encode_function(self.format_codec, t, ''),
                FormatDecode=get_format_decode_function(self.format_codec, t, '')
)
__all__ = ['Codec']
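# A minimal usage sketch (not part of the original module; the schema contents and
# the 'Person' type name are assumptions for illustration). verbose_rec/verbose_str
# select map-vs-array records and string-vs-integer identifiers, as described above.
def _example_roundtrip(schema: dict) -> None:
    codec = Codec(schema, verbose_rec=True, verbose_str=True)
    api_value = codec.decode('Person', {'name': 'Alice'})    # serialized -> API value
    serialized = codec.encode('Person', api_value)           # API value -> serialized
    print(serialized)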
|
import logging
from datetime import datetime, timedelta
from typing import Optional, Sequence, Tuple
from snuba import util
from snuba.clickhouse.native import ClickhousePool
logger = logging.getLogger("snuba.cleanup")
def run_cleanup(
clickhouse: ClickhousePool, database: str, table: str, dry_run: bool = True
) -> int:
active_parts = get_active_partitions(clickhouse, database, table)
stale_parts = filter_stale_partitions(active_parts)
drop_partitions(clickhouse, database, table, stale_parts, dry_run=dry_run)
return len(stale_parts)
def get_active_partitions(
clickhouse: ClickhousePool, database: str, table: str
) -> Sequence[util.Part]:
response = clickhouse.execute(
"""
SELECT DISTINCT partition
FROM system.parts
WHERE database = %(database)s
AND table = %(table)s
AND active = 1
""",
{"database": database, "table": table},
)
return [util.decode_part_str(part) for part, in response]
def filter_stale_partitions(
parts: Sequence[util.Part], as_of: Optional[datetime] = None
) -> Sequence[Tuple[datetime, int]]:
"""Filter partitions of (datetime, retention_days) down to ones
that are out of the retention window based on `as_of` (default: now)."""
if as_of is None:
as_of = datetime.utcnow()
stale_parts = []
for part_date, retention_days in parts:
part_last_day = part_date + timedelta(days=6 - part_date.weekday())
if part_last_day < (as_of - timedelta(days=retention_days)):
stale_parts.append((part_date, retention_days))
return stale_parts
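# Worked sketch (not part of the original module): with 90-day retention and
# as_of 2024-06-01, a partition starting Monday 2024-02-05 has its week end on
# 2024-02-11, which is before 2024-03-03 (as_of - 90 days), so it is stale.
# Plain (datetime, retention_days) tuples stand in for util.Part here.
def _example_filter_stale_partitions() -> Sequence[Tuple[datetime, int]]:
    return filter_stale_partitions(
        [(datetime(2024, 2, 5), 90)], as_of=datetime(2024, 6, 1)
    )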
def drop_partitions(
clickhouse: ClickhousePool,
database: str,
table: str,
parts: Sequence[Tuple[datetime, int]],
dry_run: bool = True,
) -> None:
query_template = """\
ALTER TABLE %(database)s.%(table)s DROP PARTITION ('%(date_str)s', %(retention_days)s)
"""
for part_date, retention_days in parts:
args = {
"database": database,
"table": table,
"date_str": part_date.strftime("%Y-%m-%d"),
"retention_days": retention_days,
}
query = (query_template % args).strip()
if dry_run:
logger.info("Dry run: " + query)
else:
logger.info("Dropping partition: " + query)
clickhouse.execute(query)
|
# #####################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for #
# the specific language governing permissions and limitations under the License. #
# #####################################################################################################################
import pytest
from aws_cdk import Stack, App
from constructs import Construct
from aws_solutions.cdk.aws_lambda.cfn_custom_resources.resource_name.name import (
ResourceName,
)
class SomeStack(Stack):
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
super().__init__(scope, construct_id, **kwargs)
self.name_1 = ResourceName(self, "name_1", purpose="var_1", max_length=32)
self.name_2 = ResourceName(self, "name_2", purpose="var_2", max_length=32)
@pytest.fixture
def resource_naming_stack():
app = App()
SomeStack(app, "some-test-naming")
yield app.synth().get_stack_by_name("some-test-naming").template
def test_resource_service_tokens(resource_naming_stack):
# There should be only one lambda function generated.
service_tokens = [
resource["Properties"]["ServiceToken"]
for resource in resource_naming_stack["Resources"].values()
if resource["Type"] == "Custom::ResourceName"
]
assert all(st == service_tokens[0] for st in service_tokens)
|
import os
import uuid
from cloudinitd.cb_iaas import IaaSTestInstance
from cloudinitd.exceptions import APIUsageException
from cloudinitd.pollables import InstanceHostnamePollable
from cloudinitd.user_api import CloudInitD
import unittest
class ServiceUnitTests(unittest.TestCase):
def test_baddir_name(self):
try:
cb = CloudInitD("baddir", db_name="badname")
fail = True
except APIUsageException, apiex:
fail = False
self.assertFalse(fail)
def test_bad_opts1(self):
try:
cb = CloudInitD("/tmp")
fail = True
except APIUsageException, apiex:
fail = False
self.assertFalse(fail)
def test_service_poll(self):
x = None
if 'CLOUDINITD_TESTENV' in os.environ:
x = os.environ['CLOUDINITD_TESTENV']
os.environ['CLOUDINITD_TESTENV'] = "1"
h1 = str(uuid.uuid1())
instance = IaaSTestInstance(h1, time_to_hostname=1)
p = InstanceHostnamePollable(instance=instance)
p.start()
rc = False
while not rc:
rc = p.poll()
h2 = p.get_hostname()
self.assertEquals(h1, h2)
i = p.get_instance()
self.assertEqual(instance, i)
if x:
os.environ['CLOUDINITD_TESTENV'] = x
else:
del(os.environ['CLOUDINITD_TESTENV'])
if __name__ == '__main__':
unittest.main()
|
from .config import Config, ConfigDict, DictAction
from .path import check_file_exist, mkdir_or_exist
from .logging import get_logger, print_log
from .misc import is_seq_of, is_str, is_list_of
__all__ = ['Config', 'ConfigDict', 'DictAction', 'check_file_exist',
'mkdir_or_exist', 'get_logger', 'print_log', 'is_seq_of',
'is_str', 'is_list_of']
|
import abc
import logging
import traceback
import servicemanager
import win32event, win32service, win32api
from win32serviceutil import ServiceFramework
log = logging.getLogger(__name__)
class WindowsService(object, ServiceFramework):
"""
Base windows service class that provides all the nice things that a python
service needs
"""
__metaclass__ = abc.ABCMeta
def __init__(self, args):
try:
self._svc_name_ = args[0]
self._svc_display_name_ = args[0]
ServiceFramework.__init__(self, args)
self.stop_event = win32event.CreateEvent(None, 0, 0, None)
except Exception:
self.log("Error in WindowsService.__init__")
self.log(traceback.format_exc())
raise
def log(self, msg):
'Log to the NTEventlog'
servicemanager.LogInfoMsg(str(msg))
def sleep(self, sec):
win32api.Sleep(sec * 1000, True)
def SvcDoRun(self):
self.log('start')
self.ReportServiceStatus(win32service.SERVICE_START_PENDING)
try:
self.ReportServiceStatus(win32service.SERVICE_RUNNING)
self.log('start')
self.start()
self.ReportServiceStatus(win32service.SERVICE_STOPPED)
# self.log('wait')
# win32event.WaitForSingleObject(self.stop_event, win32event.INFINITE)
self.log('done')
except Exception:
self.log("Error in WindowsService.SvcDoRun")
self.log(traceback.format_exc())
self.SvcStop()
    def SvcStop(self):
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
self.log('stopping')
self.stop()
self.log('stopped')
win32event.SetEvent(self.stop_event)
self.ReportServiceStatus(win32service.SERVICE_STOPPED)
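# A hypothetical concrete service (a sketch, not part of the original module):
# subclasses provide start()/stop(); SvcDoRun calls start() and SvcStop calls stop()
# before signalling stop_event.
class ExampleService(WindowsService):
    def start(self):
        # Block until SvcStop signals the stop event, logging a heartbeat each second.
        while win32event.WaitForSingleObject(self.stop_event, 1000) != win32event.WAIT_OBJECT_0:
            self.log('ExampleService heartbeat')

    def stop(self):
        # Nothing extra to clean up; SvcStop sets self.stop_event after this returns.
        pass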
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient.contrib import noauth
from cinderclient.tests.unit import utils
class CinderNoAuthPluginTest(utils.TestCase):
def setUp(self):
super(CinderNoAuthPluginTest, self).setUp()
self.plugin = noauth.CinderNoAuthPlugin('user', 'project',
endpoint='example.com')
def test_auth_token(self):
auth_token = 'user:project'
self.assertEqual(auth_token, self.plugin.auth_token)
def test_auth_token_no_project(self):
auth_token = 'user:user'
plugin = noauth.CinderNoAuthPlugin('user')
self.assertEqual(auth_token, plugin.auth_token)
def test_get_headers(self):
headers = {'x-user-id': 'user',
'x-project-id': 'project',
'X-Auth-Token': 'user:project'}
self.assertEqual(headers, self.plugin.get_headers(None))
def test_get_endpoint(self):
endpoint = 'example.com/project'
self.assertEqual(endpoint, self.plugin.get_endpoint(None))
|
from .basedata import *
"""
Purpose: layout
"""
class MainFrameGUI(wx.Frame, BaseData):
def __init__(self, parent = None):
BaseData.__init__(self)
wx.Frame.__init__(self, parent, -1, title = CON_JDJANGO_TITLE, pos = wx.DefaultPosition, size = wx.Size(1000, 580), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL)
        self._init_UI() # initialize the interface layout
        self._init_menu() # initialize the menu bar
        self._init_systoolBar() # initialize the system toolbar
        self._init_statusbar() # initialize the bottom status bar
        self._init_ctrls() # initialize the controllers
    def _init_UI(self):
        """Panel layout"""
        self.needFonts = [] # controls whose fonts still need to be set
        '''
        Outer container (mainly draws the black border around the edges)
        '''
self.panel = wx.Panel(self)
self.panelSizer = wx.BoxSizer(wx.VERTICAL)
self.panel.SetSizer(self.panelSizer)
# self.panel.SetBackgroundColour(CON_COLOR_GREY)
self.panel.SetBackgroundColour(CON_COLOR_MAIN)
        '''
        Top pop-up info bar
        '''
self.infoBar = wx.InfoBar(self.panel)
self.panelSizer.Add(self.infoBar, 0, wx.EXPAND)
        ### Usage ###
        # self.infoBar.ShowMessage("Check succeeded; see the output window for details.", wx.ICON_INFORMATION)
        # Optional second argument: wx.ICON_NONE, wx.ICON_INFORMATION, wx.ICON_QUESTION, wx.ICON_WARNING, wx.ICON_ERROR
        '''
        Actual content container (all controls live here)
        '''
self.midPanel = wx.Panel(self.panel)
self.midPanelSizer = wx.BoxSizer(wx.VERTICAL)
self.midPanel.SetSizer(self.midPanelSizer)
self.midPanel.SetBackgroundColour('#ffffff')
        '''
        Custom toolbar
        '''
        self._init_self_tools()
        '''
        Output panel (shows operation feedback in real time)
        '''
self._init_choicebook()
self._init_labelbook()
def _init_labelbook(self):
"""初始化标签切换控件"""
self.auiNotebook = aui.AuiNotebook(
self.panel,
style = wx.aui.AUI_NB_TAB_SPLIT # no close button on the tabs
# | wx.aui.AUI_NB_TAB_MOVE # tabs can be dragged to reorder (disabled: the first tab must not be removable)
| wx.aui.AUI_NB_SCROLL_BUTTONS # hide overflowing tabs behind scroll buttons
# | wx.aui.AUI_NB_WINDOWLIST_BUTTON # allow dragging in every direction
| wx.aui.AUI_NB_CLOSE_BUTTON # show a close button at the far right
# | wx.aui.AUI_NB_CLOSE_ON_ACTIVE_TAB # show the close button only on the active tab
# | wx.aui.AUI_NB_CLOSE_ON_ALL_TABS # show a close button on every tab
# | wx.aui.AUI_NB_MIDDLE_CLICK_CLOSE
# | wx.aui.AUI_NB_TOP # tabs are pinned to the top and cannot be dragged around
# | wx.aui.AUI_NB_BOTTOM # tabs are pinned to the bottom and cannot be dragged around
)
self.labelBook = LB.LabelBook(
self.auiNotebook, -1,
agwStyle = LB.INB_FIT_BUTTON
| LB.INB_SHOW_ONLY_TEXT # show text only
| LB.INB_LEFT # place the tabs on the left
| LB.INB_BORDER # draw a border
| LB.INB_DRAW_SHADOW # draw a shadow under the buttons
# | LB.INB_GRADIENT_BACKGROUND # draw a gradient background
| LB.INB_WEB_HILITE # highlight like a hyperlink on hover
| LB.INB_FIT_LABELTEXT
| LB.INB_BOLD_TAB_SELECTION
) # 79 80 73 DFE0D9
self.labelBook.SetColour(LB.INB_TAB_AREA_BACKGROUND_COLOUR, '#2c3e50')
# self.labelBook.SetColour(LB.INB_ACTIVE_TAB_COLOUR, colour)
# self.labelBook.SetColour(LB.INB_TABS_BORDER_COLOUR, colour)
self.labelBook.SetColour(LB.INB_TEXT_COLOUR, '#ffffff')
# self.labelBook.SetColour(LB.INB_ACTIVE_TEXT_COLOUR, colour)
# self.labelBook.SetColour(LB.INB_HILITE_TAB_COLOUR, colour)
self.panelSizer.Add(self.auiNotebook, 1, wx.EXPAND | wx.ALL, 5)
self.labelBook.AddPage(self.midPanel, '基本功能')
self.labelBook.AddPage(UrlsListPanel(self.panel), '路由')
self.labelBook.AddPage(wx.Panel(self.panel), '模型')
self.labelBook.AddPage(PipListCtrlPanel(self.panel), '三方库')
self.labelBook.AddPage(BatchExcelPanel(self.panel), '表格批处理')
self.labelBook.AddPage(wx.Panel(self.panel), '数据可视化')
self.labelBook.AddPage(wx.Panel(self.panel), '基爬虫API')
self.labelBook.AddPage(wx.Panel(self.panel), '人工智能API')
self.labelBook.AddPage(wx.Panel(self.panel), 'VUE快捷操作')
self.labelBook.AddPage(wx.Panel(self.panel), '命令')
self.labelBook.AddPage(WxPythonCtrlsPanel(self.panel), 'wxPython控件')
self.auiNotebook.AddPage(self.labelBook, '核心功能')
wx.CallAfter(self.auiNotebook.SendSizeEvent)
# self.labelBook.Refresh()
def _init_choicebook(self):
"""初始化选择窗口"""
choicebook = wx.Choicebook(self.midPanel)
self.midPanelSizer.Add(choicebook, 1, wx.EXPAND | wx.ALL, 1)
'''
Custom message console
'''
panel1 = wx.Panel(choicebook)
panel1Sizer = wx.BoxSizer(wx.VERTICAL)
panel1.SetSizer(panel1Sizer)
self.infos = wx.TextCtrl(panel1, -1, style=wx.TE_MULTILINE)
panel1Sizer.Add(self.infos, 1, wx.EXPAND | wx.ALL)
choicebook.AddPage(panel1, '自定义消息命令行')
'''
Native Python shell
'''
panel2 = wx.Panel(choicebook)
panel2Sizer = wx.BoxSizer(wx.VERTICAL)
panel2.SetSizer(panel2Sizer)
self.pyShell = wx.py.shell.Shell(panel2, introText='【此环境取自您的本机Python环境,即运行此程序的Python环境】')
panel2Sizer.Add(self.pyShell, 1, wx.EXPAND | wx.ALL, 0)
choicebook.AddPage(panel2, f'Python Shell({sys.version})')
'''
Enhanced native shell (Crust)
'''
panel3 = wx.Panel(choicebook)
panel3Sizer = wx.BoxSizer(wx.VERTICAL)
panel3.SetSizer(panel3Sizer)
self.pyShellMore = wx.py.crust.Crust(panel3)
panel3Sizer.Add(self.pyShellMore, 1, wx.EXPAND | wx.ALL, 0)
choicebook.AddPage(panel3, f'Python Shell增强版({sys.version})')
def _init_menu(self):
"""设置工具栏"""
self.topBar = wx.MenuBar() # 创建顶部菜单条
self._init_menu_file() # 文件 菜单项
self._init_menu_percheck() # 单项检测 菜单项
self._init_menu_perfix() # 单项修复 菜单项
self._init_menu_admin() # 后台管理中心 菜单项
self._init_menu_run() # 运行 菜单项
self._init_menu_helps() # 帮助 菜单项
self._init_menu_quit() # 退出 菜单项
self.SetMenuBar(self.topBar)
def _init_systoolBar(self):
"""初始化系统工具栏"""
self.sys_toolbar = self.CreateToolBar(wx.TB_HORIZONTAL|wx.NO_BORDER|wx.TB_FLAT) # 工具栏
# self.sys_toolbar.SetBackgroundColour('#465789')
# self._append_separator_to_tools()
# self.shotcut_file = self.sys_toolbar.AddTool(wx.ID_ANY, "选择Django项目", wx.Bitmap(BITMAP_FILE_PATH), shortHelp='选择Django项目')
self.shotcut_run = self.sys_toolbar.AddTool(wx.ID_ANY, "运行", wx.Bitmap(BITMAP_RUN_PATH), shortHelp='运行')
self.shotcut_stop = self.sys_toolbar.AddTool(wx.ID_ANY, "停止", wx.Bitmap(BITMAP_STOP_PATH), shortHelp='停止')
# self._append_separator_to_tools()
# self.shotcut_database = self.sys_toolbar.AddTool(wx.ID_ANY, "数据库", wx.Bitmap(BITMAP_DATABASE_PATH), shortHelp='数据库')
# self.shotcut_setting = self.sys_toolbar.AddTool(wx.ID_ANY, "选项/修改", wx.Bitmap(BITMAP_SETTINGS_PATH), shortHelp='选项/修改')
self._append_separator_to_tools()
self.shotcut_code = self.sys_toolbar.AddTool(wx.ID_ANY, "VSCode打开", wx.Bitmap(BITMAP_CODE_PATH), shortHelp='VSCode打开')
self.shotcut_command = self.sys_toolbar.AddTool(wx.ID_ANY, "shell", wx.Bitmap(BITMAP_COMMAND_PATH), shortHelp='shell')
self._append_separator_to_tools()
self.shotcut_makemigration = self.sys_toolbar.AddTool(wx.ID_ANY, "makemigration", wx.Bitmap(BITMAP_MAKEMIGRATION_PATH), shortHelp='makemigration')
self.shotcut_migrate = self.sys_toolbar.AddTool(wx.ID_ANY, "migrate", wx.Bitmap(BITMAP_MIGRATE_PATH), shortHelp='migrate')
self._append_separator_to_tools()
self.shotcut_pipinstall = self.sys_toolbar.AddTool(wx.ID_ANY, "pip install", wx.Bitmap(BITMAP_PIPINSTALL_PATH), shortHelp='pip install')
self._append_separator_to_tools()
self.shotcut_info = self.sys_toolbar.AddTool(wx.ID_ANY, "帮助", wx.Bitmap(BITMAP_INFO_PATH), shortHelp='帮助')
self.sys_toolbar.Realize() # needed on Windows
def _init_menu_file(self):
"""文件"""
menus = wx.Menu()
self.topBar.Append(menus, "&文件")
self.menuOpen = menus.Append(wx.ID_OPEN, "&查看文件", "查看文件")
self._append_separator(menus)
menusOpenDjango = wx.Menu()
menus.Append(wx.ID_ANY, "&打开", menusOpenDjango)
self.menuVSCode = menusOpenDjango.Append(wx.ID_ANY, "&使用VSCode打开项目", "使用VSCode打开项目")
self._append_separator(menus)
menusCreate = wx.Menu()
menus.Append(wx.ID_ANY, "&新建", menusCreate)
menusCreateVersionProject = wx.Menu()
self.create_project = menusCreateVersionProject.Append(wx.ID_ANY, "&Django", "Django")
menusCreate.Append(wx.ID_ANY, "&项目", menusCreateVersionProject)
self._append_separator(menusCreate)
self.menuGenerate = menusCreate.Append(wx.ID_ANY, "&应用程序", "应用程序")
self._append_separator(menusCreate)
modelsSubMenu = wx.Menu()
self.modelsGenerate = modelsSubMenu.Append(wx.ID_ANY, "&完整模型", "完整模型")
self.modelsProxyGenerate = modelsSubMenu.Append(wx.ID_ANY, "&代理模型", "代理模型")
menusCreate.Append(wx.ID_ANY, "&模型", modelsSubMenu)
self._append_separator(menusCreate)
self.viewsGenerateFunc = menusCreate.Append(wx.ID_ANY, "&视图", "视图")
self.create_project.Enable(True)
self._append_separator(menusCreate)
self.viewsRestFramework = menusCreate.Append(wx.ID_ANY, "&rest-framework", "rest-framework")
self._append_separator(menus)
menusProject = wx.Menu()
menus.Append(wx.ID_ANY, "&Django项目", menusProject)
self.menusSettings = menusProject.Append(wx.ID_ANY, "&Settings", "Settings")
self._append_separator(menus)
settings = wx.Menu()
menus.Append(wx.ID_ANY, "&工具", settings)
fonts = wx.Menu()
settings.Append(wx.ID_ANY, "&字体", fonts)
self.fonts_minus = fonts.Append(wx.ID_ANY, "&-1", "-1")
self.fonts_add = fonts.Append(wx.ID_ANY, "&+1", "+1")
self._append_separator(settings)
self.language = settings.Append(wx.ID_ANY, "&语言", "语言")
self._append_separator(settings)
self.sqliteManageTool = settings.Append(wx.ID_ANY, "&SQLite3", "SQLite3")
def _init_menu_helps(self):
"""帮助"""
helps = wx.Menu()
self.topBar.Append(helps, "&帮助")
self.helpsORM = helps.Append(wx.ID_ANY, "&AUTO-ORM", "AUTO-ORM")
self._append_separator(helps)
self.helpsDocumentation = helps.Append(wx.ID_ANY, "&参考文档", "参考文档")
self._append_separator(helps)
self.helpsSeeOrKill = helps.Append(wx.ID_ANY, "&进程", "进程")
self._append_separator(helps)
self.menuAbout = helps.Append(wx.ID_ANY, "&关于", "关于")
def _init_menu_run(self):
"""运行"""
portProgress = wx.Menu()
self.topBar.Append(portProgress, "&运行")
speeder = wx.Menu()
portProgress.Append(wx.ID_ANY, "&镜像源", speeder)
self.portProgressFaster = speeder.Append(wx.ID_ANY, "&一键配置", "一键配置")
self._append_separator(portProgress)
virtualenv = wx.Menu()
portProgress.Append(wx.ID_ANY, "&虚拟环境", virtualenv)
self.portProgressVirtual = virtualenv.Append(wx.ID_ANY, "&创建", "创建")
self._append_separator(virtualenv)
self.portProgressVirtualChoice = virtualenv.Append(wx.ID_ANY, "&绑定", "绑定")
self._append_separator(virtualenv)
self.portProgressVirtualView = virtualenv.Append(wx.ID_ANY, "&查看", "查看")
self._append_separator(portProgress)
self.portProgressRun = portProgress.Append(wx.ID_ANY, "&运行", "运行")
self.portProgressStop = portProgress.Append(wx.ID_ANY, "&停止", "停止")
self._append_separator(portProgress)
djangoOrder = wx.Menu()
portProgress.Append(wx.ID_ANY, "&原生指令", djangoOrder)
self.portProgressPipInstall = djangoOrder.Append(wx.ID_ANY, "&pip install", "pip install")
self.portProgressPipFreeze = djangoOrder.Append(wx.ID_ANY, "&pip freeze", "pip freeze")
self._append_separator(djangoOrder)
self.portProgressShell = djangoOrder.Append(wx.ID_ANY, "&shell(Django交互式界面)", "shell(Django交互式界面)")
self._append_separator(djangoOrder)
self.portProgressMakemigrations = djangoOrder.Append(wx.ID_ANY, "&makemigrations(数据迁移)", "makemigrations(数据迁移)")
self.portProgressMigrate = djangoOrder.Append(wx.ID_ANY, "&migrate(数据写入)", "migrate(数据写入)")
self.portProgressFlush = djangoOrder.Append(wx.ID_ANY, "&flush(数据清空)", "flush(数据清空)")
self.portProgressCollectstatic = djangoOrder.Append(wx.ID_ANY, "&collectstatic(静态文件收集)", "collectstatic(静态文件收集)")
self.portProgressCreatesuperuser = djangoOrder.Append(wx.ID_ANY, "&createsuperuser(创建管理员)", "createsuperuser(创建管理员)")
self._append_separator(portProgress)
progresser = wx.Menu()
portProgress.Append(wx.ID_ANY, "&进程", progresser)
self.portProgressKillProgress = progresser.Append(wx.ID_ANY, "&终止进程", "终止进程")
def _init_menu_quit(self):
"""退出"""
directExit = wx.Menu()
self.topBar.Append(directExit, "&退出")
self.btnDirectExit = directExit.Append(wx.ID_ANY, "&退出", "退出")
def _init_menu_percheck(self):
"""单项检测"""
perCheck = wx.Menu()
self.topBar.Append(perCheck, "&单项检测")
self.apps_check = perCheck.Append(wx.ID_ANY, "&应用程序", "应用程序")
self.urls_check = perCheck.Append(wx.ID_ANY, "&路由", "路由")
self.views_check = perCheck.Append(wx.ID_ANY, "&视图", "视图")
self.templates_check = perCheck.Append(wx.ID_ANY, "&模板", "模板")
self.forms_check = perCheck.Append(wx.ID_ANY, "&表单", "表单")
self.models_check = perCheck.Append(wx.ID_ANY, "&模型", "模型")
self.database_check = perCheck.Append(wx.ID_ANY, "&数据库", "数据库")
def _init_menu_perfix(self):
"""单项修复"""
perFix = wx.Menu()
self.topBar.Append(perFix, "&单项修复")
self.apps_fix = perFix.Append(wx.ID_ANY, "&应用程序", "应用程序")
self.urls_fix = perFix.Append(wx.ID_ANY, "&路由", "路由")
self.views_fix = perFix.Append(wx.ID_ANY, "&视图", "视图")
self.templates_fix = perFix.Append(wx.ID_ANY, "&模板", "模板")
self.forms_fix = perFix.Append(wx.ID_ANY, "&表单", "表单")
self.models_fix = perFix.Append(wx.ID_ANY, "&模型", "模型")
self.database_fix = perFix.Append(wx.ID_ANY, "&数据库", "数据库")
def _init_menu_admin(self):
"""管理中心"""
admin = wx.Menu()
self.topBar.Append(admin, "&后台管理中心")
self.adminGenerateBase = admin.Append(wx.ID_ANY, "&后台绑定模型", "后台绑定模型")
self._append_separator(admin)
self.adminRename = admin.Append(wx.ID_ANY, "&修改后台名称", "修改后台名称")
def _init_statusbar(self):
"""设置状态栏"""
'''
状态栏分为三份,比例为 1 : 2 : 1,0代表第一栏,以此类推。
'''
sb = self.CreateStatusBar(4)
self.SetStatusWidths([-1, -2, -5, -1]) # 后期扩展
self.SetStatusText("Ready", 0)
def _init_self_tools(self):
"""自定义工具条"""
'''
自定义工具条 - 整体
'''
toolPanel = wx.Panel(self.midPanel)
toolPanelSizer = wx.BoxSizer(wx.HORIZONTAL)
toolPanel.SetSizer(toolPanelSizer)
self.midPanelSizer.Add(toolPanel, 0, wx.EXPAND | wx.ALL, 0)
'''
Custom tool bar - left side
'''
toolLeftPanel = wx.Panel(toolPanel)
toolLeftPanelSizer = wx.BoxSizer(wx.HORIZONTAL)
toolLeftPanel.SetSizer(toolLeftPanelSizer)
toolPanelSizer.Add(toolLeftPanel, 0, wx.EXPAND | wx.ALL, 0)
self.btn_select_project = buttons.GenButton(toolLeftPanel, -1, label='选择Django项目')
toolLeftPanelSizer.Add(self.btn_select_project, 0, wx.EXPAND | wx.ALL, 0)
self.btn_check_project = buttons.GenButton(toolLeftPanel, -1, label='[一键]校验')
toolLeftPanelSizer.Add(self.btn_check_project, 0, wx.EXPAND | wx.ALL, 0)
self.btn_fixed_project = buttons.GenButton(toolLeftPanel, -1, label='[一键]修复')
toolLeftPanelSizer.Add(self.btn_fixed_project, 0, wx.EXPAND | wx.ALL, 0)
self.btn_config_project = buttons.GenButton(toolLeftPanel, -1, label='选项/修改')
toolLeftPanelSizer.Add(self.btn_config_project, 0, wx.EXPAND | wx.ALL, 0)
self.btn_clear_text = buttons.GenButton(toolLeftPanel, -1, label='清空')
toolLeftPanelSizer.Add(self.btn_clear_text, 0, wx.EXPAND | wx.ALL, 0)
self.btn_test = buttons.GenButton(toolLeftPanel, -1, label='测试按钮(后期删除)')
toolLeftPanelSizer.Add(self.btn_test, 0, wx.EXPAND | wx.ALL, 0)
self.btn_test.Show(False)
'''
Custom tool bar - right side
'''
toolRightPanel = wx.Panel(toolPanel)
toolRightPanelSizer = wx.BoxSizer(wx.HORIZONTAL)
toolRightPanel.SetSizer(toolRightPanelSizer)
toolPanelSizer.Add(toolRightPanel, 1, wx.EXPAND | wx.ALL, 0)
self.cmdInput = wx.TextCtrl(toolRightPanel, -1, size=(200, -1))
toolRightPanelSizer.Add(self.cmdInput, 1, wx.EXPAND | wx.ALL, 0)
self.btn_exec = buttons.GenButton(toolRightPanel, -1, '执行/Enter')
toolRightPanelSizer.Add(self.btn_exec, 0, wx.EXPAND | wx.ALL, 0)
def _init_ctrls(self):
"""初始化控制器"""
'''
全局控件控制
'''
self.needFonts.extend([self.infos, self.cmdInput,])
self.allInitBtns['global'][CON_CONTROL_CHECK].append(self.btn_check_project)
self.allInitBtns['global'][CON_CONTROL_FIX].append(self.btn_fixed_project)
self.allInitBtns['global'][CON_CONTROL_OTHER].extend([
self.btn_config_project, self.menusSettings,
])
'''
Applications
'''
self.allInitBtns['apps'][CON_CONTROL_CREATE].append(self.menuGenerate)
self.allInitBtns['apps'][CON_CONTROL_CHECK].append(self.apps_check)
self.allInitBtns['apps'][CON_CONTROL_FIX].append(self.apps_fix)
'''
Views
'''
self.allInitBtns['views'][CON_CONTROL_CREATE].extend([
self.viewsGenerateFunc,
# temporarily parked here - start
self.menuVSCode,
self.helpsORM,
# """可用性失效,后期需修复"""
self.shotcut_code,
self.shotcut_command,
self.shotcut_makemigration,
self.shotcut_migrate,
# temporarily parked here - end
])
self.allInitBtns['views'][CON_CONTROL_CHECK].append(self.views_check)
self.allInitBtns['views'][CON_CONTROL_FIX].append(self.views_fix)
'''
URLs
'''
self.allInitBtns['urls'][CON_CONTROL_CHECK].append(self.urls_check)
self.allInitBtns['urls'][CON_CONTROL_FIX].append(self.urls_fix)
'''
Templates
'''
self.allInitBtns['templates'][CON_CONTROL_CHECK].append(self.templates_check)
self.allInitBtns['templates'][CON_CONTROL_FIX].append(self.templates_fix)
'''
Forms
'''
self.allInitBtns['forms'][CON_CONTROL_CHECK].append(self.forms_check)
self.allInitBtns['forms'][CON_CONTROL_FIX].append(self.forms_fix)
'''
Models
'''
self.allInitBtns['models'][CON_CONTROL_CREATE].extend([self.modelsGenerate,self.modelsProxyGenerate,])
self.allInitBtns['models'][CON_CONTROL_CHECK].append(self.models_check)
self.allInitBtns['models'][CON_CONTROL_FIX].append(self.models_fix)
'''
Database
'''
self.allInitBtns['database'][CON_CONTROL_CHECK].append(self.database_check)
self.allInitBtns['database'][CON_CONTROL_FIX].append(self.database_fix)
'''
Admin center
'''
self.allInitBtns['admin'][CON_CONTROL_CREATE].extend([
self.adminGenerateBase,
self.adminRename,
])
def _append_separator(self, obj):
"""添加分割线"""
obj.AppendSeparator()
def _append_separator_to_tools(self):
"""向系统工具栏添加不可点击分割按钮"""
self.sys_toolbar.AddSeparator()
# self.sys_toolbar.AddTool(wx.ID_ANY, "", wx.Bitmap(BITMAP_SPLIT_PATH), shortHelp='我是分割符').Enable(False)
|
import collections.abc as abc
import typing as typ
import aiohttp
from .chunked_stream import ChunkedBytesStream, JsonStream
from .contextlib import AsyncContextManager
StreamType = typ.TypeVar('StreamType', ChunkedBytesStream, JsonStream)
class StreamableResponse(typ.Generic[StreamType], abc.Awaitable,
abc.AsyncIterator, AsyncContextManager):
"""
A Response to a method that streams blocks of data.
.. warning::
This iterator is designed to yield each item one time; meaning sharing
this instance across multiple loops will cause each loop to receive a
subset of the data. Users needing this sort of multiplexing should
implement it independently.
This warning also applies to reading from the response's content as the
data is streamed.
"""
def __init__(self,
pending_response: typ.Awaitable[aiohttp.ClientResponse],
stream_class: typ.Type[StreamType] = JsonStream):
self.pending_response = pending_response
self.stream_class = stream_class
self.response = None
self.stream = None
async def ready(self) -> None:
"""
Waits for the response headers to be ready.
.. warning::
This method *does not* check the response status code or headers.
"""
if self.response is None:
response = await self.pending_response
# Second check necessary in the event multiple calls to ready are
# running at once.
if self.response is None:
self.response = response
self.stream = self.stream_class(response)
# TODO: (not in this function) Check response headers.
async def get_response(self) -> aiohttp.ClientResponse:
"""
Wait for the response headers to be ready and then return the response.
This is the same response as used internally for iteration.
"""
await self.ready()
return self.response
async def get_stream(self) -> StreamType:
"""
Wait for the response headers to be ready and then return the stream.
This is the same stream as used internally for iteration.
"""
await self.ready()
return self.stream
async def complete(self):
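"""Consume and discard all remaining items in the stream."""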
async with self:
async for unused in self:
pass
async def as_list(self, n: typ.Optional[int] = None):
"""
Iterates over the elements in this stream and return them as a list.
If ``n`` is ``None`` then return a list of all remaining items in the
stream. Otherwise return a list of at most ``n`` elements; a list of
less than ``n`` elements indicates the stream ended before reading
the ``n`` items.
Note that this method will not return if this object represents an
infinite stream.
"""
results = []
stream = self.__aiter__()
while True:
if n is not None and len(results) >= n:
return results
try:
item = await stream.__anext__()
except StopAsyncIteration:
return results
results.append(item)
def __await__(self):
# __await__ must return a plain iterator, so delegate to a helper coroutine (added here as part of the fix)
return self._collect_all().__await__()
async def _collect_all(self):
async with self:
return await self.as_list()
async def __aenter__(self):
await self.ready()
await self.response.__aenter__()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.ready()
await self.response.__aexit__(exc_type, exc, tb)
async def __anext__(self):
await self.ready()
return await self.stream.__anext__()
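# Minimal usage sketch (assumes `pending` is an awaitable that resolves to an
# aiohttp.ClientResponse, e.g. a session.get(...) call):
#
#   async with StreamableResponse(pending, stream_class=JsonStream) as resp:
#       async for item in resp:
#           ...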
|
'''
Classifier : Logistic Regression
DataSet : TitanicDataset.csv
Features : Passenger id, Gender, Age, Fare, Class etc
Labels : -
Author : Prasad Dangare
Function Name : Titanic_Logistic
'''
# ===================
# Imports
# ===================
import numpy as np # use for multi-dimensional arrays.
import pandas as pd # use for data cleaning, analysis, data frames
import seaborn as sb # data visualization library based on matplotlib
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression # Importing LogisticRegression For Training / Testing
from seaborn import countplot # countplot Show the counts of observations in each categorical bin using bars
import matplotlib.pyplot as plt # plotting library
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from matplotlib.pyplot import figure, show # figure() in pyplot is used to create a new figure (object),
# show() in pyplot is used to display all the figures.
# ===================
# ML Operation
# ===================
def Titanic_Logistic():
print("\nInside Titanic Logistic Function\n")
# *************************
# Step 1 : Load The Data
# *************************
Titanic_Data = pd.read_csv("TitanicDataset.csv")
print("\nFirst 5 Entities From Loaded Data Set : \n")
print(Titanic_Data.head()) # it shows first 5 records
print("\nNumber Of Passengers Are : " + str(len(Titanic_Data)))
# *************************
# Step 2 : Analyse The Data
# *************************
print("\nVisulation : Survived And Non Survived Passengers : ")
figure()
target = "Survived"
countplot(data = Titanic_Data, x = target).set_title("Survived Vs Non Survived") # data is the keyword argument
show()
print("\nVisulation : Survived And Non Survived Passengers Based On Gender : ")
figure()
target = "Survived"
countplot(data = Titanic_Data, x = target, hue = "Sex").set_title("Survived And Non Survived Passengers Based On Gender")
show()
print("\nVisulation : Survived And Non Survived Passengers Based On Passenger Class : ")
figure()
target = "Survived"
countplot(data = Titanic_Data, x = target, hue = "Pclass").set_title("Survived And Non Survived Passengers Based On Passenger Class")
show()
print("\nVisulation : Survived Vs Non Survived Based On Age : ")
figure()
Titanic_Data ["Age"].plot.hist().set_title("Survived Vs Non Survived Based On Age")
show()
print("\nVisulation : Survived Vs Non Survived Based On Fare : ")
figure()
Titanic_Data ["Fare"].plot.hist().set_title("Survived Vs Non Survived Based On Fare")
show()
# *************************************
# Step 3 : Data Cleaning
# : Data Modification / Data Rangling
# *************************************
Titanic_Data.drop("zero", axis = 1, inplace = True) # drop method he column/row delete karti, inplace ha zaga war delete kala
print("\nData After Column Removal Of zero : \n")
print(Titanic_Data.head(5))
Sex = pd.get_dummies(Titanic_Data["Sex"]) # get_dummies it like label encoder, it split the column into 2 parts male, female,
print("\nSex Column Classification As 0 And 1 : \n") # get_dummies It converts categorical data into dummy
print(Sex.head(5)) # get_dumies is use to clean the data
Sex = pd.get_dummies(Titanic_Data["Sex"], drop_first = True) # we remove the female column
print("\nSex Column After Removing Female Column : \n")
print(Sex.head(5))
Pclass = pd.get_dummies(Titanic_Data["Pclass"]) # it create 3 dummies as 1, 2, 3
print("\nPassenger Class Classification In 1,2,3 : \n")
print(Pclass.head(5))
Pclass = pd.get_dummies(Titanic_Data["Pclass"], drop_first = True)
print("\nAfter First Class Passenger Column Removal : \n")
print(Pclass.head(5))
# Concat Sex And Pclass Field In Our Data Set
Titanic_Data = pd.concat([Titanic_Data, Sex, Pclass], axis = 1)
print("\nData After Concination Sex And Pclass : \n")
print(Titanic_Data.head(5))
# Giving New Name To Concatenate Fields
Titanic_Data.rename(columns = {Titanic_Data.columns[9]: "New Sex" }, inplace = True)
Titanic_Data.rename(columns = {Titanic_Data.columns[10]: "2 Class" }, inplace = True)
Titanic_Data.rename(columns = {Titanic_Data.columns[11]: "3 Class" }, inplace = True)
print("\nData After Updation Of Names : \n")
print(Titanic_Data.head())
# Removing Unnecessary Fields
Titanic_Data.drop(["Sex", "sibsp", "Parch", "Embarked"], axis = 1, inplace = True) # axis = 1 means drop the column
print("\nData After Removal Of Columns Sex, sibsp, Parch, Embarked : \n")
print(Titanic_Data.head(5))
# Divide The Data Set Into x And y
x = Titanic_Data.drop("Survived", axis = 1)
y = Titanic_Data["Survived"]
# Split The Data For Training And Testing Purpose
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.5)
obj = LogisticRegression(max_iter = 1000) # max_iter raises the solver's iteration limit
# *****************************
# Step 4 : Train The Data Set
# *****************************
obj.fit(x_train, y_train)
# ***********************************
# Step 5 : Test The Data Set
# ***********************************
output = obj.predict(x_test)
print("\nAccuracy Of Given Data Set is : \n")
print(accuracy_score(y_test, output))
print("\nConfusion Matrix Is : \n")
print(confusion_matrix(y_test, output))
# =======================
# Entry Point
# =======================
def main():
print("_____ Titanic Logistic Case Study _____")
print("___ Using Logistic Regression ___")
Titanic_Logistic()
# ===================
# Starter
# ===================
if __name__ == "__main__":
main()
|
from . import forms, models
from django.shortcuts import render
from django.contrib.auth import decorators
from django.http import HttpResponse
from pyexcelerate import Workbook, Style, Font
import requests
# cache key format <model name in uppercase>-<start-date>-<end-date>
def payments_list(request):
return render(request, "enquiry/payments.html")
@decorators.login_required
def ota_detail(request, pk):
obj_data = models.OTA.objects.get(pk=pk)
form = forms.OTAForm(instance=obj_data)
return render(request, "enquiry/ota_detail.html", {"form": form,
"title": obj_data.name})
@decorators.login_required
def partner_detail(request, pk):
obj_data = models.Partner.objects.get(pk=pk)
form = forms.PartnerForm(instance=obj_data)
return render(request, "enquiry/partner_detail.html",
{"form": form,
"title": obj_data.name})
@decorators.login_required
def review_detail(request, pk):
obj_data = models.Review.objects.get(pk=pk)
form = forms.ReviewForm(instance=obj_data)
return render(request, "enquiry/review_detail.html",
{"form": form,
"title": obj_data.headline})
@decorators.login_required
def ota_list(request):
return render(request, "enquiry/ota_list.html", {"form": forms.OTAForm()})
@decorators.login_required
def partner_list(request):
return render(request, "enquiry/partner_list.html",
{"form": forms.PartnerForm()})
@decorators.login_required
def review_list(request):
return render(request, "enquiry/review_list.html",
{"form": forms.ReviewForm()})
def master_download(request):
return render(request, "enquiry/master_data.html")
def excel_apply_formula(sheet, t_row, col_ind, n_columns, operation, s_row, e_row):
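"""Write '=<operation>(<col><s_row>:<col><e_row>)' into row t_row at each column
index in col_ind, where n_columns holds the matching column letters."""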
for i in range(len(col_ind)):
sheet.set_cell_value(
t_row, col_ind[i], f"={operation}({n_columns[i]}{s_row}:{n_columns[i]}{e_row})"
)
@decorators.login_required
def export_master_excel(request, month, year):
# paymentautoaudit is the name of the other container running separately on port 8000
res = requests.get(f"http://paymentautoaudit:8000/masterdata/{month}/{year}")
data = res.json()
print(res.status_code)
response = HttpResponse(content_type='application/ms-excel')
response['Content-Disposition'] = f'attachment; filename="{data["metadata"]["filename"]}"'
wb = Workbook()
ws = wb.new_sheet(data["metadata"]["tab1_name"])
ws[1].value = [['MVR']]
ws[2].value = [[f'01-{month}-{year}']]
ws.range("A1", "Z1").merge()
ws[1][1].style.font.bold = True
ws[1][1].style.font.size = 22
ws[1][1].style.alignment.horizontal = "center"
ws.set_row_style(1, Style(size=30))
ws.range("A2", "Z2").merge()
ws[2][1].style.alignment.horizontal = "center"
ws[2][1].style.font.bold = True
ws.range("A3", "Z3").merge()
ws[3].value = []
ws[4].value = [data["tab1_table1"]["headers"]]
ws.set_row_style(4, Style(size=25, font=Font(bold=True)))
ws.set_col_style(list(range(1, 29)), Style(size=-1))
t1_row_end = 5 + len(data["tab1_table1"]["data"][0].keys()) - 1
ws[5:t1_row_end].value = data["tab1_table1"]["data"][0].values()
ws[t1_row_end+1].value = [['Total']]
ws.set_row_style(t1_row_end+1, Style(size=25, font=Font(bold=True),))
ws.range(f"A{t1_row_end+1}", f"I{t1_row_end+1}").merge()
ws[t1_row_end+1][1].style.font.bold = True
ws[t1_row_end+1][1].style.alignment.horizontal = "center"
excel_apply_formula(ws, t1_row_end+1, (10, 11, 12, 18, 19, 20),
("J", "K", "L", "S", "T", "U"), "SUM", 5, t1_row_end)
t2_start = ws.num_rows + 2
ws[t2_start+1].value = [['ANALYSIS']]
ws.range(f"A{t2_start+1}", f"J{t2_start+1}").merge()
ws[t2_start+1][1].style.font.bold = True
ws[t2_start+1][1].style.font.size = 14
ws[t2_start+1][1].style.alignment.horizontal = "center"
ws[t2_start+2].value = [data["tab1_table2"]["headers"]]
ws[t2_start+3].value = []
ws[t2_start+4].value = [[' ', ' ROOMS ANALYSIS']]
ws[t2_start+4][2].style.font.bold = True
ws.set_row_style(t2_start+2, Style(size=35, font=Font(bold=True)))
ws.set_col_style(list(range(1, 27)), Style(size=-1))
ws[t2_start+7].value = [['', 'RESTAURANT ANALYSIS']]
ws[t2_start+7][2].style.font.bold = True
ws.set_row_style(t2_start+10, Style(size=20))
ws[t2_start+5:t2_start+6].value = [data["tab1_table2"]["data"][0]["row1"], data["tab1_table2"]["data"][0]["row2"]]
ws[t2_start+8:t2_start+10].value = [data["tab1_table2"]["data"][0]["row3"], data["tab1_table2"]["data"][0]["row4"],
data["tab1_table2"]["data"][0]["row5"]]
# New worksheet
ws1 = wb.new_sheet(data["metadata"]["tab2_name"])
ws1.range("B2", "E2").value = [['CUSTOMER DETAILS']]
ws1[2][2].style.font.bold = True
ws1[2][2].style.alignment.horizontal = "center"
ws1.range("B2", "E2").merge()
ws1.range("F2", "AE2").value = [['PRE CHECK IN']]
# ws1.set_cell_style(2, 3, Style(font=Font(bold=True)))
ws1[2][6].style.font.bold = True
ws1[2][6].style.alignment.horizontal = "center"
ws1.range("F2", "AE2").merge()
ws1.range("AH2", "AN2").value = [['POST CHECK IN']]
ws1[2][34].style.font.bold = True
ws1[2][34].style.alignment.horizontal = "center"
ws1.range("AH2", "AN2").merge()
ws1[2][42].value = 'Cash'
ws1[3][1].value = ''
ws1.range("B3", "AQ3").value = [data["tab2_table1"]["headers"]]
ws1[3][2].style.font.bold = True
ws1.set_row_style(3, Style(size=30, font=Font(bold=True)))
ws1.set_col_style(list(range(1, 43)), Style(size=-1))
tt1_row_end = 4 + len(data["tab2_table1"]["data"][0].keys()) - 1
ws1.range(f"B{tt1_row_end+1}", f"E{tt1_row_end+1}").value = [['Total']]
ws1[tt1_row_end+1][2].style.font.bold = True
ws1[tt1_row_end+1][2].style.alignment.horizontal = "center"
ws1.range(f"B{tt1_row_end+1}", f"E{tt1_row_end+1}").merge()
excel_apply_formula(ws1, tt1_row_end+1,
[10, 11, 12, 18, 19, 21, 22, 23, 25, 26, 27, 38, 39, 40, 42, 43],
["J", "K", "L", "R", "S", "U", "V", "W", "Y", "Z", "AA", "AL", "AM", "AN", "AP", "AQ"],
"SUM", s_row=4, e_row=tt1_row_end)
formula_string = "=N{0}+U{0}+Y{0}+AC{0}+AG{0}+AH{0}+AL{0}+AP{0}"
for i in range(4, tt1_row_end):
ws1.set_cell_value(i, 43, formula_string.format(i))
ws1.set_row_style(tt1_row_end+1, Style(font=Font(bold=True)))
ws1[4:tt1_row_end].value = data["tab2_table1"]["data"][0].values()
tt2_start = ws1.num_rows + 3
ws1.range(f"B{tt2_start+1}", f"E{tt2_start+1}").value = [data["tab2_table2"]["headers"]]
ws1.set_col_style(list(range(1, 4)), Style(size=-1))
ws1.set_row_style(tt2_start+1, Style(size=30, font=Font(bold=True)))
ws1[tt2_start+2:tt2_start+11].value = data["tab2_table2"]["data"][0].values()
print(data["tab2_table2"]["data"][0].values())
ws1[ws1.num_rows+1][2].value = 'TOTAL'
ws1[ws1.num_rows][2].style.font.bold = True
excel_apply_formula(ws1, tt2_start+12, [3, 4, 5], ["C", "D", "E"], "SUM", tt2_start+2, ws1.num_rows-1)
ws1.set_row_style(ws1.num_rows, Style(size=25, font=Font(bold=True)))
wb.save(response)
return response
if __name__ == "__main__":
res = requests.get("http://localhost:8001/masterdata/")
print(res.status_code)
|
"""
How to use Asyncio with InfluxDB client.
"""
import asyncio
from datetime import datetime
from influxdb_client import Point
from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
async def main():
async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
"""
Check the version of the InfluxDB
"""
version = await client.version()
print(f"\n------- Version -------\n")
print(f"InfluxDB: {version}")
"""
Prepare data
"""
print(f"\n------- Write data by async API: -------\n")
write_api = client.write_api()
_point1 = Point("async_m").tag("location", "Prague").field("temperature", 25.3)
_point2 = Point("async_m").tag("location", "New York").field("temperature", 24.3)
successfully = await write_api.write(bucket="my-bucket", record=[_point1, _point2])
print(f" > successfully: {successfully}")
"""
Query: List of FluxTables
"""
query_api = client.query_api()
print(f"\n------- Query: List of FluxTables -------\n")
tables = await query_api.query('from(bucket:"my-bucket") '
'|> range(start: -10m) '
'|> filter(fn: (r) => r["_measurement"] == "async_m")')
for table in tables:
for record in table.records:
print(f'Temperature in {record["location"]} is {record["_value"]}')
"""
Query: Stream of FluxRecords
"""
print(f"\n------- Query: Stream of FluxRecords -------\n")
query_api = client.query_api()
records = await query_api.query_stream('from(bucket:"my-bucket") '
'|> range(start: -10m) '
'|> filter(fn: (r) => r["_measurement"] == "async_m")')
async for record in records:
print(record)
"""
Query: Pandas DataFrame
"""
print(f"\n------- Query: Pandas DataFrame -------\n")
query_api = client.query_api()
dataframe = await query_api.query_data_frame('from(bucket:"my-bucket") '
'|> range(start: -10m) '
'|> filter(fn: (r) => r["_measurement"] == "async_m")'
' |> group()')
print(dataframe)
"""
Query: String output
"""
print(f"\n------- Query: String output -------\n")
query_api = client.query_api()
raw = await query_api.query_raw('from(bucket:"my-bucket") '
'|> range(start: -10m) '
'|> filter(fn: (r) => r["_measurement"] == "async_m")')
print(raw)
"""
Delete data
"""
print(f"\n------- Delete data with location = 'Prague' -------\n")
successfully = await client.delete_api().delete(start=datetime.utcfromtimestamp(0), stop=datetime.now(),
predicate="location = \"Prague\"", bucket="my-bucket")
print(f" > successfully: {successfully}")
if __name__ == "__main__":
asyncio.run(main())
|
class JobMetricConsumer(object):
"""
Class to consume the 'stage_log_middleware' channel.
Passes the metric and value to a Redis list based on job_id
Arguments:
redis {redis.Redis} -- A Redis connection object
"""
def __init__(self, redis):
self._redis = redis
def call(self, message, timestamp, meta_data):
"""
Adds metric names to redis list
Arguments:
message {dict} -- Event attributes
timestamp {int} -- The time the event was created
meta_data {dict} -- Additional data about the event
"""
from foundations_internal.fast_serializer import serialize
job_id = message['job_id']
key = 'jobs:{}:metrics'.format(job_id)
metric_key = message['key']
metric_value = message['value']
value = (timestamp, metric_key, metric_value)
self._redis.rpush(key, serialize(value))
|
from rest_framework import status
from rest_framework.response import Response
from django_pds.core.pds.generic import data_read, basic_data_read, data_insert, data_update, data_delete
from django_pds.core.rest.decorators import required
from django_pds.core.rest.response import error_response, success_response
from django_pds.core.rest.views import BaseAPIView
class GenericDeleteRestAPI(BaseAPIView):
# the @required decorator checks request.data for these
# required params before calling the post method
@required("document_name", "document_id")
def post(self, request):
try:
# we are expecting payload with the request
document_name = request.data['document_name']
document_id = request.data['document_id']
# here the below user id is manually added
# for demonstration purpose
# you can extract an user id from the request
# or from the jwt token if you implement
user_id = '862bdaf0-6fa4-476e-be07-43ededfc222c'
# user id is an optional parameter if you want to ignore security
# if ignore_permission=True, then row level security will be ignored
# permission checking will be disabled
error, result = data_delete(document_name, document_id, user_id, ignore_permissions=False)
if error:
response = error_response(result)
return Response(response, status=status.HTTP_400_BAD_REQUEST)
response = success_response(result)
return Response(response, status=status.HTTP_200_OK)
except BaseException as e:
response = error_response(str(e))
return Response(response, status=status.HTTP_400_BAD_REQUEST)
class RestInsert(BaseAPIView):
# the @required decorator checks request.data for these
# required params before calling the post method
@required("document_name", "data")
def post(self, request):
try:
# we are expecting payload with the request
document_name = request.data['document_name']
data = request.data['data']
# as we are not checking row level security,
# ignoring offered row level security
error, result = data_insert(document_name, data, ignore_security=True)
if error:
response = error_response(result)
return Response(response, status=status.HTTP_400_BAD_REQUEST)
response = success_response(result)
return Response(response, status=status.HTTP_200_OK)
except BaseException as e:
response = error_response(str(e))
return Response(response, status=status.HTTP_400_BAD_REQUEST)
class RestUpdateAPIView(BaseAPIView):
# the @required decorator checks request.data for these
# required params before calling the post method
@required("document_name", "data")
def post(self, request):
try:
# we are expecting payload with the request
document_name = request.data['document_name']
data = request.data['data']
# here the below user id is manually added
# for demonstration purpose
# you can extract an user id from the request
# or from the jwt token if you implement
user_id = '862bdaf0-6fa4-476e-be07-43ededfc222c'
# user id is an optional parameter if you want to ignore security
# if ignore_security=True, then row level security will be ignored
error, result = data_update(document_name, data, ignore_security=True, user_id=user_id)
if error:
response = error_response(result)
return Response(response, status=status.HTTP_400_BAD_REQUEST)
response = success_response(result)
return Response(response, status=status.HTTP_200_OK)
except BaseException as e:
response = error_response(str(e))
return Response(response, status=status.HTTP_400_BAD_REQUEST)
class GetBySQLFilter(BaseAPIView):
required("document_name", "query")
def post(self, request):
try:
params = request.data
document_name = params['document_name']
query = params['query']
# here the below user id is manually added
# for demonstration purpose
# you can extract an user id from the request
# or from the jwt token if you implement
user_id = '862bdaf0-6fa4-476e-be07-43ededfc222c'
error, data = data_read(document_name, query, user_id=user_id, readable=False, error_track=True)
return Response(data, status=status.HTTP_200_OK if not error else status.HTTP_400_BAD_REQUEST)
except BaseException as e:
return Response(error_response(str(e)), status=status.HTTP_400_BAD_REQUEST)
class GetBySQLFilter2(BaseAPIView):
def get(self, request):
try:
document_name = 'Award'
fields = ('ItemId', 'Title', 'Year', 'Description')
error, data_or_exception = basic_data_read(document_name, fields=fields)
return Response(data_or_exception, status=status.HTTP_200_OK if not error else status.HTTP_400_BAD_REQUEST)
except BaseException as e:
return Response(error_response(str(e)), status=status.HTTP_400_BAD_REQUEST)
class BasicDataReadRestAPI(BaseAPIView):
required("document_name", "fields")
def post(self, request):
try:
document_name = request.data['document_name']
fields = request.data['fields']
page_size = request.data.get('pageSize', 10) # optional params
page_number = request.data.get('pageNumber', 1) # optional params
order_by = request.data.get('order_by', []) # optional params
error, data_or_exception = basic_data_read(document_name, fields=fields,
page_size=page_size, page_num=page_number,
order_by=order_by)
return Response(data_or_exception,
status=status.HTTP_200_OK if not error else status.HTTP_400_BAD_REQUEST)
except BaseException as e:
return Response(error_response(str(e)), status=status.HTTP_400_BAD_REQUEST)
|
# import necessary libraries
import torch
import numpy as np
import ujson as json
import torch.nn as nn
import argparse
from pathlib import Path
from tokenizers import BertWordPieceTokenizer
from transformers import BertForSequenceClassification, BertConfig
from tqdm import tqdm
import arguments.rank_arguments as arguments
from data_processing.articles import Articles
import data_processing.dictionaries as dictionary
import training.eval_util as eval_util
from training.collate import collate_fn
parser = argparse.ArgumentParser(description="Get Ranked Predictions on New Dataset.")
arguments.add_data(parser)
arguments.add_model(parser)
args = parser.parse_args()
if torch.cuda.is_available() and args.use_gpu:
device = "cuda"
elif not args.use_gpu:
device = "cpu"
else:
device = "cpu"
print("Cannot use GPU. Using CPU instead.")
print(f"Device: {device}")
print("-------------------")
# set output directory path
output_path = Path(args.output_dir)
# load in dataset
raw_data_path = Path(args.dataset_path)
raw_data = Articles(raw_data_path)
print("Data Loaded")
print("-------------------")
# load dictionaries from path
dictionary_dir = Path(args.dict_dir)
final_word_ids, final_url_ids, final_publication_ids = dictionary.load_dictionaries(
dictionary_dir
)
print("Dictionaries Loaded")
print("-------------------")
# map items to their dictionary values
if args.map_items:
# initialize tokenizer from BERT library
tokenizer = BertWordPieceTokenizer(args.tokenizer_file, lowercase=True)
print("Tokenizer Initialized!")
# tokenize and map items to their ids in dictionaries and filter articles
proper_data = raw_data.map_items(
tokenizer,
final_word_ids,
final_url_ids,
final_publication_ids,
filter=True,
min_length=args.min_article_length,
day_range=args.days_old,
)
print("Mapped and Filtered Data!")
data_path = Path(args.data_dir)
if not data_path.is_dir():
data_path.mkdir()
mapped_data_path = data_path / "mapped-data"
print("Initial: ", len(raw_data))
if not mapped_data_path.is_dir():
mapped_data_path.mkdir()
train_mapped_path = mapped_data_path / "mapped_dataset.json"
with open(train_mapped_path, "w") as file:
json.dump(proper_data, file)
raw_data = Articles(train_mapped_path)
print("Final: ", len(raw_data))
print(f"Filtered, Mapped Data saved to {mapped_data_path} directory")
print("-------------------")
def collate_fn(examples):
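"""Collate a batch: deduplicated (optionally truncated to args.words_to_use) word ids
per example plus their offsets, along with article, publication and label tensors."""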
words = []
articles = []
labels = []
publications = []
for example in examples:
if args.use_all_words:
words.append(list(set(example["text"])))
else:
if len(example["text"]) > args.words_to_use:
words.append(list(set(example["text"][: args.words_to_use])))
else:
words.append(list(set(example["text"])))
articles.append(example["url"])
publications.append(example["model_publication"])
labels.append(example["model_publication"])
num_words = [len(x) for x in words]
words = np.concatenate(words, axis=0)
word_attributes = torch.tensor(words, dtype=torch.long)
articles = torch.tensor(articles, dtype=torch.long)
num_words.insert(0, 0)
num_words.pop(-1)
attribute_offsets = torch.tensor(np.cumsum(num_words), dtype=torch.long)
publications = torch.tensor(publications, dtype=torch.long)
real_labels = torch.tensor(labels, dtype=torch.long)
return publications, articles, word_attributes, attribute_offsets, real_labels
# change negative example publication ids to the ids of the first half for predictions
def collate_with_neg_fn(examples):
(
publications,
articles,
word_attributes,
attribute_offsets,
real_labels,
) = collate_fn(examples)
publications[len(publications) // 2 :] = publications[: len(publications) // 2]
return publications, articles, word_attributes, attribute_offsets, real_labels
# Generates a dataloader on the dataset that outputs entire set as a batch for one time predictions
pin_mem = device == "cuda"  # assumption: pin host memory only when batches are copied to the GPU
raw_loader = torch.utils.data.DataLoader(
raw_data, batch_size=args.data_batch_size, collate_fn=collate_fn, pin_memory=pin_mem
)
abs_model_path = Path(args.model_path)
kwargs = dict(
n_publications=len(final_publication_ids),
n_articles=len(final_url_ids),
n_attributes=len(final_word_ids),
emb_size=args.emb_size,
sparse=args.use_sparse,
use_article_emb=args.use_article_emb,
mode=args.word_embedding_type,
)
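# NOTE: InnerProduct is assumed to be provided by the project's model module;
# its import statement is missing from the original script.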
model = InnerProduct(**kwargs)
model.load_state_dict(torch.load(abs_model_path))
model.to(device)
print("Model Loaded")
print(model)
print("-------------------")
# get final evaluation results and create a basic csv of top articles
data_logit_list = []
for batch in tqdm(raw_loader):  # iterate over DataLoader batches rather than raw examples
current_logits = eval_util.calculate_batched_predictions(
batch, model, device, args.target_publication
)
data_logit_list = data_logit_list + list(current_logits)
converted_list = np.array(data_logit_list)
sorted_preds = np.sort(converted_list)
indices = np.argsort(converted_list)
ranked_df = eval_util.create_ranked_results_list(
final_word_ids, sorted_preds, indices, raw_data
)
eval_util.save_ranked_df(output_path, "evaluation", ranked_df, args.word_embedding_type)
print("Predictions Made")
print(f"Ranked Data Saved to {output_path / 'results' / 'evaluation'} directory!")
|
import os.path
from collections import defaultdict
from graph import djikstra
def _parse_orbits(lines):
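"""Parse lines of the form 'A)B' (meaning B orbits A) into a dict mapping each object to the object it directly orbits."""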
orbits = {}
for line in lines:
a, b = line.strip().split(')')
if b in orbits:
raise ValueError(f'object {b} orbits around more than one object')
orbits[b] = a
return orbits
def _build_neighbor_map(orbits):
neighbors = defaultdict(list)
for k, v in orbits.items():
neighbors[k].append(v)
neighbors[v].append(k)
return neighbors
def _read_input():
with open(os.path.basename(__file__).replace('.py', '.txt')) as f:
return _parse_orbits(f.readlines())
def _count(obj, orbits):
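"""Count direct and indirect orbits by walking from `obj` up the chain to COM."""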
if obj not in orbits or obj == 'COM':
return 0
return 1 + _count(orbits[obj], orbits)
def part1(orbits):
n = 0
for k in orbits.keys():
n += _count(k, orbits)
return n
def part2(neighbors):
dist, _ = djikstra(neighbors, target='SAN', source='YOU')
return dist['SAN'] - 2 # subtract 2 for source and target node
orbits = _read_input()
neighbors = _build_neighbor_map(orbits)
print(part1(orbits))
print(part2(neighbors))
############
# Tests
# fmt: off
example = _parse_orbits('''COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L
'''.splitlines())
example2 = _parse_orbits('''COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L
K)YOU
I)SAN
'''.splitlines())
example_neighbors = _build_neighbor_map(example2)
# fmt: on
def test_count():
assert _count('COM', example) == 0
assert _count('D', example) == 3
assert _count('L', example) == 7
def test_neighbour_map():
assert _build_neighbor_map(example)['COM'] == ['B']
assert sorted(_build_neighbor_map(example)['D']) == ['C', 'E', 'I']
assert sorted(_build_neighbor_map(example)['B']) == ['C', 'COM', 'G']
def test_example():
assert part1(example) == 42
assert part2(example_neighbors) == 4
def test_solution():
assert part1(orbits) == 300598
assert part2(neighbors) == 520
|
import unittest
from .util import TrelloElementMock
from trello_collection import TrelloCollection
class TrelloCollectionTests(unittest.TestCase):
def setUp(self):
self.collection = TrelloElementMock.collection()
self.trello_collection = TrelloCollection(self.collection)
def test_it_filters_the_closed_elements(self):
collection = TrelloElementMock.collection()
collection[0].closed = True
trello_collection = TrelloCollection(collection)
self.assertEqual(len(trello_collection.elements), len(collection) - 1)
def test_names_returns_every_name_from_the_collection(self):
self.assertEqual(self.trello_collection.names(), ["first", "second"])
def test_find_gets_an_element_from_the_collection_by_index(self):
self.assertEqual(self.trello_collection.find(1), self.collection[1])
def test_on_init_it_gets_the_property_from_the_trello_element(self):
trello_element = TrelloElementMock("card")
trello_element.collection = TrelloElementMock.collection()
collection = TrelloCollection(trello_element, "collection")
self.assertEqual(collection.elements, trello_element.collection)
if __name__ == '__main__':
unittest.main()
|
"""The ASGI config for the project. """
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'conf.docker')
application = get_asgi_application()
|
"""Vendor-specific extensions of the `dicomweb_client` package."""
|
from models.user import AppUserModel
from resources.mixin import ActivateMixin, ListMixin, ResourceMixin
class User(ResourceMixin):
model = AppUserModel
parsed_model = model.parse_model()
class ActivateUser(ActivateMixin):
model = AppUserModel
class Users(ListMixin):
model = AppUserModel
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.CheckResultList import CheckResultList
class KoubeiQualityTestShieldResultSyncModel(object):
def __init__(self):
self._batch_no = None
self._check_result_list = None
self._order_id = None
self._out_biz_no = None
self._partner_id = None
self._pay_style = None
self._shop_id = None
@property
def batch_no(self):
return self._batch_no
@batch_no.setter
def batch_no(self, value):
self._batch_no = value
@property
def check_result_list(self):
return self._check_result_list
@check_result_list.setter
def check_result_list(self, value):
if isinstance(value, list):
self._check_result_list = list()
for i in value:
if isinstance(i, CheckResultList):
self._check_result_list.append(i)
else:
self._check_result_list.append(CheckResultList.from_alipay_dict(i))
@property
def order_id(self):
return self._order_id
@order_id.setter
def order_id(self, value):
self._order_id = value
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
@property
def partner_id(self):
return self._partner_id
@partner_id.setter
def partner_id(self, value):
self._partner_id = value
@property
def pay_style(self):
return self._pay_style
@pay_style.setter
def pay_style(self, value):
self._pay_style = value
@property
def shop_id(self):
return self._shop_id
@shop_id.setter
def shop_id(self, value):
self._shop_id = value
def to_alipay_dict(self):
params = dict()
if self.batch_no:
if hasattr(self.batch_no, 'to_alipay_dict'):
params['batch_no'] = self.batch_no.to_alipay_dict()
else:
params['batch_no'] = self.batch_no
if self.check_result_list:
if isinstance(self.check_result_list, list):
for i in range(0, len(self.check_result_list)):
element = self.check_result_list[i]
if hasattr(element, 'to_alipay_dict'):
self.check_result_list[i] = element.to_alipay_dict()
if hasattr(self.check_result_list, 'to_alipay_dict'):
params['check_result_list'] = self.check_result_list.to_alipay_dict()
else:
params['check_result_list'] = self.check_result_list
if self.order_id:
if hasattr(self.order_id, 'to_alipay_dict'):
params['order_id'] = self.order_id.to_alipay_dict()
else:
params['order_id'] = self.order_id
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
else:
params['out_biz_no'] = self.out_biz_no
if self.partner_id:
if hasattr(self.partner_id, 'to_alipay_dict'):
params['partner_id'] = self.partner_id.to_alipay_dict()
else:
params['partner_id'] = self.partner_id
if self.pay_style:
if hasattr(self.pay_style, 'to_alipay_dict'):
params['pay_style'] = self.pay_style.to_alipay_dict()
else:
params['pay_style'] = self.pay_style
if self.shop_id:
if hasattr(self.shop_id, 'to_alipay_dict'):
params['shop_id'] = self.shop_id.to_alipay_dict()
else:
params['shop_id'] = self.shop_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiQualityTestShieldResultSyncModel()
if 'batch_no' in d:
o.batch_no = d['batch_no']
if 'check_result_list' in d:
o.check_result_list = d['check_result_list']
if 'order_id' in d:
o.order_id = d['order_id']
if 'out_biz_no' in d:
o.out_biz_no = d['out_biz_no']
if 'partner_id' in d:
o.partner_id = d['partner_id']
if 'pay_style' in d:
o.pay_style = d['pay_style']
if 'shop_id' in d:
o.shop_id = d['shop_id']
return o
|
#!/usr/bin/env python2
import cv2 as cv
import numpy as np
import glob, os
from sys import argv
kx = 0.1
kx2 = 1 - kx
def boundingRect_sel (rect, img_shape):
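"""Return True if the bounding rect (x, y, w, h) lies entirely inside the central
region of the image, keeping a margin of kx of the image size on every side."""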
x, y, sw, sh = rect
h, w = img_shape
if ((kx * w < x) and (kx * h < y) and
(kx2 * w > (x + sw)) and ((y + sh) < kx2 * h)):
return True
return False
lenk = 1.0
def contours_selection(cnts, img_shape):
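"""Return the index of the first contour whose perimeter is at least half the image
diagonal and whose bounding rect passes boundingRect_sel; None if no contour qualifies."""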
w, h = img_shape
minl = lenk/2 * np.sqrt(w * w + h * h)
for i in xrange(0, len(cnts)):
if ((minl < cv.arcLength(cnts[i], True)) and
boundingRect_sel(cv.boundingRect(cnts[i]), img_shape)):
return i
return None # len(cnts) - 1
def parse_file_metadata(f):
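"""Split a file name like '<object>[.<ops>].<width>.<ext>' into (object, ops, width); ops defaults to 'none'."""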
nam = os.path.splitext(os.path.basename(f))[0]
l = nam.split('.')
obj = l[0]
ops = 'none'
wid = 0
if len(l) == 3:
ops = l[1]
wid = int(l[2])
else:
wid = int(l[1])
return obj, ops, wid
def contour_data_record(f, cnt):
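"""Format one record line: object name, width, the seven Hu moments of the contour, and the ops tag."""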
obj, ops, wid = parse_file_metadata(f)
moments = cv.HuMoments(cv.moments(cnt))
line = "{:>18} {:>4}".format(obj, str(wid))
for m in moments:
line += " {:>20.8e}".format(m[0])
line += " {:<}".format(ops)
return line
def marked_file (path):
des, file = os.path.split(path)
return des + "/marked/" + file
def handle_file (path):
img = cv.cvtColor(cv.imread(path), cv.COLOR_RGB2GRAY)
img[img < 255] = 0
cnts, h = cv.findContours(img,
cv.RETR_LIST,
cv.CHAIN_APPROX_NONE)
ci = contours_selection(cnts, img.shape)
if ci is not None:
img = cv.imread(path)
cv.drawContours(img, cnts, ci,
(112,181,27),
thickness = 2)
cv.imwrite(marked_file(path), img)
print(contour_data_record(path, cnts[ci]))
if __name__ == "__main__":
for p in glob.glob("./tmp/*.png"):
handle_file(p)
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 5 20:51:41 2022
@author: ywjin0707
"""
import anndata as ad
import numpy as np
import scanpy as sc
# `mydata` is assumed to be an AnnData object loaded earlier; this file is a fragment.
sc.pp.normalize_total(mydata, target_sum=1e4)
sc.pp.log1p(mydata)
sc.pp.highly_variable_genes(mydata)
mydata = mydata[:, mydata.var.highly_variable]
def simulate_bulk(datasets: list, n: int = 10000, c: int = 500, sf: int = 100):
"""Simulate `n` pseudo-bulk samples of roughly `c` cells each, mixing the datasets with Dirichlet-distributed proportions."""
k = len(datasets)
Nprop = np.random.dirichlet(alpha=[1] * k, size=n)  # one row of k mixing proportions per sample
samples = []
for prop in Nprop:
# sample round(p * c) random cells from each dataset and pool them into one pseudo-bulk sample
parts = [ds[np.random.randint(low=0, high=ds.n_obs, size=int(round(p * c)))] for ds, p in zip(datasets, prop)]
samples.append(ad.concat(parts))
# NOTE: returning the list is an assumption; the original fragment neither aggregated nor returned (and `sf` is unused)
return samples
|
import inspect
import io
import friendly_traceback.core
import rich.console
import rich.traceback
import markdown
from contextlib import redirect_stderr, redirect_stdout
from io import StringIO
import os
import sys
try:
import hebrew_python
except ImportError:
hebrew_python = None
console = rich.console.Console(
color_system="truecolor", force_terminal=True, record=True, file=StringIO())
globals_exec = {
"console": console,
"os": os,
"sys": sys,
}
def run_code(code, hebrew_mode=False):
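"""Compile and exec `code` (after the optional Hebrew-Python transform). Returns an
empty dict on success; on any exception returns a dict whose "error" key holds an
HTML rich traceback plus a friendly_traceback explanation."""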
if hebrew_mode:
if not hebrew_python.hook.hebrew_builtins:
hebrew_python.hook.setup()
try:
rich.traceback.open = lambda file, *args, **kwargs: io.StringIO(code) if file == "/program.py" else open(file,
*args,
**kwargs) # hook rich open
if hebrew_mode:
code = hebrew_python.hook.transform_source(code)
c = compile(code, "program.py", "exec")
if hebrew_mode:
# builtins.__dict__.update(hebrew_python.hook.hebrew_builtins)
__builtins__.__dict__.update(hebrew_python.hook.hebrew_builtins)
# all_builtins.update(builtins.__dict__)
exec(c, globals_exec, {})
return {}
except (Exception, SystemExit, SyntaxError):
exc_type, exc_value, tb = sys.exc_info()
tb = rich.traceback.Traceback.from_exception(
exc_type, exc_value, tb, show_locals=True)
# tb.suppress = ["<exec>"] # FIXME : this not work
tb.trace.stacks[0].frames.pop(0)
# console.print_exception()
console.print(tb)
fr = friendly_traceback.core.FriendlyTraceback(*sys.exc_info())
fr.compile_info()
generic = fr.info.get("generic", '')
cause = fr.info.get("cause", '')
suggest = fr.info.get("suggest", '')
if suggest:
suggest = "\n" + suggest
text = f'{generic}\n{suggest}\n{cause}'
html = markdown.markdown(text)
ret = {"error": console.export_html() + "\n\n" + html,
"shell": inspect.stack()}
return ret
if __name__ == "__main__":
if "MAIN" in os.environ:
rich.print(run_code("."))
|
# Maria's living-room mobile is made of three rods, exactly as in the figure below.
# For it to be perfectly balanced, with every rod horizontal, the weights of the
# four balls A, B, C and D must satisfy all three of the following conditions:
# A = B + C + D
# B + C = D
# B = C
Pa, Pb, Pc, Pd = map(int, input().split(" "))
if Pa == Pb + Pc + Pd and Pb + Pc == Pd and Pb == Pc:
print("Está em Equilíbrio")
else:
print("Não está em Equilíbrio")
|
def bubble_sort(collection):
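"""Bubble-sort `collection` in place and return it; stop early when a full pass makes no swaps."""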
length = len(collection)
for i in range (length - 1):
swapped = False
for j in range(length - i - 1):
if (collection[j] > collection[j + 1]):
collection[j], collection[j + 1] = collection[j + 1], collection[j]
swapped = True
if not swapped:
break
return collection
if __name__ == "__main__":
import time
user_input = input("Enter numbers separated by a comma:").strip()
unsorted = [int(item) for item in user_input.split(",")]
start = time.process_time()
print(*bubble_sort(unsorted), sep=",")
print(f"Processing time: {time.process_time() - start}")
|
class cuenta_bancaria:
def __init__(self, titular,fondos):
if isinstance(titular,str):
self.titular=titular
else:
self.titular="nombre"
if isinstance(fondos,float):
self.fondos=fondos
else:
self.fondos=0
def imprimir(self):
print("el cliente",self.titular,"tiene",self.fondos)
def ingresar(self,x):
if x<0:
print("cantidad erronea")
else:
self.fondos=x+self.fondos
print("ahora tienes",self.fondos)
def retirar(self,y):
if y>self.fondos:
print("retirada cancelada")
elif y<0:
print("cantidad erronea")
else:
self.fondos=self.fondos-y
print("te quedan",self.fondos)
cuenta_1=cuenta_bancaria("javi",500.0)
cuenta_1.imprimir()
cuenta_1.ingresar(400.0)
cuenta_1.retirar(100.0)
cuenta_1.imprimir()
|
import json
import os.path as path
import sys
import getopt
class Param(object):
"""class to set up the default parameters value of the model
the default parameters are stored in a json file along with
their definition
launch the code with the -h to get the help on all the model
parameters, e.g.
python vortex.py -h
"""
def __init__(self, defaultfile):
"""defaultfile is a sequel, it's no longer used the default file is
systematically the defaults.json located in the fluid2d/core
"""
import grid
d = path.dirname(grid.__file__)
        jsonfile = d + '/defaults.json'
        with open(jsonfile) as f:
namelist = json.load(f)
self.set_parameters(namelist)
        # parse the command line: -h prints the documentation of every
        # parameter and exits, -v makes the run print the parameter values
        opts, args = getopt.getopt(sys.argv[1:], 'hv')
        flags = [opt for opt, _ in opts]
        if '-h' in flags:
            self.manall()
            sys.exit()
        self.print_param = '-v' in flags
def set_parameters(self, namelist):
avail = {}
doc = {}
for d in namelist.keys():
dd = namelist[d]
for name in dd.keys():
val = dd[name]['default']
# print(name, val)
setattr(self, name, val)
if 'avail' in dd[name]:
avail[name] = dd[name]['avail']
if 'doc' in dd[name]:
doc[name] = dd[name]['doc']
self.avail = avail
self.doc = doc
def man(self, name):
if name in self.doc:
helpstr = self.doc[name]
if name in self.avail:
availstr = ', '.join([str(l) for l in self.avail[name]])
helpstr += ' / available values = ['+availstr+']'
else:
helpstr = 'no manual for this parameter'
name = '\033[0;32;40m' + name + '\033[0m'
print(' - "%s" : %s\n' % (name, helpstr))
def manall(self):
ps = self.listall()
for p in ps:
self.man(p)
def checkall(self):
for p, avail in self.avail.items():
if getattr(self, p) in avail:
# the parameter 'p' is well set
pass
else:
                msg = 'parameter "%s" should be in ' % p
msg += str(avail)
raise ValueError(msg)
def listall(self):
""" return the list of all the parameters"""
        ps = [d for d in self.__dict__ if d not in ['avail', 'doc']]
return ps
def printvalues(self):
""" print the value of each parameter"""
for d in self.__dict__.keys():
if not(d in ['avail', 'doc']):
print('%20s :' % d, getattr(self, d))
def copy(self, obj, list_param):
""" copy attributes listed in list_param to obj
On output it returns missing attributes
"""
missing = []
for k in list_param:
if hasattr(self, k):
setattr(obj, k, getattr(self, k))
else:
missing.append(k)
return missing
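# Illustrative shape of defaults.json as consumed by set_parameters above (the
# section and parameter names here are guesses; only the "default", "avail" and
# "doc" keys are taken from the code):
# {
#     "model": {
#         "beta": {"default": 0.0, "doc": "planetary vorticity gradient"},
#         "integrator": {"default": "RK3", "avail": ["EF", "RK3"], "doc": "time scheme"}
#     }
# }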
if __name__ == "__main__":
param = Param('default.xml')
    print('list of parameters')
print(param.listall())
# to have the documentation on one particular parameter
param.man('beta')
# to get the documentation on all the parameters
param.manall()
    # to check that every parameter whose value must come from a list
    # actually has an acceptable value
param.checkall()
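    # A hedged sketch of Param.copy (the Target class below is illustrative,
    # not part of the original code): copy a subset of parameters onto another
    # object and collect the names that param does not define.
    class Target:
        pass
    target = Target()
    missing = param.copy(target, ['beta', 'not_a_parameter'])
    print('missing parameters:', missing)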
|
from .. import SNG
from .. xls import get_xlwt
from .. xls import main
from unittest import mock
import builtins
import pytest
import xlrd
import xlwt
def test_xls__get_xlwt__1():
"""It returns the `xlwt` module."""
assert get_xlwt() == xlwt
def test_xls__get_xlwt__2(capfd):
"""It exits with an error message if xlwt cannot be imported."""
with pytest.raises(SystemExit):
with mock.patch.object(
builtins, '__import__', side_effect=ImportError):
get_xlwt()
out, err = capfd.readouterr()
assert out.startswith(
'You have to install the `xls` extra for this feature.')
def test_xls__main__1(capfd):
"""It requires a source directory."""
with pytest.raises(SystemExit) as err:
main(['i-do-not-exist'])
assert '2' == str(err.value)
out, err = capfd.readouterr()
assert err.strip().endswith("'i-do-not-exist' is not a valid path.")
def test_xls__main__2(tmpdir, capfd):
"""It requires a readable source directory."""
dir = tmpdir.mkdir('not-readable')
dir.chmod(0o000)
with pytest.raises(SystemExit) as err:
main([str(dir)])
assert '2' == str(err.value)
out, err = capfd.readouterr()
assert err.strip().endswith("/not-readable' is not a readable dir.")
def test_xls__main__3(tmpdir, capfd):
"""It requires a target file."""
with pytest.raises(SystemExit) as err:
main([str(tmpdir)])
assert '2' == str(err.value)
out, err = capfd.readouterr()
assert err.strip().endswith(
"the following arguments are required: dest_file")
def test_xls__main__4(tmpdir, caplog):
"""It writes titles and numbers alphabetically sorted to an XLS file."""
caplog.clear()
base_dir = tmpdir.mkdir('sb-files')
s1 = SNG()
s1.update({
'Title': 'Beta',
'Songbook': 'SB 23',
'ChurchSongID': 'CS 45',
'Text': 'song text'
})
s1.export(base_dir.join('1.sng').open('wb'))
s2 = SNG()
s2.update({
'Title': 'Alpha',
})
s2.export(base_dir.join('2.sng').open('wb'))
s3 = SNG()
s3.update({
'ChurchSongID': 'CS 2411',
})
s3.export(base_dir.join('3.sng').open('wb'))
base_dir.join('no.sng').write_binary('Nö sôñg!'.encode('latin-1'))
dest_file = tmpdir.join('export.xls')
main([str(base_dir), str(dest_file)])
wb = xlrd.open_workbook(str(dest_file))
assert [u'SongBeamer songs'] == wb.sheet_names()
work_sheet_0 = wb.sheet_by_index(0)
assert (3, 3) == (work_sheet_0.nrows, work_sheet_0.ncols)
got = [work_sheet_0.row_values(rx)
for rx in range(work_sheet_0.nrows)]
assert [
['', 'CS 2411', ''],
['Alpha', '', ''],
['Beta', 'CS 45', 'SB 23'],
] == got
assert "Missing Title in '3.sng'" in caplog.text
|
import os, sys, inspect
THISDIR = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# Add ../lib to the import path
sys.path.insert(0, os.path.dirname(THISDIR) + "/lib")
sys.path.insert(0, os.path.dirname(THISDIR) + "/RecipeBuilder")
# Generate coverage data with py.test test\test_recipeBuilder-builder.py --cov=recipebuilder
# Get pretty output with coverage report or coverage html
import fileIO, builder
builder.CONFIG = THISDIR + "/testRecipeBuilder.json"
builder.AUTHOR, builder.NAMESPACE, builder.NAME_FROM_INGREDIENT, builder.RECIPES, builder.IMPLEMENTATIONS = builder.readConfig()
builder.RESDIR = THISDIR + "/testRecipeBuilderInput"
builder.OUTDIR = THISDIR + "/"
'''
Assumption: if the field 'author' is parsed correctly (and is as expected) then
the entire config has been parsed correctly
'''
def test_readConfig():
author, namespace, name_from_ingredient, recipes, implementations = builder.readConfig()
assert(author == "testAuthor")
def test_getRecipe():
assert(builder.getRecipe("test0") == {"id": "test0", "pattern": ["A ", " A ", " A"],
"predef_ingredients": [], "group": "test_group0","count": 1})
def test_getRecipeTypePattern():
assert(builder.getRecipeType(builder.getRecipe("test0")) == "pattern")
def test_getRecipeTypeShapeless():
assert(builder.getRecipeType(builder.getRecipe("test1")) == "shapeless")
def test_getRecipeTypeUndefined():
assert(builder.getRecipeType(builder.getRecipe("test2")) == "")
'''
Assumption: if the field 'pattern' is parsed correctly (and is as expected) then
the recipe has been assembled and written correctly
'''
def test_assembleRecipePattern():
builder.assembleRecipe("pattern", {"id": "test0", "pattern": ["A ", " A ", " A"],
"predef_ingredients": [], "group": "test_group0","count": 1}, "thisIsA:result", ["thisIsA:ingredient"])
assert(fileIO.readJSON(THISDIR + "/result_from_ingredient.json")["pattern"] == ["A ", " A ", " A"])
def test_assembleRecipeShapeless():
builder.assembleRecipe("shapeless", {"id": "test1","shapeless": ["A", "B"],
"predef_ingredients": [{"A": "thisIsA:ingredient"}],"group": "test_group1","count": 1},
"thisIsA:result", ["thisIsA:notherIngredient"])
assert(fileIO.readJSON(THISDIR + "/result_from_notherIngredient.json")["group"] == "thisIsA:test_group1")
def test_getAllImplementationsPattern():
builder.IMPLEMENTATIONS = [{"usesid": "test0","ingredients": [["thisIsA:result", ["thisIsA:ingredient"]]]}]
builder.getAllImplementations()
assert(fileIO.readJSON(THISDIR + "/result_from_ingredient.json")["pattern"] == ["A ", " A ", " A"])
def test_getAllImplementationsShapeless():
builder.IMPLEMENTATIONS = [{"usesid": "test1","ingredients": [["thisIsA:result", ["thisIsA:notherIngredient"]]]}]
builder.getAllImplementations()
assert(fileIO.readJSON(THISDIR + "/result_from_notherIngredient.json")["group"] == "thisIsA:test_group1")
def test_getNamespacedIngredientSpecified():
assert(builder.getNamespacedIngredient("thisIsA:test") == "thisIsA:test")
def test_getNamespacedIngredientNotSpecified():
assert(builder.getNamespacedIngredient("test") == "thisIsA:test")
def test_stripNamespaceSpecified():
assert(builder.stripNamespace("thisIsA:test") == "test")
def test_stripNamespaceNotSpecified():
assert(builder.stripNamespace("test") == "test")
|
BLACK = "\033[0m"  # note: this is the ANSI reset sequence, not the color black
RED = "\033[1;31m"
GREEN = "\033[1;32m"
YELLOW = "\033[1;33m"
BLUE = "\033[1;34m"
PURPLE = "\033[1;35m"
AQUA = "\033[1;36m"
red = lambda x: RED + str(x) + BLACK
green = lambda x: GREEN + str(x) + BLACK
yellow = lambda x: YELLOW + str(x) + BLACK
blue = lambda x: BLUE + str(x) + BLACK
purple = lambda x: PURPLE + str(x) + BLACK
aqua = lambda x: AQUA + str(x) + BLACK
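# A minimal usage sketch (assumes an ANSI-capable terminal): each helper wraps
# its argument in a color escape sequence and resets the color afterwards.
if __name__ == "__main__":
    print(green("OK"), yellow("WARNING"), red("ERROR"))
    print(blue(3.14), purple("text"), aqua([1, 2, 3]))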
|
#!/usr/bin/env python
#encoding:utf-8
#-------------------------------------------------------------------------------
# Name:        Syndication feed class for subscription
# Purpose:
#
# Author: Mike
#
# Created: 29/01/2009
# Copyright: (c) CNPROG.COM 2009
# Licence: GPL V2
#-------------------------------------------------------------------------------
from django.contrib.syndication.feeds import Feed, FeedDoesNotExist
from models import Question
class RssLastestQuestionsFeed(Feed):
title = u"CNProg程序员问答社区-最新问题"
link = u"http://www.cnprog.com/questions/"
description = u"中国程序员的编程技术问答社区。我们做专业的、可协作编辑的技术问答社区。"
#ttl = 10
copyright = u'Copyright(c)2009.CNPROG.COM'
def item_link(self, item):
return '/questions/%s/' % item.id
def item_author_name(self, item):
return item.author.username
def item_author_link(self, item):
return item.author.get_profile_url()
def item_pubdate(self, item):
return item.added_at
def items(self, item):
return Question.objects.filter(deleted=False).order_by('-added_at')[:30]
def main():
pass
if __name__ == '__main__':
main()
|