content stringlengths 5 1.05M |
|---|
# encoding: UTF-8
'''
vnpy.api.okex的gateway接入
更新OKEX V3
By Chenzhipei
'''
import os
import json
from datetime import datetime, timedelta
from time import sleep
import time as td
from copy import copy
from threading import Condition
from queue import Queue
from threading import Thread
from time import sleep
import traceback
import ast
import zlib # 新增解压功能
from vnpy.api.okex import WsSpotApi, WsFuturesApi, SPOT_SYMBOL_PAIRS, CONTRACT_SYMBOL, CONTRACT_TYPE, SPOT_CURRENCY
from vnpy.api.okex.okexData import SPOT_TRADE_SIZE_DICT, SPOT_REST_ERROR_DICT, SPORT_WS_ERROR_DICT, FUTURES_ERROR_DICT
from vnpy.api.okex.OkcoinFutureAPI import OKEX_FUTURE_HOST,OKCoinFuture
from vnpy.trader.vtGateway import *
from vnpy.trader.vtFunction import getJsonPath
from vnpy.trader.vtConstant import EXCHANGE_OKEX, DIRECTION_NET, PRODUCT_SPOT, DIRECTION_LONG, DIRECTION_SHORT, PRICETYPE_LIMITPRICE, PRICETYPE_MARKETPRICE, OFFSET_OPEN, OFFSET_CLOSE
from vnpy.trader.vtConstant import STATUS_CANCELING,STATUS_CANCELLED, STATUS_NOTTRADED, STATUS_PARTTRADED, STATUS_ALLTRADED, STATUS_UNKNOWN, STATUS_REJECTED, PRODUCT_FUTURES
from vnpy.trader.vtObject import VtErrorData
# Price-type mapping.
# Order sides on OKEX: limit orders are 'buy'/'sell', market orders are
# 'buy_market'/'sell_market'.
priceTypeMap = {
    'buy': (DIRECTION_LONG, PRICETYPE_LIMITPRICE),
    'buy_market': (DIRECTION_LONG, PRICETYPE_MARKETPRICE),
    'sell': (DIRECTION_SHORT, PRICETYPE_LIMITPRICE),
    'sell_market': (DIRECTION_SHORT, PRICETYPE_MARKETPRICE),
}
priceTypeMapReverse = {v: k for k, v in priceTypeMap.items()}

# Futures order type codes: 1 open long, 2 open short, 3 close short, 4 close long.
priceContractOffsetTypeMap = {
    '1': (DIRECTION_LONG, OFFSET_OPEN),
    '2': (DIRECTION_SHORT, OFFSET_OPEN),
    '3': (DIRECTION_SHORT, OFFSET_CLOSE),
    '4': (DIRECTION_LONG, OFFSET_CLOSE),
}
priceContractTypeMapReverse = {v: k for k, v in priceContractOffsetTypeMap.items()}

# Order status mapping (numeric codes from the futures API, string states
# from the spot API).
statusMap = {
    -1: STATUS_CANCELLED,           # cancelled
    0: STATUS_NOTTRADED,            # not yet filled
    1: STATUS_PARTTRADED,           # partially filled
    2: STATUS_ALLTRADED,            # fully filled
    3: STATUS_UNKNOWN,
    4: STATUS_UNKNOWN,              # unknown state
    5: STATUS_CANCELING,            # cancel in progress
    'open': STATUS_NOTTRADED,
    'cancelled': STATUS_CANCELLED,
    'filled': STATUS_ALLTRADED,
    'part_filled': STATUS_PARTTRADED,
}

EVENT_OKEX_INDEX_FUTURE = "eFuture_Index_OKEX"
class OkexGateway(VtGateway):
    """OKEX trading gateway (spot + futures, OKEX V3 websocket API)."""

    # ----------------------------------------------------------------------
    def __init__(self, eventEngine, gatewayName='OKEX'):
        """Constructor"""
        super(OkexGateway, self).__init__(eventEngine, gatewayName)
        self.api_spot = OkexSpotApi(self)        # spot trading API
        self.api_futures = OkexFuturesApi(self)  # futures trading API
        self.apiKey = EMPTY_STRING
        self.secretKey = EMPTY_STRING
        self.passphrase = EMPTY_STRING
        self.leverage = 0
        self.spot_connected = False              # spot API connection state
        self.use_spot_symbol_pairs = set()       # spot pairs to use (from config, limits workload)
        self.auto_subscribe_symbol_pairs = set()     # spot pairs auto-subscribed after connect
        self.auto_subscribe_future_symbols = set()   # futures symbols auto-subscribed after connect
        self.futures_connected = False           # futures API connection state
        self.qryCount = 0                        # query countdown counter
        self.qryTrigger = 2                      # query trigger threshold (timer ticks)
        self.hbCount = 0                         # heartbeat countdown counter
        self.hbTrigger = 30                      # heartbeat trigger threshold (timer ticks)
        # gateway configuration file
        self.fileName = self.gatewayName + '_connect.json'
        self.filePath = getJsonPath(self.fileName, __file__)
        self.log_message = False
        self.qryFunctionList = []

    def connect(self):
        """Load the JSON config file and connect the configured APIs."""
        try:
            f = open(self.filePath, 'r')
        except IOError:
            self.writeError(u'OkexGateway.connect:读取连接配置{}出错,请检查'.format(self.filePath))
            return
        setting = json.load(f)
        try:
            self.apiKey = str(setting['apiKey'])
            self.secretKey = str(setting['secretKey'])
            self.passphrase = str(setting['passphrase'])
            trace = setting['trace']
            self.leverage = setting.get('leverage', 1)
            spot_connect = setting['spot_connect']
            futures_connect = setting['futures_connect']
            self.log_message = setting.get('log_message', False)
            # optional whitelist of spot symbol pairs
            if "symbol_pairs" in setting.keys():
                self.use_spot_symbol_pairs = set(setting["symbol_pairs"])
            # optional auto-subscription after connecting
            if 'auto_subscribe' in setting.keys():
                auto_subscribe = set(setting['auto_subscribe'])
                for symbol in auto_subscribe:
                    # ':' or 'SWAP' marks a futures / perpetual contract
                    if ':' in symbol or 'SWAP' in symbol:
                        self.auto_subscribe_future_symbols.add(symbol)
                    else:
                        self.auto_subscribe_symbol_pairs.add(symbol)
            self.qryEnabled = setting.get('qryEnabled', True)
        except KeyError:
            self.writeError(u'OkexGateway.connect:连接配置缺少字段,请检查')
            return
        if spot_connect:
            self.api_spot.active = True
            for symbol_pair in self.auto_subscribe_symbol_pairs:
                self.writeLog(u'自动订阅现货合约:{}'.format(symbol_pair))
                self.api_spot.registerSymbolPairArray.add(symbol_pair)
            self.api_spot.connect(self.apiKey, self.secretKey,self.passphrase, trace)
            self.writeLog(u'connect okex ws spot api')
            self.api_spot.setSymbolPairs(self.use_spot_symbol_pairs)
        if futures_connect:
            self.api_futures.active = True
            self.api_futures.connect(self.apiKey, self.secretKey,self.passphrase, trace)
            self.writeLog(u'connect okex ws contract api')
            for future_symbol in self.auto_subscribe_future_symbols:
                self.writeLog(u'添加订阅期货合约:{}'.format(future_symbol))
                self.api_futures.registered_symbols.add(future_symbol)
        log = VtLogData()
        log.gatewayName = self.gatewayName
        log.logContent = u'接口初始化成功'
        self.onLog(log)
        # start the periodic query loop
        self.initQuery()
        self.startQuery()

    def sendTransfer(self, symbol, amount, tfrom, tto):
        """
        Transfer funds between OKEX accounts.
        :param symbol: currency symbol
        :param amount: amount to transfer
        :param tfrom: source account
        :param tto: destination account
        :return:
        """
        self.api_futures.sendTransfer(symbol, amount, tfrom, tto)

    def writeLog(self, content):
        """
        Write a line to the log file.
        :param content: log text
        :return:
        """
        # NOTE(review): assumes self.logger is provided by the VtGateway base - confirm.
        if self.logger:
            self.logger.info(content)

    def writeError(self, content, error_id = 0):
        """
        Emit an error event and write it to the log file.
        :param content: error text
        :param error_id: numeric error code (default 0)
        :return:
        """
        error = VtErrorData()
        error.gatewayName = self.gatewayName
        error.errorID = error_id
        error.errorMsg = content
        self.onError(error)
        if self.logger:
            self.logger.error(content)

    def checkStatus(self):
        """
        Check whether the connected APIs are still receiving data.
        :return: False when nothing is connected; otherwise the futures
                 API's own status (the spot check is currently disabled).
        """
        if not self.spot_connected and not self.futures_connected:
            return False
        # if self.spot_connected:
        #     return self.api_spot.checkStatus()
        if self.futures_connected:
            return self.api_futures.checkStatus()
        # NOTE(review): returns None when only spot is connected - confirm callers treat that as falsy.

    def subscribe(self, subscribeReq):
        """
        Subscribe to market data, routed to spot or futures by symbol format.
        :param subscribeReq: VtSubscribeReq
        :return:
        """
        try:
            symbol_pair_gateway = subscribeReq.symbol
            arr = symbol_pair_gateway.split('.')
            # extract the pair, e.g. eth_usdt
            symbol_pair = arr[0]
            if symbol_pair.find('-USDT')>0:
                if self.api_spot and self.spot_connected:
                    self.api_spot.subscribe(subscribeReq)
                else:
                    self.writeError(u'现货接口未创建/未连接,无法调用subscribe')
                    # remember the pair so it can be subscribed once connected
                    # NOTE(review): indentation reconstructed - presumably this belongs
                    # to the else branch (queue for later); confirm against history.
                    self.auto_subscribe_symbol_pairs.add(symbol_pair)
            else:
                if self.api_futures and self.futures_connected:
                    self.api_futures.subscribe(subscribeReq)
                else:
                    self.writeError(u'期货接口未创建/未连接,无法调用subscribe')
        except Exception as ex:
            self.writeError(u'OkexGateway.subscribe 异常,请检查日志:{}'.format(str(ex)))
            self.writeLog(u'OkexGateway.subscribe Exception :{},{}'.format(str(ex), traceback.format_exc()))

    def sendOrder(self, orderReq):
        """Send an order; routed to spot or futures by symbol format."""
        order_req_symbol = orderReq.symbol
        order_req_symbol = order_req_symbol.replace('.{}'.format(EXCHANGE_OKEX),'')
        # futures symbols end in a delivery date (all digits) or 'SWAP'
        if order_req_symbol.split('-')[-1].isdigit() is False and order_req_symbol.endswith('SWAP') is False:
            if self.api_spot and self.spot_connected:
                return self.api_spot.spotSendOrder(orderReq)
            else:
                self.writeError(u'现货接口未创建/连接,无法调用sendOrder')
                return ''
        else:
            if self.api_futures and self.futures_connected:
                return self.api_futures.sendFutureSendOrder(orderReq)
            else:
                self.writeError(u'期货接口未创建/连接,无法调用sendOrder')
                return ''

    def checkOrderStatus(self, orderReq):
        """Query the status of an existing order (spot or futures)."""
        order_req_symbol = orderReq.symbol
        order_req_symbol = order_req_symbol.replace('.{}'.format(EXCHANGE_OKEX), '')
        if 'USDT' in order_req_symbol:
            if self.spot_connected:
                return self.api_spot.querySpotOrder(orderReq)
            else:
                self.writeError(u'现货接口未创建/连接,checkOrder')
        else:
            if self.futures_connected:
                return self.api_futures.queryFutureOrder(orderReq)
            else:
                self.writeError(u'期货接口未创建/连接,无法调用checkOrder')

    def cancelOrder(self, cancelOrderReq):
        """Cancel an order (spot or futures, routed by symbol)."""
        order_req_symbol = cancelOrderReq.symbol
        order_req_symbol = order_req_symbol.replace('.{}'.format(EXCHANGE_OKEX), '')
        if order_req_symbol.find('USDT') != -1:
            if self.spot_connected:
                self.api_spot.spotCancel(cancelOrderReq)
            else:
                self.writeError(u'现货接口未创建/连接,无法调用cancelOrder')
        else:
            if self.futures_connected:
                self.api_futures.sendFutureCancelOrder(cancelOrderReq)
            else:
                self.writeError(u'期货接口未创建/连接,无法调用cancelOrder')

    def qryAccount(self):
        """Query account balances on all connected APIs."""
        if self.spot_connected:
            self.api_spot.spotUserInfo()
        if self.futures_connected:
            self.api_futures.queryAllFutureAccountInfo()

    def qryOrderInfo(self):
        # Order polling is currently disabled on both APIs.
        # if self.spot_connected:
        #     self.api_spot.spotAllOrders()
        #
        if self.futures_connected:
            # self.api_futures.queryAllFutureOrderInfo()
            pass

    def qryPosition(self):
        """Query futures positions."""
        if self.futures_connected:
            self.api_futures.queryAllFuturePositionInfo()

    def close(self):
        """Shut down all connected APIs."""
        if self.spot_connected:
            self.api_spot.active = False
            self.api_spot.close()
        if self.futures_connected:
            self.api_futures.active = False
            self.api_futures.close()

    def initQuery(self):
        """Initialise the periodic query machinery."""
        if self.qryEnabled:
            # query functions executed round-robin on the timer
            self.qryFunctionList = [self.qryAccount, self.qryOrderInfo, self.qryPosition]
            self.qryCount = 0           # countdown counter
            self.qryTrigger = 2         # trigger threshold
            self.qryNextFunction = 0    # index of the next query function to run
            self.startQuery()

    def query(self, event):
        """Timer-driven polling callback registered on the event engine."""
        self.qryCount += 1
        if self.qryCount > self.qryTrigger:
            # reset the countdown
            self.qryCount = 0
            # run the next query function
            function = self.qryFunctionList[self.qryNextFunction]
            function()
            # advance the index, wrapping at the end of the list
            self.qryNextFunction += 1
            if self.qryNextFunction == len(self.qryFunctionList):
                self.qryNextFunction = 0

    def heartbeat(self,event):
        """
        Send websocket heartbeats every hbTrigger timer ticks.
        :return:
        """
        self.hbCount += 1
        if self.hbCount < self.hbTrigger:
            return
        # reset the countdown
        self.hbCount = 0
        # send heartbeat requests on every active connection
        if self.api_spot.active and self.spot_connected:
            self.api_spot.sendHeartBeat()
        if self.api_futures.active and self.futures_connected:
            self.api_futures.sendHeartBeat()

    def startQuery(self):
        """Register the query callback on the timer event."""
        self.eventEngine.register(EVENT_TIMER, self.query)
        # self.eventEngine.register(EVENT_TIMER, self.heartbeat)

    def setQryEnabled(self, qryEnabled):
        """Enable or disable the periodic query loop."""
        self.qryEnabled = qryEnabled

    def onFutureIndexPush(self, push_dic):
        """
        Futures index update: re-publish the push as an engine event.
        :param push_dic: index data dict from the futures API
        :return:
        """
        event1 = Event(type_=EVENT_OKEX_INDEX_FUTURE)
        event1.dict_['data'] = push_dic
        self.eventEngine.put(event1)
class WalletApi():
    """Placeholder for a wallet API implementation (not yet implemented)."""
    pass
class OkexSpotApi(WsSpotApi):
    """Spot (V3 websocket) API implementation for OKEX."""

    def __init__(self, gateway):
        """Constructor"""
        # NOTE(review): super(WsSpotApi, self) skips WsSpotApi.__init__ and calls its
        # parent's instead - looks like it was meant to be super(OkexSpotApi, self); confirm.
        super(WsSpotApi, self).__init__()
        self.gateway = gateway                  # parent gateway object
        self.gatewayName = gateway.gatewayName  # parent gateway name
        self.active = False                     # reconnect automatically after disconnect when True
        self.cbDict = {}                        # callback dict
        self.tickDict = {}                      # latest tick cache per symbol
        self.orderDict = {}                     # order cache keyed by exchange order id
        self.localOrderDict = {}                # local order cache, key is localNo.gatewayName
        self.clientorderDict = {}               # client_oid <-> orderId mapping, key is order_id
        self.channelSymbolMap = {}              # channel type -> symbol mapping
        self.localNo = 10000                    # local order number
        self.orderId = 1000000
        self.loginTime = 0
        self.localNoQueue = Queue()             # local numbers still awaiting an exchange order id
        self.localNoDict = {}                   # local number -> exchange order id
        self.orderIdDict = {}                   # exchange order id -> local number
        self.cancelDict = {}                    # local number -> pending cancel request
        self.recordOrderId_BefVolume = {}       # previously processed filled volume per order
        self.tradeID = 10000
        # symbol pairs enabled by default
        self.use_symbol_pairs = set([])
        # symbol pairs already registered
        self.registerSymbolPairArray = set([])
        # initialise callbacks
        self.initCallback()

    def setSymbolPairs(self, symbol_pairs):
        """
        Set the spot symbol pairs this API will use.
        :param symbol_pairs: set[] or list of pair strings
        :return:
        """
        if isinstance(symbol_pairs,list):
            self.use_symbol_pairs = set(symbol_pairs)
        elif isinstance(symbol_pairs, set):
            self.use_symbol_pairs = symbol_pairs
        self.gateway.writeLog(u'设置合约对:{}'.format(symbol_pairs))

    def onMessage(self, *args):
        """
        Websocket message handler: heartbeat replies, request responses
        and data pushes.
        :param args: last element is the raw message (bytes or str)
        :return:
        """
        if len(args) == 0:
            return
        evt = args[-1]
        if isinstance(evt,bytes):
            # bytes => str => json (OKEX V3 pushes deflate-compressed frames)
            decmp_evt = self.inflate(evt)
            ws_data = self.readData(decmp_evt)
        else:
            ws_data = self.readData(evt)
        if isinstance(ws_data, dict):
            if 'event' in ws_data:
                if ws_data['event'] == 'pong':
                    self.gateway.writeLog(u'heart beat response {}'.format(datetime.now()))
                    return
                if ws_data['event'] == 'login':
                    self.gateway.writeLog(u'现货接口连接成功')
                    self.onOpens()
                # else:
                #     self.gateway.writeLog(u'其他 event:{}'.format(ws_data))
            if 'table' in ws_data:
                # dispatch data pushes by table suffix
                if ws_data['table'].endswith('ticker'):
                    self.onTicker(ws_data['data'])
                if ws_data['table'].endswith('depth5'):
                    self.onDepth(ws_data['data'])
                if ws_data['table'].endswith('account'):
                    self.onSpotUserInfo(ws_data['data'])
                if ws_data['table'].endswith('order'):
                    self.onSpotSubOrder(ws_data['data'])
            if 'client_oid' in ws_data:
                # direct response to an order request
                self.onSpotOrder(ws_data)
        if isinstance(ws_data, list):
            # order-query responses arrive as a list of order dicts
            for data in ws_data:
                if 'client_oid' in data:
                    self.onSpotOrderInfo(data)

    def onError(self, *args):
        """Api override: websocket error push."""
        if len(args)== 0:
            return
        evt = args[-1]
        error = VtErrorData()
        error.gatewayName = self.gatewayName
        error.errorID = 0
        if isinstance(evt, bytes):
            decom_evt = self.inflate(evt)
            error.errorMsg = str(decom_evt)
        else:
            error.errorMsg = str(evt)
        self.gateway.onError(error)

    def onErrorMsg(self, data):
        # Resolve an OKEX spot error payload into a VtErrorData event.
        error = VtErrorData()
        error.gatewayName = self.gatewayName
        if 'data' in data and 'error_code' in data['data']:
            error_code =data['data']['error_code']
            error.errorMsg = u'SpotApi 出错:{}'.format(SPORT_WS_ERROR_DICT.get(str(error_code)))
            error.errorID = error_code
        else:
            error.errorMsg = str(data)
        self.gateway.onError(error)

    def onClose(self, *args):
        """
        Websocket disconnect handler.
        :param args: unused
        :return:
        """
        # ignore the close event if the connection was never established
        if not self.gateway.spot_connected:
            self.gateway.writeLog(u'spot接口未连接,忽略断开提示')
            return
        self.gateway.spot_connected = False
        self.gateway.writeLog(u'Spot服务器连接断开')
        # reconnect in a background thread
        if self.active:
            def reconnect():
                while not self.gateway.spot_connected:
                    self.gateway.writeLog(u'等待10秒后重新连接')
                    sleep(10)
                    if not self.gateway.spot_connected:
                        # NOTE(review): self.reconnect is not defined in this class -
                        # presumably inherited from WsSpotApi; confirm.
                        self.reconnect()
            t = Thread(target=reconnect)
            t.start()

    def subscribe(self, subscribeReq):
        """
        Subscribe to spot market data.
        :param subscribeReq: VtSubscribeReq
        :return:
        """
        self.gateway.writeLog(u'SpotApi.subscribe({})'.format(subscribeReq.symbol))
        symbol_pair_gateway = subscribeReq.symbol
        arr = symbol_pair_gateway.split('.')
        symbol_pair = arr[0]
        # register the pair if it has not been seen before
        if symbol_pair not in self.registerSymbolPairArray:
            self.registerSymbolPairArray.add(symbol_pair)
        self.subscribeSingleSymbol(symbol_pair)
        self.spotOrderInfo(symbol_pair)

    def subscribeSingleSymbol(self, symbol):
        """
        Subscribe all channels (ticker/depth/trades/orders) for one pair.
        :param symbol: pair, e.g. 'ETH-USDT'
        :return:
        """
        self.gateway.writeLog(u'SpotApi.subscribeSingleSymbol({})'.format(symbol))
        self.subscribeSpotTicker(symbol)
        self.subscribeSpotDepth(symbol)
        self.subscribeSpotTrades(symbol)
        self.spotOrderInfo(symbol)

    def spotAllOrders(self):
        """
        Query all outstanding orders (currently disabled).
        :return:
        """
        # self.gateway.writeLog('SpotApi.spotAllOrders()')
        #
        # # query outstanding orders for every registered pair
        # for symbol in self.registerSymbolPairArray:
        #     if symbol in SPOT_SYMBOL_PAIRS:
        #         self.spotOrderInfo(symbol, '-1')
        pass

    def onOpen(self, *args):
        """
        Websocket connected callback: log in to the spot API.
        :param args: unused
        :return:
        """
        self.gateway.spot_connected = True
        self.gateway.writeLog(u'Spot服务器连接成功')
        # base used to build unique local order ids in spotSendOrder
        self.loginTime = int(datetime.now().strftime('%y%m%d%H%M%S'))*self.orderId
        self.login()

    def onOpens(self):
        """
        After a successful login: re-subscribe and query account state.
        :return:
        """
        for symbol_pair in list(self.registerSymbolPairArray):
            self.subscribeSingleSymbol(symbol_pair)
        self.spotUserInfo()

    def onSpotSubDeals(self, ws_data):
        """
        Spot fill push for subscribed orders (currently only logged).
        :param ws_data:
        :return:
        """
        self.gateway.writeLog(u'SpotApi.onSpotSubDeals:{}'.format(ws_data))

    def initCallback(self):
        """
        Initialise the callback dict (nothing to register in V3).
        :return:
        """
        pass

    def checkStatus(self):
        """
        Health check: the connection is considered dead when the most
        recent cached tick is older than 60 seconds.
        :return: bool
        """
        if len(self.tickDict)>0:
            symbol,last_tick = list(self.tickDict.items())[0]
            if (datetime.now()-last_tick.datetime).seconds > 60:
                return False
            return True
        else:
            return False

    def onTicker(self, ws_data):
        """
        Ticker push handler.
        :param ws_data: list of ticker dicts (first element used)
        :return:
        """
        data = ws_data[0]
        try:
            symbol = data['instrument_id']
            if symbol not in self.tickDict:
                tick = VtTickData()
                tick.exchange = EXCHANGE_OKEX
                tick.symbol = symbol
                tick.vtSymbol = symbol
                tick.gatewayName = self.gatewayName
                self.tickDict[symbol] = tick
            else:
                tick = self.tickDict[symbol]
            tick.highPrice = float(data['high_24h'])
            tick.lowPrice = float(data['low_24h'])
            tick.lastPrice = float(data['last'])
            # drop the fractional part of the 24h base volume
            tick.volume = float(data['base_volume_24h'].split('.')[0])
        except Exception as ex:
            self.gateway.writeError(u'SpotApi.onTicker异常')
            self.gateway.writeLog('SpotApi.onTicker exception:{},{} '.format(str(ex), traceback.format_exc()))

    def onDepth(self, ws_data):
        """
        Depth-5 push handler: fills five bid/ask levels and emits the tick.
        :param ws_data: list of depth dicts (first element used)
        :return:
        """
        data = ws_data[0]
        symbol = data['instrument_id']
        # fetch or create the cached tick
        if symbol not in self.tickDict:
            tick = VtTickData()
            tick.symbol = symbol
            tick.vtSymbol = symbol
            tick.gatewayName = self.gatewayName
            self.tickDict[symbol] = tick
        else:
            tick = self.tickDict[symbol]
        tick.bidPrice1, tick.bidVolume1 = data['bids'][0][0:2]
        tick.bidPrice2, tick.bidVolume2 = data['bids'][1][0:2]
        tick.bidPrice3, tick.bidVolume3 = data['bids'][2][0:2]
        tick.bidPrice4, tick.bidVolume4 = data['bids'][3][0:2]
        tick.bidPrice5, tick.bidVolume5 = data['bids'][4][0:2]
        tick.askPrice1, tick.askVolume1 = data['asks'][0][0:2]
        tick.askPrice2, tick.askVolume2 = data['asks'][1][0:2]
        tick.askPrice3, tick.askVolume3 = data['asks'][2][0:2]
        tick.askPrice4, tick.askVolume4 = data['asks'][3][0:2]
        tick.askPrice5, tick.askVolume5 = data['asks'][4][0:2]
        # prices/volumes arrive as strings; normalise to float
        tick.bidPrice1, tick.bidVolume1 = float(tick.bidPrice1), float(tick.bidVolume1)
        tick.bidPrice2, tick.bidVolume2 = float(tick.bidPrice2), float(tick.bidVolume2)
        tick.bidPrice3, tick.bidVolume3 = float(tick.bidPrice3), float(tick.bidVolume3)
        tick.bidPrice4, tick.bidVolume4 = float(tick.bidPrice4), float(tick.bidVolume4)
        tick.bidPrice5, tick.bidVolume5 = float(tick.bidPrice5), float(tick.bidVolume5)
        tick.askPrice1, tick.askVolume1 = float(tick.askPrice1), float(tick.askVolume1)
        tick.askPrice2, tick.askVolume2 = float(tick.askPrice2), float(tick.askVolume2)
        tick.askPrice3, tick.askVolume3 = float(tick.askPrice3), float(tick.askVolume3)
        tick.askPrice4, tick.askVolume4 = float(tick.askPrice4), float(tick.askVolume4)
        tick.askPrice5, tick.askVolume5 = float(tick.askPrice5), float(tick.askVolume5)
        tick.date, tick.time,tick.datetime = self.generateDateTime(data['timestamp'])
        tick.tradingDay = tick.date
        # fall back to mid-price when no trade has occurred yet
        if tick.lastPrice == 0 and tick.askPrice1 != 0 and tick.bidPrice1 != 0:
            tick.lastPrice = (tick.askPrice1 + tick.bidPrice1) / 2
        # drop incomplete ticks
        if tick.lastPrice == 0 or tick.askPrice1 == 0 or tick.bidPrice1 == 0:
            print('onDepth drop tick {},lastprice:{},askprice1={},bidPrice1:{}'
                  .format(tick.vtSymbol, tick.lastPrice, tick.askPrice1, tick.bidPrice1))
            return
        newtick = copy(tick)
        self.gateway.onTick(newtick)

    def onSpotBalance(self, ws_data):
        """
        Spot account update push (fires when balances change after a trade).
        :param ws_data:
        :return:
        """
        data = ws_data.get('data')
        if data is None:
            self.gateway.writeError(u'SpotApi.onSpotBalance, no data in ws_data')
            return
        data_info = data.get('info')
        if data_info is None:
            self.gateway.writeError(u'SpotApi.onSpotBalance, no info in data')
            return
        data_info_free = data_info.get('free', {})      # available balance
        data_info_freezed = data_info.get('freezed')    # frozen balance
        if data_info_freezed is None or not isinstance(data_info_freezed, dict):
            self.gateway.writeError(u'SpotApi.onSpotBalance, no freezed in data_info')
            self.gateway.writeLog(ws_data)
            return
        # only currencies with frozen balance are pushed
        for symbol, position in data_info_freezed.items():
            pos = VtPositionData()
            pos.gatewayName = self.gatewayName
            pos.symbol = symbol + "." + EXCHANGE_OKEX
            pos.vtSymbol = symbol + "." + EXCHANGE_OKEX
            pos.vtPositionName = symbol + u'[现货]'
            pos.direction = DIRECTION_NET
            pos.frozen = float(position)
            pos.position = pos.frozen + float(data_info_free.get(symbol, 0))
            self.gateway.onPosition(pos)

    def onSpotUserInfo(self, ws_data):
        """Spot account balance push: emit one VtAccountData per currency."""
        if isinstance(ws_data, str):
            ws_data = json.loads(ws_data)
        for data in ws_data:
            account = VtAccountData()
            account.gatewayName = self.gatewayName
            account.accountID = data['currency']
            account.vtAccountID = data['currency'] + ".{}".format(EXCHANGE_OKEX)
            account.balance = float(data['balance'])
            self.gateway.onAccount(account)

    def onSpotSubUserInfo(self, ws_data):
        """
        Spot sub-account balance push.
        :param ws_data:
        :return:
        """
        if 'data' not in ws_data or 'info' not in ws_data['data'] or 'funds' not in ws_data['data']['info']:
            self.gateway.writeError(u'SpotApi.onSpotSubUserInfo 数据不完整,请检查日志')
            self.gateway.writeLog(ws_data)
            return
        data = ws_data.get('data')
        data_info = data.get('info')
        # position info per currency
        for symbol in SPOT_CURRENCY:
            if symbol in data_info['free']:
                pos = VtPositionData()
                pos.gatewayName = self.gatewayName
                pos.symbol = symbol # + "." + EXCHANGE_OKEX
                pos.vtSymbol = symbol #+ "." + EXCHANGE_OKEX
                pos.vtPositionName = symbol
                pos.direction = DIRECTION_NET
                pos.frozen = float(data_info['freezed'][symbol])
                pos.position = pos.frozen + float(data_info['free'][symbol])
                self.gateway.onPosition(pos)

    def onSpotSubOrder(self, ws_data):
        """
        Order update push (partial fill / full fill / reject / cancel).
        :param ws_data: list with a single order dict
        :return:
        """
        data = ws_data[0]
        # resolve our order id: prefer the client_oid when one was assigned
        if 'client_oid' in data:
            # NOTE(review): 'is not' compares identity, not equality - should be '!=';
            # relies on CPython small-string interning.
            if data['client_oid'] is not '' and data['client_oid'] is not '0':
                orderId = str(data['client_oid'])
            else:
                orderId = str(data['order_id'])
            self.clientorderDict[orderId] = str(data['order_id'])
        else:
            orderId = self.clientorderDict[data['order_id']]
        # assign a fresh local number if this order id is new
        if orderId not in self.orderIdDict:
            while str(self.localNo) in self.localNoDict:
                self.localNo += 1
            localNo = str(self.localNo)
            self.localNoDict[localNo] = orderId
            self.orderIdDict[orderId] = localNo
            self.gateway.writeLog(u'onFutureOrderInfo add orderid: local:{}<=>okex:{}'.format(localNo, orderId))
        # create or fetch the cached order object
        if orderId not in self.orderDict:
            order = VtOrderData()
            order.gatewayName = self.gatewayName
            order.symbol = data['instrument_id']
            order.vtSymbol = order.symbol
            order.orderID = orderId
            order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
            order.price = float(data['price'])
            order.totalVolume = float(data['size'])
            order.direction, priceType =data['side'],'limit'
            order.offset = OFFSET_OPEN
            create_date, order.orderTime,_ = self.generateDateTime(data['timestamp'])
            self.orderDict[orderId] = order
        else:
            order = self.orderDict[orderId]
        order.tradedVolume = float(data['filled_size'])
        order.status = statusMap[data['status']]
        if order.status == STATUS_CANCELLED:
            dt = datetime.now()
            order.cancelTime = dt.strftime("%H:%M:%S.%f")
        self.gateway.writeLog(u'onSpotSubOrder:发出OnOrder,vtOrderId={},orderStatus:{}'.format(order.vtOrderID,order.status))
        self.gateway.onOrder(copy(order))
        # emit a trade event for the newly filled volume, if any
        bef_volume = self.recordOrderId_BefVolume.get(orderId, 0.0)
        now_volume = float(data['filled_size']) - bef_volume
        if now_volume > 0.000001:
            trade = VtTradeData()
            trade.gatewayName = self.gatewayName
            trade.symbol = order.symbol
            trade.vtSymbol = order.symbol
            self.tradeID += 1
            trade.tradeID = str(self.tradeID)
            trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID])
            trade.orderID = orderId
            trade.vtOrderID = '.'.join([self.gatewayName, trade.orderID])
            # average fill price = filled notional / filled size
            trade.price = float(data['filled_notional'])/float(data['filled_size'])
            trade.volume = float(now_volume)
            trade.direction, priceType = data['side'],'limit'
            trade.tradeTime = datetime.now().strftime('%H:%M:%S')
            self.gateway.writeLog(u'onSpotSubOrder:发出onTrader,vtOrderId={}'.format(trade.vtOrderID))
            self.gateway.onTrade(trade)

    def onSpotOrderInfo(self, ws_data):
        """
        Order query response handler.
        spotOrderInfo(symbol_pair, '-1') returns all outstanding orders
        for the pair; spotOrderInfo(symbol_pair, orderId) returns one order.
        :param ws_data: a single order dict
        :return:
        """
        self.gateway.writeLog(u'SpApi.onSpotOrderInfo:{}'.format(ws_data))
        data = ws_data
        # resolve our order id and record the client_oid mapping
        # NOTE(review): 'is not' on string literals - should be '!='.
        if data['client_oid'] is not '0' and data['client_oid'] is not '':
            orderId = str(data['client_oid'])
            self.clientorderDict[ws_data['order_id']] = orderId
        else:
            orderId = str(data['order_id'])
            self.clientorderDict[ws_data['order_id']] = orderId
        localNo = str(self.localNo)
        if orderId not in self.orderIdDict:
            # new order id: allocate the next free local number and bind them
            while str(self.localNo) in self.localNoDict:
                self.localNo += 1
            localNo = str(self.localNo)
            self.localNoDict[localNo] = orderId
            self.orderIdDict[orderId] = localNo
        else:
            # order id already cached locally
            localNo = self.orderIdDict[orderId]
            # backfill the local cache if the reverse mapping is missing
            if localNo not in self.localNoDict:
                self.localNoDict[localNo] = orderId
        self.gateway.writeLog(u'onFutureOrderInfo update orderid: local:{}<=>okex:{}'.format(localNo, orderId))
        if orderId not in self.orderDict:
            # not cached yet: build the order object
            self.gateway.writeLog(u'onSpotOrderInfo:添加至委托单缓存:{}'.format(orderId))
            order = VtOrderData()
            order.gatewayName = self.gatewayName
            # order.symbol = spotSymbolMap[d['symbol']]
            order.symbol =data['instrument_id']
            order.vtSymbol = order.symbol
            order.orderID = orderId
            order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
            order.price = float(data['price'])
            order.totalVolume = int(data['size'])
            order.direction, priceType = data['side'],'现货开仓'
            create_date, order.orderTime,_ = self.generateDateTime(data['created_at'])
            self.orderDict[orderId] = order
        else:
            order = self.orderDict[orderId]  # use the local cache
        order.tradedVolume = int(data['filled_size'])  # update filled volume
        order.status = data['status']                  # update status (raw exchange string)
        self.gateway.writeLog('orderId:{},tradedVolume:{},status:{}'.format(order.vtOrderID,order.tradedVolume,order.status))
        self.gateway.onOrder(copy(order))  # forward to vtGateway.onOrder

    def onSpotOrder(self, ws_data):
        """
        Order placement acknowledgement handler.
        :param ws_data: response dict with client_oid/order_id
        :return:
        """
        client_oid = ws_data.get('client_oid')
        # NOTE(review): 'is not' on a string literal - should be '!='.
        if client_oid is not '':
            self.clientorderDict[ws_data['order_id']] = client_oid
        vtOrderId = '.'.join([self.gatewayName,str(client_oid)])
        order = self.localOrderDict.get(vtOrderId)
        if order:
            order.orderID = client_oid
            order.symbol = '.'.join([order.symbol, self.gatewayName])
            # NOTE(review): lowercase 'vtsymbol' - probably a typo for 'vtSymbol'; confirm.
            order.vtsymbol = order.symbol
            order.status = STATUS_NOTTRADED
            order.updateTime = datetime.now().strftime("%H:%M:%S.%f")
            order.totalVolume=order.totalVolume
            self.gateway.onOrder(order)
        return

    def onSpotCancelOrder(self, ws_data):
        """
        Cancel request acknowledgement handler.
        :param ws_data:
        :return:
        """
        self.gateway.writeLog(u'SpotApi.onSpotCancelOrder()')
        data = ws_data.get('data', {})
        if 'error_code' in data:
            error_id = data.get('error_code')
            self.gateway.writeError(u'SpotApi.onSpotCancelOrder 委托返回错误:{}'.format(SPORT_WS_ERROR_DICT.get(str(error_id))), error_id=error_id)
            self.gateway.writeLog(ws_data)
            return
        ok_order_id = data.get('order_id')
        ok_result = data.get('result',False)
        if ok_order_id is None:
            self.gateway.writeError(u'SpotApi.onSpotCancelOrder 委托返回中,没有order_id')
            self.gateway.writeLog(ws_data)
            return
        if not ok_result:
            self.gateway.writeError(u'SpotApi.onSpotCancelOrder 撤单失败')
            self.gateway.writeLog(ws_data)
            return
        ok_order_id = str(ok_order_id)
        localNo = self.orderIdDict[ok_order_id]
        if ok_order_id not in self.orderDict:
            self.gateway.writeLog(u'onSpotCancelOrder,{}的订单不在self.orderDict,创建order对象'.format(ok_order_id))
            order = VtOrderData()
            order.gatewayName = self.gatewayName
            order.symbol = '.'.join([data['symbol'], EXCHANGE_OKEX])
            order.vtSymbol = order.symbol
            order.orderID = localNo
            order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
            self.orderDict[ok_order_id] = order
        else:
            order = self.orderDict[ok_order_id]
        order.status = STATUS_CANCELLED
        dt = datetime.now()
        order.cancelTime = dt.strftime("%H:%M:%S.%f")
        # forward the cancelled order to vtGateway
        self.gateway.onOrder(order)

    def spotSendOrder(self, req):
        """
        Send a spot order.
        :param req: VtOrderReq
        :return: vtOrderID string
        """
        self.gateway.writeLog(u'SpotApi.spotSendOrder()')
        symbol = (req.symbol.split('.'))[0]
        symbol = symbol.lower()
        if not symbol in self.registerSymbolPairArray:
            self.registerSymbolPairArray.add(symbol)
        # map (direction, price type) to the okex order type string
        type_ = priceTypeMapReverse[(req.direction, req.priceType)]
        # build a unique local order id from loginTime + running counter
        # self.localNo += 1
        self.orderId += 1
        # self.gateway.writeLog(u'localNo:{}'.format(self.localNo))
        # self.localNoQueue.put(str(self.localNo))
        orderId = 'Spot'+ str(self.loginTime + self.orderId)
        vtOrderID = '.'.join([self.gatewayName, orderId])
        self.gateway.writeLog(u'创建本地Order对象,vtOrderId:{}'.format(vtOrderID))
        # create the local order object
        order = VtOrderData()
        order.gatewayName = self.gatewayName
        order.symbol = symbol
        order.vtSymbol = order.symbol
        order.orderID = orderId
        order.vtOrderID = vtOrderID
        order.price = req.price
        order.totalVolume = req.volume
        # order.direction, priceType = priceTypeMap[type_]
        order.direction = req.direction
        order.offset = OFFSET_OPEN if req.direction == DIRECTION_LONG else OFFSET_CLOSE
        order.status = STATUS_NOTTRADED
        dt = datetime.now()
        order.orderTime = dt.strftime("%H:%M:%S.%f")
        self.localOrderDict[vtOrderID] = order
        # NOTE(review): this overrides the market-order mapping computed above -
        # market orders are sent as 'buy'/'sell'; confirm intended.
        if order.direction == DIRECTION_LONG:
            type_ = 'buy'
        if order.direction == DIRECTION_SHORT:
            type_ = 'sell'
        # submit via the websocket API
        self.gateway.writeLog(u'调用ws api发送委托')
        self.spotTrade(symbol, type_, str(req.price), str(req.volume),orderId)
        self.gateway.writeLog('SpotSendOrder:symbol:{},Type:{},price:{},volume:{}'.format(symbol, type_, str(req.price), str(req.volume)))
        return vtOrderID

    def spotCancel(self, req):
        """
        Send a cancel request.
        :param req: VtCancelOrderReq
        :return:
        """
        symbol = (req.symbol.split('.'))[0]
        localNo = req.orderID
        self.spotCancelOrder(symbol, localNo)
        # else:
        #     # if a cancel arrives before the exchange order id is known,
        #     # store it in cancelDict and execute it on arrival
        #     self.cancelDict[localNo] = req

    def generateDateTime(self, s):
        """Parse an ISO8601 UTC timestamp into (date, time, datetime), shifted to UTC+8."""
        s = s.split('.')[0]
        s = s.split('T')[0]+u' '+s.split('T')[1]
        s = td.mktime(td.strptime(s, "%Y-%m-%d %H:%M:%S"))
        # shift from UTC to local exchange time (UTC+8)
        s = s + 60*60*8
        dt = datetime.fromtimestamp(s)
        time = dt.strftime("%H:%M:%S.%f")
        date = dt.strftime("%Y%m%d")
        return date, time,dt
class OkexFuturesApi(WsFuturesApi):
"""okex的合约API实现"""
def __init__(self, gateway):
    """Constructor: wire the futures websocket API to its parent gateway."""
    super(OkexFuturesApi, self).__init__()
    # Parent gateway and its name, used for logging and event routing.
    self.gateway = gateway
    self.gatewayName = gateway.gatewayName
    self.active = False                   # when True, reconnect automatically after a disconnect
    # Callback and market-data caches.
    self.cbDict = {}
    self.tickDict = {}
    self.orderDict = {}
    self.localOrderDict = {}              # locally cached orders, keyed by localNo.gatewayName
    self.channelSymbolMap = {}
    self.loginTime = 0
    # Local order-number bookkeeping.
    self.localNo = 0                      # running local order number
    self.localNoQueue = Queue()           # local numbers still waiting for an exchange order id
    self.localNoDict = {}                 # local number -> exchange order id
    self.orderIdDict = {}                 # exchange order id -> local number
    self.cancelDict = {}                  # local number -> pending cancel request
    self.recordOrderId_BefVolume = {}     # previously processed filled volume per order
    self.tradeID = 0
    # Symbol registration and contract metadata.
    self.registered_symbols = set([])
    self.queryed_pos_symbols = set([])
    self._use_leverage = "10"             # default leverage ratio
    self.bids_depth_dict = {}
    self.asks_depth_dict = {}
    self.contract_name_dict = {}
    self.contractIdToSymbol = {}
    self.CONTRACT_SYMBOL_SWAP = ["BTC-USD-SWAP", "ETH-USD-SWAP", "EOS-USD-SWAP", "ETC-USD-SWAP", "LTC-USD-SWAP"]
def queryFutureOrder(self, orderReq):
    """Query the status of one futures order described by *orderReq*.

    :param orderReq: request object carrying .symbol and .orderID
    :return: whatever queryFutureOrderStatus returns for that order
    """
    return self.queryFutureOrderStatus(orderReq.symbol, orderReq.orderID)
def treatFutureOrderStatus(self, response):
    """Map an OKEX V3 futures order-query response to a vt order status.

    An empty response means the exchange no longer knows the order, which
    is treated as cancelled.

    Fix: unknown status codes (e.g. '3' submitting, '4' cancelling) used
    to raise KeyError; they now map to STATUS_UNKNOWN. Numeric statuses
    are normalised through str() as well.

    :param response: order-query response dict (may be empty)
    :return: one of the STATUS_* constants
    """
    if not response:
        return STATUS_CANCELLED
    orderIndex = {'-1': STATUS_CANCELLED,
                  '0': STATUS_NOTTRADED,
                  '1': STATUS_PARTTRADED,
                  '2': STATUS_ALLTRADED}
    # Fall back to STATUS_UNKNOWN instead of raising on unmapped codes.
    return orderIndex.get(str(response['status']), STATUS_UNKNOWN)
def queryFutureAccount(self):
    """Request account information for every registered futures symbol."""
    for registered in self.registered_symbols:
        # dealSymbolFunc yields (symbol_pair, symbol, contract_type, leverage);
        # only the bare symbol is needed for the account query.
        bare_symbol = self.dealSymbolFunc(registered)[1]
        self.queryFuturesAccountInfo(bare_symbol)
def setLeverage(self, __leverage):
    """
    Set the leverage ratio used for futures orders.
    :param __leverage: leverage value, stored as-is (e.g. "10")
    :return:
    """
    self._use_leverage = __leverage
def onMessage(self, *args):
    """
    Websocket message dispatcher for the futures API.
    :param args: last element is the raw message (bytes or str)
    :return:
    """
    if len(args)==0:
        return
    evt = args[-1]
    if isinstance(evt,bytes):
        decmp_evt = self.inflate(evt)  # bytes => str => json (deflate-compressed frame)
        ws_data = self.readData(decmp_evt)
    else:
        ws_data = self.readData(evt)
    if self.gateway.log_message:
        self.gateway.writeLog(u'FutureApi.onMessage:{}'.format(ws_data))
    if isinstance(ws_data, dict):
        if 'event' in ws_data:
            if ws_data['event'] == 'pong':
                # TODO: hook heartbeat bookkeeping / reconnect logic here
                return
            elif ws_data['event'] == 'login':
                # NOTE(review): uses self.writeLog (not self.gateway.writeLog) -
                # presumably WsFuturesApi provides writeLog; confirm.
                self.writeLog(u'期货接口登陆成功')
                self.onOpens()
            # else:
            #     self.gateway.writeLog(ws_data)
        elif 'table' in ws_data:
            # dispatch data pushes by table suffix
            if ws_data['table'].endswith('ticker'):
                self.onTicker(ws_data['data'])
            elif ws_data['table'].endswith('depth5'):
                self.onDepth(ws_data['data'])
            elif ws_data['table'].endswith('account'):
                self.onFutureAccountInfo(ws_data['data'])
            elif ws_data['table'].endswith('position'):
                self.onFuturePositionInfo(ws_data['data'])
            elif ws_data['table'].endswith('order'):
                self.onFutureTrades(ws_data['data'])
        elif 'client_oid' in ws_data:  # acknowledgement of an order request
            self.treatRespondFutureOrder(ws_data)
        else:  # error payload, e.g. error_code 35008
            self.gateway.writeLog(u'很烦很烦')
            self.gateway.writeLog(ws_data)
    else:
        self.gateway.writeLog(u'其他信息')
        self.gateway.writeLog(ws_data)
def onError(self, *args):
    """
    Forward a websocket error event to the gateway as VtErrorData.
    :param args: websocket callback arguments; the payload is args[-1]
    :return: None
    """
    if not args:
        return
    payload = args[-1]
    err = VtErrorData()
    err.gatewayName = self.gatewayName
    # Binary frames must be inflated before they are human-readable.
    if isinstance(payload, bytes):
        err.errorMsg = str(self.inflate(payload))
    else:
        err.errorMsg = str(payload)
    self.gateway.onError(err)
def onErrorMsg(self, data):
    """
    Translate an OKEX futures error payload into a VtErrorData event.
    :param data: dict possibly containing data.error_code
    :return: None
    """
    err = VtErrorData()
    err.gatewayName = self.gatewayName
    if 'data' in data and 'error_code' in data['data']:
        code = str(data['data']['error_code'])
        err.errorID = code
        # Map the numeric code to the human-readable message table.
        err.errorMsg = u'FutureApi Error:{}'.format(FUTURES_ERROR_DICT.get(code))
    self.gateway.onError(err)
def reconnect(self):
    """
    Keep retrying the futures websocket connection until it is up.

    BUGFIX: the original recursed into ``self.reconnect()`` on every
    failed attempt in addition to looping, so a long outage could
    exhaust the interpreter stack (RecursionError). The loop alone
    already retries until ``futures_connected`` flips to True.
    :return: None
    """
    while not self.gateway.futures_connected:
        self.writeLog(u'okex Api_contract 等待10秒后重新连接')
        self.connect(self.apiKey, self.secretKey, self.passphrase, self.trace)
        sleep(10)
def writechaLog(self, content):
    """
    Thin alias that forwards *content* to :meth:`writeLog`.
    :param content: log text
    :return: None
    """
    self.writeLog(content)
def onClose(self):
    """
    Handle a futures websocket disconnect and schedule a reconnect.
    :return: None
    """
    # Ignore the notification if we were never connected in the first place.
    if not self.gateway.futures_connected:
        return
    self.gateway.futures_connected = False
    self.writeLog(u'期货服务器连接断开')
    if self.active:
        # Reconnect on a worker thread so this ws callback returns promptly.
        self.writeLog(u'重新连接期货服务器')
        Thread(target=self.reconnect).start()
def dealSymbolFunc(self, symbol):
    """
    Split an order symbol of the form ``pair:contract_type:leverage``.

    A plain symbol without ':' falls back to the defaults
    (this_week contract, 10x leverage).
    :param symbol: raw symbol string
    :return: (symbol_pair, symbol, contract_type, leverage) tuple
    :raises ValueError: on a malformed symbol or unknown contract type
    """
    if ':' not in symbol:
        # No contract annotation attached -- use the defaults.
        return (symbol, symbol, 'this_week', '10')
    symbol_pair = symbol.split('.')[0]
    parts = symbol_pair.split(':')
    if len(parts) != 3:
        self.gateway.writeError(u'合约代码{}错误:'.format(symbol))
        raise ValueError(u'合约代码{}错误:'.format(symbol))
    base, contract_type, leverage = parts
    if contract_type not in CONTRACT_TYPE:
        self.gateway.writeError(u'合约类型错误:{}'.format(contract_type))
        raise ValueError(u'合约类型{}不在:{}中'.format(contract_type, CONTRACT_TYPE))
    # The websocket channels expect the bare coin name, without "_usd".
    return (symbol_pair, base.replace("_usd", ""), contract_type, leverage)
def subscribe(self, subscribeReq):
    """
    Subscribe to market data; called by vtEngine's dataEngine through
    the gateway.
    :param subscribeReq: request object carrying the symbol to subscribe
    :return: None
    """
    self.subscribeSingleSymbol(subscribeReq.symbol)
def subscribeSingleSymbol(self, symbol):
    """
    Subscribe the ticker, depth and trade channels for one contract,
    then pull its open orders once so local state starts in sync.
    :param symbol: contract symbol such as btc or eth
    :return: None
    """
    self.subsribeFutureTicker(symbol)  # (sic) upstream API method name
    self.subscribeFutureDepth(symbol)
    self.subscribeFutureTrades(symbol)
    # status="6" asks OKEX for unfinished (open) orders only.
    self.queryFutureOrderInfo(symbol, status="6", current_page=1, page_length=50)
def treatFutureOrderInfo(self, symbol_pair, message):
    """
    Handle the response of an open-order query and rebuild local orders.

    BUGFIX: the loop used ``return`` when it met an order without a
    client_oid (i.e. not placed by this gateway), which silently dropped
    every remaining order in the list; ``continue`` skips only that one.
    Also removed an unreachable duplicate empty-list check.
    :param symbol_pair: queried symbol pair (logging only)
    :param message: payload containing an 'order_info' list
    :return: None
    """
    orders = message['order_info']
    if orders == []:
        # Nothing to rebuild (the original returned silently here too).
        return
    for tmp_order in orders:
        if tmp_order['client_oid'] == '':
            # Not one of our orders -- skip it, keep processing the rest.
            continue
        order = VtOrderData()
        order.gatewayName = self.gatewayName
        order.symbol = tmp_order['instrument_id']
        order.vtSymbol = '{}.'.format(self.gatewayName) + tmp_order['instrument_id']
        order.orderID = tmp_order['client_oid']  # client-assigned order id
        order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
        order.price = float(tmp_order['price'])
        order.totalVolume = int(tmp_order['size'])
        order.tradedVolume = int(tmp_order['filled_qty'])
        order.direction, order.offset = priceContractOffsetTypeMap[str(tmp_order['type'])]
        order.status = statusMap[int(tmp_order['status'])]
        self.orderDict[order.orderID] = order  # re-register locally
        self.gateway.onOrder(order)
def onOpen(self):
    """
    Called when the futures websocket connection opens.

    Registers contract metadata with the gateway (delivery contracts
    matched from contractInfo plus the perpetual SWAP contracts), then
    logs in and subscribes the index channels.
    :return: None
    """
    self.writeLog(u'服务器OKEX期货————onOpen')
    self.gateway.futures_connected = True
    # Timestamp-derived base; also used for local order numbering in
    # sendFutureSendOrder.
    self.loginTime = int(datetime.now().strftime('%y%m%d%H%M%S')) * 10000
    try:
        self.initCallback()
        for symbol in CONTRACT_SYMBOL:
            for use_contract_type in CONTRACT_TYPE:
                # Find the contractInfo key matching this coin/contract-type
                # combination and announce it as a tradable contract.
                for key, items in self.contractInfo.items():
                    if symbol in key and use_contract_type==items:
                        use_symbol_name = key
                        contract = VtContractData()
                        contract.gatewayName = self.gatewayName
                        contract.symbol = use_symbol_name
                        contract.exchange = EXCHANGE_OKEX
                        contract.vtSymbol = use_symbol_name + "." + EXCHANGE_OKEX
                        contract.productClass = PRODUCT_FUTURES
                        self.gateway.onContract(contract)
        # Perpetual swaps are announced from the fixed list.
        for symbol in self.CONTRACT_SYMBOL_SWAP:
            contract = VtContractData()
            contract.gatewayName = self.gatewayName
            contract.symbol = symbol
            contract.exchange = EXCHANGE_OKEX
            contract.vtSymbol = symbol + "." + EXCHANGE_OKEX
            contract.productClass = PRODUCT_FUTURES
            self.gateway.onContract(contract)
        self.login()
        self.subFutureAllIndexSymbol()
    except Exception as ex:
        self.gateway.writeError(u'期货onOpen异常:{},{}'.format(str(ex), traceback.format_exc()))
def onOpens(self):
    """
    After a successful login, subscribe market data and query
    account/position state for every registered symbol.
    :return: None
    """
    for registered in self.registered_symbols:
        try:
            symbol_pair, symbol, contract_type, leverage = self.dealSymbolFunc(registered)
            self.subscribeSingleSymbol(symbol)
            self.subFutureUserInfo(symbol)
            self.subFuturePositionInfo(symbol)
            self.queryFuturesAccountInfo(symbol)
            self.queryFuturePosition(symbol)
        except Exception as ex:
            # One bad symbol must not stop the remaining subscriptions.
            self.gateway.writeError(u'订阅合约行情异常:{},{}'.format(str(ex), traceback.format_exc()))
def queryAllFutureAccountInfo(self):
    """
    Query account information for every subscribed symbol.
    :return: None
    """
    for registered in self.registered_symbols:
        _, symbol, _, _ = self.dealSymbolFunc(registered)
        self.queryFuturesAccountInfo(symbol)
def queryAllFuturePositionInfo(self):
    """
    Query position information for every subscribed symbol.
    :return: None
    """
    for registered in self.registered_symbols:
        _, symbol, _, _ = self.dealSymbolFunc(registered)
        self.queryFuturePosition(symbol)
def queryAllFutureOrderInfo(self):
    """
    Query unfinished (OKEX status "6") orders for every subscribed symbol.
    :return: None
    """
    for registered in self.registered_symbols:
        _, symbol, _, _ = self.dealSymbolFunc(registered)
        self.queryFutureOrderInfo(symbol, status="6", current_page=1, page_length=50)
def treatFutureAccountInfo(self, message, symbol):
    """
    Handle an account-information query response and publish it as
    VtAccountData.
    :param message: account payload; SWAP payloads nest fields under 'info'
    :param symbol: instrument id, reused as the account id
    :return: None
    """
    if message == []:
        return
    accountInfo = VtAccountData()
    accountInfo.accountID = symbol
    accountInfo.gatewayName = self.gatewayName
    if 'SWAP' in symbol:
        # Perpetual swaps wrap the balances in an 'info' sub-dict.
        accountInfo.balance = float(message['info']['equity'])  # equity
        accountInfo.available = float(message['info']['total_avail_balance'])  # cash balance
        accountInfo.closeProfit = float(message['info']['realized_pnl'])
        accountInfo.positionProfit = float(message['info']['unrealized_pnl'])
    else:
        margin_mode = message['margin_mode']
        if margin_mode == 'crossed':
            accountInfo.available = float(message['total_avail_balance'])  # cash balance
            accountInfo.balance = float(message['equity'])  # equity
            accountInfo.closeProfit = float(message['realized_pnl'])
            accountInfo.positionProfit = float(message['unrealized_pnl'])
        else:
            # Fixed-margin accounts: pnl fields are not read here.
            accountInfo.available = float(message['total_avail_balance'])  # cash balance
            accountInfo.balance = float(message['equity'])  # equity
    self.gateway.onAccount(accountInfo)
def treatFuturePositionInfo(self, message, symbol):
    """
    Handle a position query response and publish VtPositionData events.

    Delivery contracts report separate long/short legs; SWAP contracts
    report a single leg with a 'side' field.  The crossed/fixed margin
    branches of the original code were byte-identical duplicates, so
    they are collapsed here (behavior unchanged).
    :param message: payload with a 'holding' list
    :param symbol: instrument id, used only to tell SWAP from delivery
    :return: None
    """
    # TODO: add validation of the incoming payload ('holding' may be empty)
    data = message['holding'][0]
    if 'SWAP' not in symbol:
        # Delivery contract: emit one event per non-empty leg.
        self._emitDeliveryPositionLeg(data, DIRECTION_LONG, 'long_qty', 'long_avg_cost')
        self._emitDeliveryPositionLeg(data, DIRECTION_SHORT, 'short_qty', 'short_avg_cost')
    else:
        # Perpetual swap: a single position with an explicit side.
        pos = VtPositionData()
        pos.position = float(data['position'])
        pos.price = float(data['avg_cost'])
        pos.direction = DIRECTION_LONG if data['side'] == 'long' else DIRECTION_SHORT
        pos.positionProfit = float(data['realized_pnl'])
        pos.symbol = data['instrument_id']
        pos.vtSymbol = self.gatewayName + '.' + pos.symbol
        self.gateway.onPosition(pos)

def _emitDeliveryPositionLeg(self, data, direction, qty_key, cost_key):
    """Publish one delivery-contract position leg if its size is non-zero."""
    pos = VtPositionData()
    pos.position = float(data[qty_key])
    if pos.position != 0:
        pos.gatewayName = self.gatewayName
        pos.symbol = data['instrument_id']
        pos.vtSymbol = self.gatewayName + '.' + pos.symbol
        pos.price = float(data[cost_key])
        pos.direction = direction
        pos.leverage = int(data['leverage'])
        pos.margin_mode = data['margin_mode']
        pos.positionProfit = data['realised_pnl']
        self.gateway.onPosition(pos)
def writeLog(self, content):
    """
    Emit *content* as a VtLogData event through the gateway.
    :param content: log text
    :return: None
    """
    log_event = VtLogData()
    log_event.gatewayName = self.gatewayName
    log_event.logContent = content
    self.gateway.onLog(log_event)
def initCallback(self):
    """
    Fetch contract metadata for both perpetual ('SWAP') and delivery
    ('JiaoGe') instruments; push handlers are wired up in onMessage.
    :return: None
    """
    for instrument_kind in ('SWAP', 'JiaoGe'):
        self.queryContractInfo(instrument_kind)
def onFutureIndexInfo(self, ws_data):
    """
    Handle a futures index push and fan it out once per contract type.
    :param ws_data: dict with 'channel' and 'data' (timestamp/futureIndex)
    :return: None
    """
    data = ws_data.get('data')
    if data is None:
        return
    t_date, t_time, _ = self.generateDateTime(float(data["timestamp"]))
    index_value = float(data["futureIndex"])
    # The channel maps to a symbol pattern with a %s slot per contract type.
    pattern = self.channelSymbolMap[ws_data['channel']]
    for use_contract_type in CONTRACT_TYPE:
        self.gateway.onFutureIndexPush({
            "symbol": pattern % (use_contract_type),
            "date": t_date,
            "time": t_time,
            "index": index_value,
        })
def checkStatus(self):
    """
    Watchdog: report whether tick data is still flowing.

    Returns False when no symbol has ever ticked, or when the sampled
    tick is older than 10 seconds (in which case its timestamp is reset
    so the next check measures a fresh interval).
    :return: bool -- True when the feed looks alive
    """
    if len(self.tickDict) > 0:
        # Only the first symbol is sampled; this assumes all symbols share
        # one connection, so one stale tick implies a stale feed.
        symbol,last_tick = list(self.tickDict.items())[0]
        # self.gateway.writeLog(u'日常检查,最近tick时间:{}'.format(last_tick.datetime.strftime('%Y-%m-%d %H:%M:%S')))
        if (datetime.now() - last_tick.datetime).seconds > 10:
            # Reset the timestamp so repeated checks don't all re-trigger
            # before a reconnect has had a chance to happen.
            last_tick.datetime = datetime.now()
            # self.gateway.writeError(u'【注意:行情出现断开连接】')
            self.gateway.writeLog(u'【重新连接】')
            # self.onClose()
            return False
        return True
    else:
        return False
def onTicker(self, ws_data):
    """
    Handle a futures ticker push, updating the cached VtTickData.

    The tick is only cached here; it is published to the gateway from
    onDepth, where the depth levels complete the snapshot.
    :param ws_data: list whose first element is the ticker dict
    :return: None
    """
    data = ws_data[0]
    try:
        symbol = data['instrument_id']
        if symbol not in self.tickDict:
            # First sight of this symbol: create the tick shell and
            # initialise its depth books.
            tick = VtTickData()
            tick.exchange = EXCHANGE_OKEX
            tick.symbol = symbol
            tick.vtSymbol = tick.symbol
            tick.gatewayName = self.gatewayName
            self.tickDict[symbol] = tick
            self.bids_depth_dict[symbol] = {}
            self.asks_depth_dict[symbol] = {}
        else:
            tick = self.tickDict[symbol]
            tick.datetime = datetime.now()
            # Depth books are rebuilt from scratch on every ticker push.
            self.bids_depth_dict[symbol] = {}
            self.asks_depth_dict[symbol] = {}
        tick.highPrice = float(data['high_24h'])
        tick.lowPrice = float(data['low_24h'])
        tick.lastPrice = float(data['last'])
        tick.volume = float(data['volume_24h'])
    except Exception as ex:
        self.gateway.writeError(u'ContractApi.onTicker exception:{}'.format(str(ex)))
        self.gateway.writeLog(ws_data)
        self.gateway.writeLog(u'ContractApi.onTicker exception:{},{}'.format(str(ex), traceback.format_exc()))
def onDepth(self, ws_data):
    """
    Handle a depth5 push: rebuild the bid/ask books and publish the tick.

    OKEX futures depth is reported in contracts ("张"); no conversion to
    coin units is performed here.
    BUGFIX: depth prices arrive as JSON strings and the original code
    sorted the books lexicographically (so "9" > "10", "99" > "100"),
    which could mis-rank levels; the books are now ordered by numeric
    price value (the stored price values themselves are unchanged).
    :param ws_data: list whose first element is the depth dict
    :return: None
    """
    symbol = ws_data[0]['instrument_id']
    data = ws_data[0]
    try:
        if symbol not in self.tickDict:
            tick = VtTickData()
            tick.symbol = symbol
            tick.vtSymbol = symbol
            tick.gatewayName = self.gatewayName
            self.tickDict[symbol] = tick
        else:
            tick = self.tickDict[symbol]
        # Books are rebuilt from scratch on every push (full snapshot).
        self.bids_depth_dict[symbol] = {}
        self.asks_depth_dict[symbol] = {}
        tick_bids_depth = self.bids_depth_dict[symbol]
        tick_asks_depth = self.asks_depth_dict[symbol]
        # Each level is [price, volume, liquidated_orders, order_count].
        for inf in list(data['bids']):
            price1, vol1, acc_vol1, acc_vol2 = inf
            if abs(float(vol1)) < 0.00001 and price1 in tick_bids_depth:
                del tick_bids_depth[price1]
            else:
                tick_bids_depth[price1] = float(vol1)
        try:
            # Best bid is the highest price -> take from the tail.
            arr = sorted(tick_bids_depth.items(), key=lambda x: float(x[0]))
            tick.bidPrice1, tick.bidVolume1 = arr[-1]
            tick.bidPrice2, tick.bidVolume2 = arr[-2]
            tick.bidPrice3, tick.bidVolume3 = arr[-3]
            tick.bidPrice4, tick.bidVolume4 = arr[-4]
            tick.bidPrice5, tick.bidVolume5 = arr[-5]
        except Exception as ex:
            self.writeLog(u'ContractApi.onDepth exception:{},{}'.format(str(ex), traceback.format_exc()))
        for inf in list(data['asks']):
            price1, vol1, acc_vol1, acc_vol2 = inf
            if abs(float(vol1)) < 0.00001 and price1 in tick_asks_depth:
                del tick_asks_depth[price1]
            else:
                tick_asks_depth[price1] = float(vol1)
        try:
            # Best ask is the lowest price -> take from the head.
            arr = sorted(tick_asks_depth.items(), key=lambda x: float(x[0]))
            tick.askPrice1, tick.askVolume1 = arr[0]
            tick.askPrice2, tick.askVolume2 = arr[1]
            tick.askPrice3, tick.askVolume3 = arr[2]
            tick.askPrice4, tick.askVolume4 = arr[3]
            tick.askPrice5, tick.askVolume5 = arr[4]
        except Exception as ex:
            self.writeLog(u'ContractApi.onDepth exception:{},{}'.format(str(ex), traceback.format_exc()))
        tick.date, tick.time, tick.datetime = self.generateDateTime(data['timestamp'])
        self.writeLog('tickTime:{};tickSymbol:{}'.format(tick.time, tick.symbol))
        # Publish a copy so later mutations of the cached tick are not seen
        # by downstream consumers.
        self.gateway.onTick(copy(tick))
    except Exception as ex:
        self.writeLog(u'ContractApi.onDepth exception:{},{}'.format(str(ex), traceback.format_exc()))
def onTrade(self, ws_data):
    """
    Log a fully-filled trade push; actual trade events are produced in
    onFutureTrades.
    :param ws_data: raw push payload
    :return: None
    """
    self.writeLog(u'onTrade {}'.format(ws_data))
def treatRespondFutureOrder(self, ws_data):
    """
    Handle the acknowledgement of an order request and bind the exchange
    order id to the local order number (FIFO over localNoQueue).

    BUGFIXES: 'error_code' may be absent from the payload, so guard
    before calling int() on it; the rejected-order branch assigned
    'vtsymbol' (typo) instead of 'vtSymbol'. Also dropped two self-
    assignment no-ops (order.symbol/order.totalVolume).
    :param ws_data: ack payload with 'error_code' and/or 'client_oid'
    :return: None
    """
    data = ws_data
    self.gateway.writeLog(ws_data)
    error_code = data.get('error_code')
    if error_code is not None and int(error_code) != 0:
        # Rejected: pop the oldest pending local number and emit a
        # REJECTED order event for it.
        self.gateway.writeError(u'onFutureOrder委托返回错误:{}'.format(FUTURES_ERROR_DICT.get(str(error_code))), error_id=error_code)
        self.gateway.writeLog(ws_data)
        localNo = self.localNoQueue.get_nowait()
        if localNo is None:
            return
        self.gateway.writeLog(u'onFutureOrder移除本地localNo:{}'.format(localNo))
        order = self.localOrderDict.get(localNo)
        if order:
            order.orderID = localNo
            order.vtSymbol = '.'.join([self.gatewayName, order.symbol])
            order.cancelTime = datetime.now().strftime("%H:%M:%S.%f")
            order.status = STATUS_REJECTED
            self.gateway.writeLog(u'onFutureOrder发出OnOrder,拒单,vtOrderId={}'.format(localNo))
            self.gateway.onOrder(order)
        return
    ok_order_id = data.get('client_oid')
    if ok_order_id is None:
        self.gateway.writeError(u'FuturesApi.onFutureOrder 委托返回中,没有orderid')
        self.gateway.writeLog(ws_data)
        return
    # FIFO: the oldest queued local number corresponds to this ack.
    localNo = self.localNoQueue.get_nowait()
    if localNo is None:
        self.gateway.writeError(u'FuturesApi.onSportOrder,未找到本地LocalNo,检查日志')
        self.gateway.writeLog(ws_data)
        return
    self.localNoDict[localNo] = ok_order_id
    self.orderIdDict[ok_order_id] = localNo
def onFutureTrades(self, ws_data):
    """
    Handle an order-channel push: update or create the local order and,
    when the filled quantity grew, emit a corresponding VtTradeData.
    :param ws_data: list whose first element is the order dict
    :return: None
    """
    data = ws_data[0]
    orderId = str(data["client_oid"])  # client-assigned order id
    if orderId not in self.orderDict:
        # First sight of this order (e.g. after a restart): rebuild it
        # entirely from the push payload.
        symbol = data['instrument_id']
        order = VtOrderData()
        order.gatewayName = self.gatewayName
        order.symbol = symbol
        order.vtSymbol = self.gatewayName + '.' + symbol
        order.orderID = orderId
        order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
        order.price = float(data['price'])
        order.totalVolume = int(data['size'])
        order.tradedVolume = int(data['filled_qty'])
        order.direction, order.offset = priceContractOffsetTypeMap[str(data['type'])]
        order.status = statusMap[int(data['status'])]
        self.orderDict[orderId] = order
    else:
        order = self.orderDict[orderId]
        order.tradedVolume = float(data['filled_qty'])
        order.status = statusMap[int(data['status'])]
    self.gateway.onOrder(copy(order))
    # Emit a trade only for the quantity newly filled since the last push.
    bef_volume = self.recordOrderId_BefVolume.get(orderId, 0.0)
    now_volume = float(data['filled_qty']) - bef_volume
    if now_volume > 0.000001:
        self.recordOrderId_BefVolume[orderId] = float(data['filled_qty'])
        trade = VtTradeData()
        trade.gatewayName = self.gatewayName
        trade.symbol = order.symbol
        trade.vtSymbol = order.symbol  # + '.{}'.format(EXCHANGE_OKEX)
        self.tradeID += 1
        trade.tradeID = str(self.tradeID)
        trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID])
        trade.orderID = orderId
        # NOTE(review): gateway name looks hard-coded here -- confirm
        # whether 'OKEX_1' should be self.gatewayName instead.
        trade.vtOrderID = 'OKEX_1.' + orderId
        trade.price = float(data['price_avg'])
        trade.volume = float(now_volume)
        trade.direction, trade.offset = priceContractOffsetTypeMap[str(data['type'])]
        times = data['timestamp']
        # generateDateTime returns (date, time, datetime); the datetime
        # object is used as the trade time.
        timea, timeb, times = self.generateDateTime(times)
        trade.tradeTime = times
        self.gateway.onTrade(trade)
def onFuturePositionInfo(self, ws_data):
    """
    Handle a position-channel push for delivery contracts and publish a
    VtPositionData event for each non-empty long/short leg.
    :param ws_data: list whose first element is the holding dict
    :return: None
    """
    if isinstance(ws_data, list):
        data = ws_data[0]  # delivery-contract payload
        if 'long_qty' in data and int(data['long_qty']) != 0:
            pos = VtPositionData()
            pos.gatewayName = self.gatewayName  # + u'_Future'
            pos.symbol = data['instrument_id']
            pos.vtSymbol = self.gatewayName + '.' + pos.symbol
            pos.position = float(data['long_qty'])
            pos.price = float(data['long_avg_cost'])
            pos.direction = DIRECTION_LONG
            pos.leverage = int(data['leverage'])
            pos.margin_mode = data['margin_mode']
            pos.positionProfit = data['realised_pnl']
            self.gateway.onPosition(pos)
        if 'short_qty' in data and int(data['short_qty']) != 0:
            pos = VtPositionData()
            pos.gatewayName = self.gatewayName  # + u'_Future'
            pos.symbol = data['instrument_id']
            pos.vtSymbol = self.gatewayName + '.' + pos.symbol
            pos.position = float(data['short_qty'])
            pos.price = float(data['short_avg_cost'])
            pos.direction = DIRECTION_SHORT
            pos.leverage = int(data['leverage'])
            pos.margin_mode = data['margin_mode']
            pos.positionProfit = data['realised_pnl']
            self.gateway.onPosition(pos)
    else:
        self.gateway.writeLog(u'onFuturePositionInfo 其他信息')
def onFutureAccountInfo(self, ws_data):
    """
    Handle an account push (equity / realized & unrealized pnl /
    available balance) and republish it as VtAccountData.

    The primary path expects one dict per instrument; the fallback path
    handles the aggregated {symbol: info} form.
    BUGFIX: the fallback loop previously rebound ``account`` to each key
    string and called ``self.gatewayName.onAccount(...)`` -- but
    gatewayName is a str, so the call always raised AttributeError.  It
    now emits one copy per matching delivery contract via the gateway.
    The bare ``except:`` was also narrowed to ``except Exception:``.
    :param ws_data: list whose first element is the account dict
    :return: None
    """
    data = ws_data[0]
    try:
        symbol = data['instrument_id']
        account = VtAccountData()
        account.accountID = symbol
        account.vtAccountID = self.gatewayName + '.' + account.accountID
        account.gatewayName = self.gatewayName
        account.balance = float(data['equity'])  # account equity
        account.closeProfit = float(data['realized_pnl'])
        account.positionProfit = float(data['unrealized_pnl'])
        account.available = float(data['total_avail_balance'])
        account.margin_mode = 'crossed' if data['margin_mode'] == 'crossed' else 'fixed'
        self.gateway.onAccount(account)
    except Exception:
        # Aggregated form: {currency_symbol: account_info_dict, ...}
        for symbol, s_inf in data.items():
            account = VtAccountData()
            account.gatewayName = self.gatewayName
            account.balance = float(s_inf['equity'])  # account equity
            account.closeProfit = float(s_inf['realized_pnl'])
            account.positionProfit = float(s_inf['unrealized_pnl'])
            account.margin = float(s_inf['margin'])  # margin in use
            account.available = float(s_inf['total_avail_balance'])
            account.margin_mode = 'crossed' if s_inf['margin_mode'] == 'crossed' else 'fixed'
            matching_keys = [key for key in self.contractInfo.keys()
                             if symbol in key and 'SWAP' not in key]
            for key in matching_keys:
                per_contract = copy(account)
                per_contract.accountID = key
                per_contract.vtAccountID = self.gatewayName + '.' + key
                self.gateway.onAccount(per_contract)
def sendFutureSendOrder(self, req):
    """
    Place a futures order.

    Allocates a local order number, registers a NOTTRADED VtOrderData
    under it, then sends the order; the exchange id is bound to the
    local number later in treatRespondFutureOrder (FIFO on localNoQueue).
    :param req: VtOrderReq-like object (symbol/price/volume/direction/offset)
    :return: vtOrderID string on success, '' on a bad symbol,
             False when sending fails
    """
    try:
        (symbol_pair, symbol, contract_type, leverage) = self.dealSymbolFunc(req.symbol)
    except Exception as ex:
        self.gateway.writeError(u'请求合约代码格式错误:{}'.format(req.symbol))
        self.writeLog(u'futureSendOrder 请求合约代码格式错误:{},exception:{},{}'.format(req.symbol,str(ex),traceback.format_exc()))
        return ''
    # e.g. symbol_pair: btc_usd, symbol: btc, contract_type: this_week, leverage: 10
    type_ = priceContractTypeMapReverse[(req.direction, req.offset)]
    self.writeLog(u'futureSendOrder:{},price:{},num:{},type:{}'.format(symbol, req.price, req.volume, type_))
    # Bump the local counter and queue the id so the order ack can be
    # matched back to this request in FIFO order.
    self.localNo += 1
    localNo = 'Aa'+ str(self.loginTime + self.localNo)
    self.localNoQueue.put(str(localNo))
    vtOrderID = "{}.{}".format(self.gatewayName, localNo)
    self.writeLog(u'futureSendOrder:创建本地订单:orderId:{}'.format(localNo))
    order = VtOrderData()
    order.gatewayName = self.gatewayName
    order.symbol = symbol
    order.vtSymbol = "{}.{}".format(symbol, EXCHANGE_OKEX)
    order.orderID = localNo
    order.vtOrderID = vtOrderID
    order.price = req.price
    order.totalVolume = req.volume
    order.direction = req.direction
    order.offset = req.offset
    order.status = STATUS_NOTTRADED
    order.orderTime = datetime.now().strftime("%H:%M:%S.%f")
    self.localOrderDict[localNo] = order
    try:
        self.futureTrade(symbol, type_, str(req.price), str(req.volume), order_id=localNo)
        return vtOrderID
    except Exception as ex:
        self.gateway.writeError(u'futureSendOrder发送委托失败:{}'.format(str(ex)))
        self.writeLog(u'futureSendOrder发送委托失败.{}'.format(traceback.format_exc()))
        return False
def sendFutureCancelOrder(self, req):
    """
    Send a cancel request for the given order.
    :param req: request object with ``symbol`` and ``orderID``
    :return: True when the exchange accepted the cancel, else False
    """
    ok, response = self.queryFutureCancelOrder(req.symbol, req.orderID)
    if not ok:
        self.gateway.writeLog(u'撤单失败')
        return False
    self.gateway.writeLog(u'撤单成功')
    self.treatFutureOrderCancel(response)
    return True
def treatFutureOrderCancel(self, ws_data):
    """
    Mark a locally-tracked order as cancelled after a cancel ack.

    BUGFIX: the original indexed ``self.orderDict`` directly and raised
    KeyError when the ack carried an unknown or missing client_oid; the
    lookup is now guarded and the mismatch is logged instead.
    :param ws_data: cancel acknowledgement payload
    :return: None
    """
    ok_order_id = ws_data.get('client_oid', '')
    order = self.orderDict.get(ok_order_id)
    if order is None:
        self.gateway.writeLog(u'treatFutureOrderCancel: unknown client_oid {}'.format(ok_order_id))
        return
    order.cancelTime = datetime.now().strftime("%H:%M:%S.%f")
    order.status = STATUS_CANCELLED
    self.gateway.onOrder(order)
def generateDateTime(self, s):
"""生成时间"""
s = s.split('.')[0]
s = s.split('T')[0]+u' '+s.split('T')[1]
s = td.mktime(td.strptime(s, "%Y-%m-%d %H:%M:%S"))
s = s + 60*60*8
dt = datetime.fromtimestamp(s)
time = dt.strftime("%H:%M:%S.%f")
date = dt.strftime("%Y%m%d")
return date, time,dt
|
from fabric.api import *
from fabric.contrib.files import *
from cloudbio.flavor import Flavor
from cloudbio.custom.shared import (_fetch_and_unpack)
import sys
# This flavour installs the Seal toolkit for processing high-throughput
# sequencing data on Hadoop.
# http://biodoop-seal.sf.net/
#
# It pulls in quite a few dependencies, including Hadoop itself and
# Pydoop (http://pydoop.sf.net/).
#
# The dependencies it pulls into Cloudbiolinux are structured as follows:
#
# contrib/flavor/seal/main.yaml
# sealdist
# customsealdist
#
# config/packages-yum.yaml
# sealdist (metapackage)
# config/custom.yaml
# customsealdist (metapackage)
# - pydoop
# - seal
#
# The components of the customsealdist metapackage are installed through
# the functions in cloudbio/custom/customsealdist.py
#
#
# This flavour has only been installed on Scientific Linux and has not
# yet been well tested.
#
# To try installing it run the following:
# cd <your cloudbiolinux directory>
# fab -f ./fabfile.py -H root@<your host> -c ./contrib/flavor/seal/fabricrc_sl.txt install_biolinux:packagelist=contrib/flavor/seal/main.yaml
#
# Authors: Roman Valls Guimera <roman.valls.guimera@scilifelab.se>
# Luca Pireddu <luca.pireddu@crs4.it>
class SealFlavor(Flavor):
    """CloudBioLinux flavor that installs the Seal Hadoop toolkit."""

    def __init__(self, env):
        Flavor.__init__(self, env)
        self.name = "Seal Flavor"

    def rewrite_config_items(self, name, packages):
        """Extend the package list with stdlib backports on Python < 2.7."""
        if name == 'packages' and sys.version_info < (2, 7):
            # importlib and argparse only entered the stdlib in 2.7.
            packages.extend([
                "python-importlib",
                "python-argparse"
            ])
        return packages

    def post_install(self):
        env.logger.info("Starting post-install")
env.flavor = SealFlavor(env)
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.io
from abc import abstractmethod
from .x_output_stream import XOutputStream as XOutputStream_a4e00b35
class XDataOutputStream(XOutputStream_a4e00b35):
    """
    makes it possible to write machine-independent simple data types to a stream.

    See Also:
        `API XDataOutputStream <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1io_1_1XDataOutputStream.html>`_
    """
    # UNO namespace / type metadata consumed by the ooouno runtime.
    __ooo_ns__: str = 'com.sun.star.io'
    __ooo_full_ns__: str = 'com.sun.star.io.XDataOutputStream'
    __ooo_type_name__: str = 'interface'
    __pyunointerface__: str = 'com.sun.star.io.XDataOutputStream'

    @abstractmethod
    def writeBoolean(self, Value: bool) -> None:
        """
        writes a boolean.

        It is an 8-bit value. 0 means FALSE; all other values mean TRUE.

        Raises:
            com.sun.star.io.IOException: ``IOException``
        """

    @abstractmethod
    def writeByte(self, Value: int) -> None:
        """
        writes an 8-bit byte.

        Raises:
            com.sun.star.io.IOException: ``IOException``
        """

    @abstractmethod
    def writeChar(self, Value: str) -> None:
        """
        writes a 16-bit character.

        Raises:
            com.sun.star.io.IOException: ``IOException``
        """

    @abstractmethod
    def writeDouble(self, Value: float) -> None:
        """
        writes a 64-bit IEEE double.

        Raises:
            com.sun.star.io.IOException: ``IOException``
        """

    @abstractmethod
    def writeFloat(self, Value: float) -> None:
        """
        writes a 32-bit IEEE float.

        Raises:
            com.sun.star.io.IOException: ``IOException``
        """

    @abstractmethod
    def writeHyper(self, Value: int) -> None:
        """
        writes a 64-bit big endian integer.

        Raises:
            com.sun.star.io.IOException: ``IOException``
        """

    @abstractmethod
    def writeLong(self, Value: int) -> None:
        """
        writes a 32-bit big endian integer.

        Raises:
            com.sun.star.io.IOException: ``IOException``
        """

    @abstractmethod
    def writeShort(self, Value: int) -> None:
        """
        writes a 16-bit big endian integer.

        Raises:
            com.sun.star.io.IOException: ``IOException``
        """

    @abstractmethod
    def writeUTF(self, Value: str) -> None:
        """
        writes a string in UTF format.

        Raises:
            com.sun.star.io.IOException: ``IOException``
        """
__all__ = ['XDataOutputStream']
|
import json
from Acquisition import aq_inner
from five import grok
from zope import schema
from zope.component import getUtility
from zope.lifecycleevent import modified
from plone.dexterity.content import Container
from plone.namedfile.interfaces import IImageScaleTraversable
from plone.supermodel import model
from plone.directives import form
from Products.statusmessages.interfaces import IStatusMessage
from wigo.statusapp.tool import IWigoTool
from wigo.statusapp import MessageFactory as _
class IServerNode(model.Schema, IImageScaleTraversable):
    """
    Server details
    """
    # Dexterity schema: field declaration order defines form order.
    server = schema.TextLine(
        title=_(u"Server Name"),
        description=_(u"Enter a fully qualified servername"),
        required=True
    )

    ip = schema.TextLine(
        title=_(u"IP Address"),
        required=False,
    )

    machine = schema.TextLine(
        title=_(u"Physical Server"),
        description=_(u"name of the physical machine this virtual server "
                      u"is located on"),
        required=False,
    )

    protocol = schema.TextLine(
        title=_(u"Request Protocol"),
        description=_(u"Specify alternative protocol e.g. smtp for mx server"),
        default=u"http",
        required=True
    )

    # Hidden in edit forms: this field is machine-managed JSON storage,
    # written by the ServerDetails view below.
    form.mode(serverdetails='hidden')
    serverdetails = schema.TextLine(
        title=_(u"server Details"),
        description=_(u"Serverdetails json storage. You normally should have "
                      u"no need to change this manually"),
        required=False,
    )
class ServerNode(Container):
    """Dexterity content object for a single server node."""
    grok.implements(IServerNode)
class View(grok.View):
    """Default HTML view for a ServerNode."""
    grok.context(IServerNode)
    grok.require('zope2.View')
    grok.name('view')

    def details(self):
        """Return the stored server details decoded from JSON."""
        context = aq_inner(self.context)
        data = getattr(context, 'serverdetails')
        return json.loads(data)

    def has_server_info(self):
        """True when an nginx section is present in the details."""
        # Idiom fix: direct dict membership replaces the
        # `if 'nginx' in data.keys(): return True / return False` dance.
        return 'nginx' in self.details()

    def check_server_status(self):
        """Query the wigo tool for the live status of this server."""
        host = getattr(self.context, 'server')
        protocol = getattr(self.context, 'protocol', 'http')
        tool = getUtility(IWigoTool)
        status = tool.status(hostname=host, service=protocol)
        return status
class JSONView(grok.View):
    """Expose the stored server details as an application/json response."""
    grok.context(IServerNode)
    grok.require('zope2.View')
    grok.name('json-view')

    def render(self):
        # Decode then re-encode: validates the stored JSON before serving it.
        stored = getattr(aq_inner(self.context), 'serverdetails')
        report = json.loads(stored)
        self.request.response.setHeader("Content-Type", "application/json")
        return json.dumps(report)
class ServerDetails(grok.View):
    """Fetch fresh server details from the wigo tool and store them."""
    grok.context(IServerNode)
    grok.require('cmf.ModifyPortalContent')
    grok.name('update-serverdetails')

    def render(self):
        context = aq_inner(self.context)
        tool = getUtility(IWigoTool)
        hostname = getattr(context, 'server', '')
        # BUGFIX: the previous `hostname is not None` check could never be
        # False because the getattr default is '' -- test truthiness so an
        # empty hostname skips the update.
        if hostname:
            data = tool.get(hostname=hostname)
            setattr(context, 'serverdetails', json.dumps(data))
            modified(context)
            # BUGFIX: reindexObject expects a sequence of index names; a
            # bare string would be iterated character by character.
            context.reindexObject(idxs=['modified'])
            IStatusMessage(self.request).addStatusMessage(
                _(u"The panel has successfully been updated"),
                type='info')
        next_url = context.absolute_url()
        return self.request.response.redirect(next_url)
|
import unittest
import numpy as np
import openmdao.api as om
from openmdao.utils.assert_utils import assert_rel_error
class TestNonlinearCircuit(unittest.TestCase):
def test_nonlinear_circuit_analysis(self):
import numpy as np
import openmdao.api as om
class Resistor(om.ExplicitComponent):
    """Computes current across a resistor using Ohm's law."""

    def initialize(self):
        self.options.declare('R', default=1., desc='Resistance in Ohms')

    def setup(self):
        self.add_input('V_in', units='V')
        self.add_input('V_out', units='V')
        self.add_output('I', units='A')

        # I = (V_in - V_out) / R, so the partials are the constants
        # +1/R and -1/R and can be declared with fixed values here.
        resistance = self.options['R']
        self.declare_partials('I', 'V_in', val=1 / resistance)
        self.declare_partials('I', 'V_out', val=-1 / resistance)

    def compute(self, inputs, outputs):
        voltage_drop = inputs['V_in'] - inputs['V_out']
        outputs['I'] = voltage_drop / self.options['R']
class Diode(om.ExplicitComponent):
    """Computes current across a diode using the Shockley diode equation."""

    def initialize(self):
        self.options.declare('Is', default=1e-15, desc='Saturation current in Amps')
        self.options.declare('Vt', default=.025875, desc='Thermal voltage in Volts')

    def setup(self):
        self.add_input('V_in', units='V')
        self.add_input('V_out', units='V')
        self.add_output('I', units='A')

        # The diode equation is nonlinear, so the partials are only
        # declared here and evaluated in compute_partials.
        self.declare_partials('I', 'V_in')
        self.declare_partials('I', 'V_out')

    def compute(self, inputs, outputs):
        delta_v = inputs['V_in'] - inputs['V_out']
        outputs['I'] = self.options['Is'] * (np.exp(delta_v / self.options['Vt']) - 1)

    def compute_partials(self, inputs, J):
        delta_v = inputs['V_in'] - inputs['V_out']
        # dI/dV_in = Is * exp(dV/Vt) / Vt; dI/dV_out is its negation.
        slope = self.options['Is'] * np.exp(delta_v / self.options['Vt']) / self.options['Vt']
        J['I', 'V_in'] = slope
        J['I', 'V_out'] = -slope
class Node(om.ImplicitComponent):
    """Computes voltage residual across a node based on incoming and outgoing current."""

    def initialize(self):
        self.options.declare('n_in', default=1, types=int, desc='number of connections with + assumed in')
        self.options.declare('n_out', default=1, types=int, desc='number of current connections + assumed out')

    def setup(self):
        self.add_output('V', val=5., units='V')

        # Kirchhoff's current law: residual = sum(in) - sum(out),
        # so partials wrt inflows are +1 and wrt outflows are -1.
        for i in range(self.options['n_in']):
            i_name = 'I_in:{}'.format(i)
            self.add_input(i_name, units='A')
            self.declare_partials('V', i_name, val=1)
        for i in range(self.options['n_out']):
            i_name = 'I_out:{}'.format(i)
            self.add_input(i_name, units='A')
            self.declare_partials('V', i_name, val=-1)

        # note: we don't declare any partials wrt `V` here,
        # because the residual doesn't directly depend on it

    def apply_nonlinear(self, inputs, outputs, residuals):
        residuals['V'] = 0.
        for i_conn in range(self.options['n_in']):
            residuals['V'] += inputs['I_in:{}'.format(i_conn)]
        for i_conn in range(self.options['n_out']):
            residuals['V'] -= inputs['I_out:{}'.format(i_conn)]
class Circuit(om.Group):
def setup(self):
self.add_subsystem('n1', Node(n_in=1, n_out=2), promotes_inputs=[('I_in:0', 'I_in')])
self.add_subsystem('n2', Node()) # leaving defaults
self.add_subsystem('R1', Resistor(R=100.), promotes_inputs=[('V_out', 'Vg')])
self.add_subsystem('R2', Resistor(R=10000.))
self.add_subsystem('D1', Diode(), promotes_inputs=[('V_out', 'Vg')])
self.connect('n1.V', ['R1.V_in', 'R2.V_in'])
self.connect('R1.I', 'n1.I_out:0')
self.connect('R2.I', 'n1.I_out:1')
self.connect('n2.V', ['R2.V_out', 'D1.V_in'])
self.connect('R2.I', 'n2.I_in:0')
self.connect('D1.I', 'n2.I_out:0')
self.nonlinear_solver = om.NewtonSolver()
self.linear_solver = om.DirectSolver()
self.nonlinear_solver.options['iprint'] = 2
self.nonlinear_solver.options['maxiter'] = 10
self.nonlinear_solver.options['solve_subsystems'] = True
self.nonlinear_solver.linesearch = om.ArmijoGoldsteinLS()
self.nonlinear_solver.linesearch.options['maxiter'] = 10
self.nonlinear_solver.linesearch.options['iprint'] = 2
p = om.Problem()
model = p.model
model.add_subsystem('ground', om.IndepVarComp('V', 0., units='V'))
model.add_subsystem('source', om.IndepVarComp('I', 0.1, units='A'))
model.add_subsystem('circuit', Circuit())
model.connect('source.I', 'circuit.I_in')
model.connect('ground.V', 'circuit.Vg')
p.setup()
# set some initial guesses
p['circuit.n1.V'] = 10.
p['circuit.n2.V'] = 1e-3
p.run_model()
assert_rel_error(self, p['circuit.n1.V'], 9.90804735, 1e-5)
assert_rel_error(self, p['circuit.n2.V'], 0.71278185, 1e-5)
assert_rel_error(self, p['circuit.R1.I'], 0.09908047, 1e-5)
assert_rel_error(self, p['circuit.R2.I'], 0.00091953, 1e-5)
assert_rel_error(self, p['circuit.D1.I'], 0.00091953, 1e-5)
# sanity check: should sum to .1 Amps
assert_rel_error(self, p['circuit.R1.I'] + p['circuit.D1.I'], .1, 1e-6)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
""" Timeflux """
# Fix Ctrl-C handling in Windows
import os
os.environ["FOR_DISABLE_CONSOLE_CTRL_HANDLER"] = "1"
# Versioning
try:
from setuptools_scm import get_version
__version__ = get_version(root="..", relative_to=__file__)
except:
try:
from .version import version
__version__ = version
except:
__version__ = "0.0.0"
|
from models.base.connection.ConnectionQueueBase import ConnectionQueueBase
from typing import List
from sqlalchemy import Column, String, Integer, ForeignKey
from sqlalchemy.orm import relationship
from models.base.connection.ConnectionDatabaseBase import ConnectionDatabaseBase
from models.base.connection.ConnectionFileBase import ConnectionFileBase
from models.base.EntityBase import EntityBase
from infrastructure.json.BaseConverter import BaseConverter
@BaseConverter.register
class ConnectorTypeBase(EntityBase):
    """Connector-type entity grouping a connection type with its databases, files and queues."""

    def __init__(self,
                 Name: str = None,
                 ConnectionTypeId: int = None,
                 ConnectionType=None,
                 Databases: List[ConnectionDatabaseBase] = None,
                 Files: List[ConnectionFileBase] = None,
                 Queues: List[ConnectionQueueBase] = None,
                 *args, **kwargs):
        """Bug fix: the list parameters previously defaulted to `[]`, a mutable
        default shared by every instance constructed without an argument.
        None sentinels preserve the call interface while giving each instance
        its own fresh list. (Name was also mis-annotated as int.)
        """
        super().__init__(*args, **kwargs)
        self.Queues = Queues if Queues is not None else []
        self.Files = Files if Files is not None else []
        self.Databases = Databases if Databases is not None else []
        self.Name: str = Name
        self.ConnectionTypeId: int = ConnectionTypeId
        self.ConnectionType = ConnectionType
|
from .extensions import db, ma
from celery import Celery
from flask import Flask
from flask import g
from flask_bootstrap import Bootstrap
from .config import config, SELECTED_CONFIG
from .views import recipes
def create_celery_app(app=None):
    """Build a Celery instance whose tasks execute inside the Flask app context."""
    app = app or create_app()
    celery = Celery(__name__, broker=app.config['CELERY_BROKER_URL'])
    celery.conf.update(app.config)
    base_task = celery.Task

    class ContextTask(base_task):
        # Wrap every task invocation in the Flask application context so
        # extensions (db, config, g) are available to task bodies.
        abstract = True

        def __call__(self, *args, **kwargs):
            with app.app_context():
                return base_task.__call__(self, *args, **kwargs)

    celery.Task = ContextTask
    celery.app = app
    return celery
def create_before_request(app):
    """Return a before-request hook exposing the database handle on ``flask.g``."""

    def before_request():
        # Make the shared db instance reachable per-request as g.db.
        g.db = db

    return before_request
def create_app():
    """Create and configure an instance of the Flask application."""
    flask_app = Flask(__name__, instance_relative_config=True)
    flask_app.config.from_object(config[SELECTED_CONFIG])
    # Wire up extensions, blueprints and the per-request hook.
    db.init_app(flask_app)
    flask_app.register_blueprint(recipes)
    ma.init_app(flask_app)
    Bootstrap(flask_app)
    flask_app.before_request(create_before_request(flask_app))
    return flask_app
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Base class for all hooks"""
import logging
import warnings
from typing import TYPE_CHECKING, Any, Dict, List
from airflow.typing_compat import Protocol
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.log.secrets_masker import redact
if TYPE_CHECKING:
    from airflow.models.connection import Connection  # Avoid circular imports.

# Module-level logger used by the classmethods below.
log = logging.getLogger(__name__)
class BaseHook(LoggingMixin):
    """
    Abstract base class for hooks, hooks are meant as an interface to
    interact with external systems. MySqlHook, HiveHook, PigHook return
    object that can handle the connection and interaction to specific
    instances of these systems, and expose consistent methods to interact
    with them.
    """

    @classmethod
    def get_connections(cls, conn_id: str) -> List["Connection"]:
        """
        Get all connections as an iterable, given the connection id.

        :param conn_id: connection id
        :return: array of connections
        """
        # Bug fix: the two concatenated message fragments previously ran the
        # sentences together ("...future.Please use...").
        warnings.warn(
            "`BaseHook.get_connections` method will be deprecated in the future. "
            "Please use `BaseHook.get_connection` instead.",
            PendingDeprecationWarning,
            stacklevel=2,
        )
        return [cls.get_connection(conn_id)]

    @classmethod
    def get_connection(cls, conn_id: str) -> "Connection":
        """
        Get connection, given connection id.

        :param conn_id: connection id
        :return: connection
        """
        from airflow.models.connection import Connection

        conn = Connection.get_connection_from_secrets(conn_id)
        if conn.host:
            # Password and extras are redacted before being logged.
            log.info(
                "Using connection to: id: %s. Host: %s, Port: %s, Schema: %s, Login: %s, Password: %s, "
                "extra: %s",
                conn.conn_id,
                conn.host,
                conn.port,
                conn.schema,
                conn.login,
                redact(conn.password),
                redact(conn.extra_dejson),
            )
        return conn

    @classmethod
    def get_hook(cls, conn_id: str) -> "BaseHook":
        """
        Returns default hook for this connection id.

        :param conn_id: connection id
        :return: default hook for this connection
        """
        # TODO: set method return type to BaseHook class when on 3.7+.
        # See https://stackoverflow.com/a/33533514/3066428
        connection = cls.get_connection(conn_id)
        return connection.get_hook()

    def get_conn(self) -> Any:
        """Returns connection for the hook."""
        raise NotImplementedError()

    @classmethod
    def get_connection_form_widgets(cls) -> Dict[str, Any]:
        """Returns extra connection-form widgets; no-op in the base class."""
        ...

    @classmethod
    def get_ui_field_behaviour(cls) -> Dict[str, Any]:
        """Returns connection-form field customizations; no-op in the base class."""
        ...
class DiscoverableHook(Protocol):
    """
    Interface that providers *can* implement to be discovered by ProvidersManager.

    It is not used by any of the Hooks, but simply methods and class fields described here are
    implemented by those Hooks. Each method is optional -- only implement the ones you need.

    The conn_name_attr, default_conn_name, conn_type should be implemented by those
    Hooks that want to be automatically mapped from the connection_type -> Hook when get_hook method
    is called with connection_type.

    Additionally hook_name should be set when you want the hook to have a custom name in the UI selection
    Name. If not specified, conn_name will be used.

    The "get_ui_field_behaviour" and "get_connection_form_widgets" are optional - override them if you want
    to customize the Connection Form screen. You can add extra widgets to parse your extra fields via the
    get_connection_form_widgets method as well as hide or relabel the fields or pre-fill
    them with placeholders via get_ui_field_behaviour method.

    Note that the "get_ui_field_behaviour" and "get_connection_form_widgets" need to be set by each class
    in the class hierarchy in order to apply widget customizations.

    For example, even if you want to use the fields from your parent class, you must explicitly
    have a method on *your* class:

    .. code-block:: python

        @classmethod
        def get_ui_field_behaviour(cls):
            return super().get_ui_field_behaviour()

    You also need to add the Hook class name to list 'hook_class_names' in provider.yaml in case you
    build an internal provider or to return it in dictionary returned by provider_info entrypoint in the
    package you prepare.

    You can see some examples in airflow/providers/jdbc/hooks/jdbc.py.
    """

    # Attributes used for the automatic connection_type -> Hook mapping
    # described in the class docstring above.
    conn_name_attr: str
    default_conn_name: str
    conn_type: str
    hook_name: str

    @staticmethod
    def get_connection_form_widgets() -> Dict[str, Any]:
        """
        Returns dictionary of widgets to be added for the hook to handle extra values.

        If you have class hierarchy, usually the widgets needed by your class are already
        added by the base class, so there is no need to implement this method. It might
        actually result in warning in the logs if you try to add widgets that have already
        been added by the base class.

        Note that values of Dict should be of wtforms.Field type. It's not added here
        for the efficiency of imports.
        """
        ...

    @staticmethod
    def get_ui_field_behaviour() -> Dict[str, Any]:
        """
        Returns dictionary describing customizations to implement in javascript handling the
        connection form. Should be compliant with airflow/customized_form_field_behaviours.schema.json'

        If you change conn_type in a derived class, you should also
        implement this method and return field customizations appropriate to your Hook. This
        is because the child hook will have usually different conn_type and the customizations
        are per connection type.

        .. seealso::
            :class:`~airflow.providers.google.cloud.hooks.compute_ssh.ComputeSSH` as an example
        """
        ...
|
"""
Various views for the app.
"""
from datetime import datetime
import json
from flask import (
render_template,
request
)
from bookmarks import (
app,
util,
)
from bookmarks.db import (
db,
Bookmark,
BookmarkEncoder,
)
config_file = "config.json"
config = {}
@app.before_first_request
def init():
    """
    Initialize the app by creating the database tables if they do not
    already exist. (The configuration itself is loaded in main(), not here.)
    """
    db.create_all()
@app.route("/")
def bookmarks():
user = config["user"]
return render_template("page.html", user=user, bookmarks=Bookmark.query.all())
@app.route("/api/bookmarks", methods=['GET'])
def api_get():
return json.dumps(Bookmark.query.all(), cls=BookmarkEncoder)
@app.route("/api/bookmarks", methods=['POST'])
def api_post():
if request.method == 'GET':
return json.dumps(Bookmark.query.all(), cls=BookmarkEncoder)
try:
request_data = json.loads(request.data)
except ValueError:
return json.dumps({'error': 'Invalid JSON'}), 400
if not validate_key(request_data):
return json.dumps({'error': 'Invalid API key'}), 403
app.logger.info("Starting processing")
for bookmark in request_data['bookmarks']:
if not bookmark_exists(bookmark):
if 'url' not in bookmark:
return json.dumps({'error': 'No URL given'}), 400
if 'desc' in bookmark:
desc = bookmark['desc']
else:
desc = util.get_page_title(bookmark['url'])
if not desc:
return json.dumps({'error': 'No description available'}), 400
favicon = util.get_favicon_link(bookmark['url'])
db.session.add(Bookmark(url=bookmark['url'], desc=desc, favicon=favicon))
db.session.commit()
app.logger.info("Processing complete")
response = {'error': None, 'bookmarks': Bookmark.query.all()}
return json.dumps(response, cls=BookmarkEncoder)
@app.route("/api/bookmarks/<int:id>", methods=['GET'])
def api_get_bookmark(id):
return json.dumps(Bookmark.query.filter_by(id=id).first_or_404(), cls=BookmarkEncoder)
@app.route("/api/bookmarks/<int:id>", methods=['DELETE'])
def api_delete_bookmark(id):
try:
request_data = json.loads(request.data)
except ValueError:
return json.dumps({'error': 'Invalid JSON'}), 400
if not validate_key(request_data):
return json.dumps({'error': 'Invalid API key'}), 403
bookmark = Bookmark.query.filter_by(id=id).first_or_404()
db.session.delete(bookmark)
db.session.commit()
response = {'error': None, 'bookmark': bookmark}
return json.dumps(response, cls=BookmarkEncoder)
def validate_key(data):
    """Check that the request payload carries the configured API key."""
    if 'key' not in data:
        return False
    return data['key'] == config['key']
def bookmark_exists(bookmark):
    """Return True if a stored bookmark already has this bookmark's URL.

    The original second check (``'desc' in bookmark and bmark.url ==
    bookmark['url']``) was dead code -- it is subsumed by the plain URL
    comparison -- and has been removed.
    """
    return any(bmark.url == bookmark['url'] for bmark in Bookmark.query.all())
def main():
    """Load the JSON configuration and start the development server."""
    global config
    with open(config_file, "r") as config_data:
        config = json.load(config_data)
    # Fall back to sensible defaults when host/port are not configured.
    app.run(host=config.get("host", "0.0.0.0"), port=config.get("port", 8080))
# Run the development server when executed directly.
if __name__ == '__main__':
    main()
|
import cv2
import pafy
# Haar cascade for frontal-face detection; the XML file is expected in the
# working directory.
face_cascade=cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")
# Frame downscale factor. NOTE(review): unused in this chunk -- confirm callers.
ds_factor=0.6
class VideoCamera(object):
    """Wraps a cv2.VideoCapture reading frames from a YouTube stream via pafy."""

    def __init__(self):
        video_url = "https://www.youtube.com/watch?v=cCXB97tRouM"
        youtube_url = pafy.new(video_url)
        # Pick the best available mp4 stream and open it with OpenCV.
        stream_url = youtube_url.getbest(preftype="mp4").url
        self.video = cv2.VideoCapture(stream_url)

    def __del__(self):
        # Release the capture handle when the camera object is collected.
        self.video.release()

    def get_frame(self):
        """Read one frame and return it JPEG-encoded, or None if reading failed.

        Bug fix: the original ignored the `success` flag, so a failed read
        passed None into cv2.imencode and crashed.
        """
        success, image = self.video.read()
        if not success:
            return None
        ret, jpeg = cv2.imencode('.jpg', image)
        if not ret:
            return None
        return jpeg.tobytes()
|
'''
Author: yangzuo
Date: 2021-04-20 15:48:37
Email: yangzuo@tencent.com
LastEditors: yangzuo
LastEditTime: 2021-04-20 15:51:18
FilePath: /leetcode_solution/solutions/Tree/104.py
'''
'''
给定一个二叉树,找出其最大深度。
二叉树的深度为根节点到最远叶子节点的最长路径上的节点数。
说明: 叶子节点是指没有子节点的节点。
示例:
给定二叉树 [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
返回它的最大深度 3 。
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def maxDepth(self, root: "TreeNode") -> int:
        """Return the maximum depth of the binary tree rooted at *root*.

        Depth is the number of nodes on the longest root-to-leaf path; an
        empty tree has depth 0. The annotation is quoted because TreeNode is
        only provided by the judge environment (it is commented out above);
        the bare name raised NameError when this module ran standalone.
        """
        if root is None:
            return 0
        return max(self.maxDepth(root.left), self.maxDepth(root.right)) + 1
|
# -*- coding: utf-8 -*-
from flask import jsonify, request, session, Blueprint
from models.account import User, AnonymousUser
from utils.decorators import login_required
from weblist import db
# Blueprint grouping all authentication endpoints under /api/auth.
auth = Blueprint('auth', __name__, url_prefix='/api/auth')
@auth.route('/register/', methods=['POST'])
def register():
    """Register a new user from form fields: username, password, password_repeat."""
    username = request.form.get('username')
    password = request.form.get('password')
    password_repeat = request.form.get('password_repeat')
    # Bug fix: the original tested the undefined name `user` here, which
    # raised NameError instead of reporting missing parameters.
    if not username or not password or not password_repeat:
        return jsonify(code=101, msg=u'参数不全')
    if password_repeat != password:
        return jsonify(code=101, msg=u'密码不一致')
    row = User(username=username)
    row.set_password(password)
    db.session.add(row)
    db.session.commit()
    return jsonify(code=200, msg=u'注册成功')
@auth.route('/login/', methods=['POST'])
def login():
    """Authenticate a user and store the user id in the session."""
    username = request.form.get('username')
    password = request.form.get('password')
    if not username or not password:
        return jsonify(code=101, msg=u'参数不全')
    account = User.query.filter_by(username=username, is_valid=True).first()
    if account is None:
        return jsonify(code=102, msg=u'用户不存在')
    if not account.check_password(password):
        return jsonify(code=101, msg=u'密码错误')
    # Mark the session as authenticated.
    session['user_id'] = account.id
    return jsonify(code=200, msg=u'登录成功')
@auth.route('/logout/')
def logout():
    """Log the current user out by discarding the whole session."""
    session.clear()
    return jsonify(code=200, msg=u'退出成功')
@auth.route('/user/')
@login_required
def user():
    """Return the authenticated user's id and username."""
    payload = dict(id=request.user.id, username=request.user.username)
    return jsonify(code=200, data=payload)
|
from time import ctime
from flask import Flask
from flaskext.enterprise import Enterprise
from controller import user_controller
from mongoengine import *
import config
# Application and SOAP service wiring.
app = Flask(__name__)
# `connect` comes from the mongoengine star-import above.
connect('db')
enterprise = Enterprise(app)
# Shorthand aliases for the SOAP primitive and collection types.
String = enterprise._sp.String
Integer = enterprise._sp.Integer
Boolean = enterprise._sp.Boolean
Array = enterprise._scls.Array
class DemoService(enterprise.SOAPService):
    """SOAP facade over the user_controller token operations."""

    __soap_server_address__ = '/soap'
    __soap_target_namespace__ = 'soap'

    @enterprise.soap(String, String, String, _returns=String)
    def signIn(self, name, surname, jmbg):
        # Delegate to the controller, which returns the session token.
        return user_controller.signInUser(name, surname, jmbg)

    @enterprise.soap(String, _returns=Boolean)
    def checkToken(self, token):
        # True when the token is currently valid.
        return user_controller.checkToken(token)

    @enterprise.soap(String, _returns=Boolean)
    def deactivateToken(self, token):
        # True when the token was successfully deactivated.
        return user_controller.deactivateToken(token)

    @enterprise.soap(_returns=Array(String))
    def getActiveTokens(self):
        return user_controller.getActiveTokens()
# Start the SOAP server when run directly.
if __name__ == '__main__':
    app.run(host=config.SOAP_HOST, port=config.SOAP_PORT)
|
import unittest
from dcp.problems.daily.first_missing import first_missing1
class Test_FirstMissing1(unittest.TestCase):
    """Exercises first_missing1 on the classic first-missing-positive examples."""

    def setUp(self):
        pass

    def test_case1(self):
        self.assertEqual(first_missing1([3, 4, -1, 1]), 2)

    def test_case2(self):
        self.assertEqual(first_missing1([1, 2, 0]), 3)

    def test_case3(self):
        self.assertEqual(first_missing1([1, 2, 2, 1, 0]), 3)
from dcp.problems.daily.first_missing import first_missing2
class Test_FirstMissing2(unittest.TestCase):
    """Exercises first_missing2 on the same examples as Test_FirstMissing1."""

    def setUp(self):
        pass

    def test_case1(self):
        self.assertEqual(first_missing2([3, 4, -1, 1]), 2)

    def test_case2(self):
        self.assertEqual(first_missing2([1, 2, 0]), 3)

    def test_case3(self):
        self.assertEqual(first_missing2([1, 2, 2, 1, 0]), 3)
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from azure.cli.core.commands.parameters import (
get_resource_name_completion_list,
enum_choice_list,
name_type)
from azure.cli.core.commands import register_cli_argument
import azure.cli.core.commands.arm # pylint: disable=unused-import
from azure.mgmt.redis.models.redis_management_client_enums import (
RebootType,
RedisKeyType,
SkuFamily,
SkuName)
from azure.mgmt.redis.models import (
ScheduleEntry,
)
class JsonString(dict):
    """A dict parsed from a JSON string CLI argument.

    Values wrapped in a matching pair of single or double quotes (as produced
    by dos/cmd.exe quoting) are unwrapped before parsing.
    """

    def __init__(self, value):
        super(JsonString, self).__init__()
        import json
        # Length guard: the original indexed value[0]/value[-1] unconditionally,
        # raising IndexError on an empty argument instead of a JSON parse error.
        if len(value) >= 2 and value[0] in ("'", '"') and value[-1] == value[0]:
            # Remove leading and trailing quotes for dos/cmd.exe users
            value = value[1:-1]
        dictval = json.loads(value)
        self.update(dictval)
class ScheduleEntryList(list):
    """A list of ScheduleEntry objects parsed from a JSON array CLI argument."""

    def __init__(self, value):
        super(ScheduleEntryList, self).__init__()
        import json
        # Same quote-unwrapping convenience as JsonString, with the same
        # length guard against empty/one-character values (IndexError before).
        if len(value) >= 2 and value[0] in ("'", '"') and value[-1] == value[0]:
            # Remove leading and trailing quotes for dos/cmd.exe users
            value = value[1:-1]
        dictval = json.loads(value)
        self.extend([ScheduleEntry(
            row['dayOfWeek'],
            int(row['startHourUtc']),
            row.get('maintenanceWindow', None))
            for row in dictval])
# Arguments shared by all `redis` commands.
register_cli_argument('redis', 'name', arg_type=name_type, help='Name of the redis cache.', completer=get_resource_name_completion_list('Microsoft.Cache/redis'), id_part='name')
register_cli_argument('redis', 'redis_configuration', type=JsonString)
register_cli_argument('redis', 'reboot_type', **enum_choice_list(RebootType))
register_cli_argument('redis', 'key_type', **enum_choice_list(RedisKeyType))
register_cli_argument('redis', 'shard_id', type=int)
# Command-specific arguments.
register_cli_argument('redis import-method', 'files', nargs='+')
register_cli_argument('redis patch-schedule set', 'schedule_entries', type=ScheduleEntryList)
# `redis create` arguments.
register_cli_argument('redis create', 'name', arg_type=name_type, completer=None)
register_cli_argument('redis create', 'sku_name', **enum_choice_list(SkuName))
register_cli_argument('redis create', 'sku_family', **enum_choice_list(SkuFamily))
register_cli_argument('redis create', 'sku_capacity', choices=[str(n) for n in range(0, 7)])
register_cli_argument('redis create', 'enable_non_ssl_port', action='store_true')
register_cli_argument('redis create', 'tenant_settings', type=JsonString)
register_cli_argument('redis create', 'shard_count', type=int)
register_cli_argument('redis create', 'subnet_id')  # TODO: Create generic id completer similar to name
|
def load_file_lines(file):
    """ Loads a file as a list of lines.
    Args:
        file (str): The path of the file.
    Returns:
        list of str: A list of lines in the file, without trailing newlines.
    """
    with open(file, mode='r') as target:
        return [line.rstrip('\n') for line in target]
def load_float_file(file):
    """ Loads a data file of newline-delimited floating-point values.
    Args:
        file (str): The path of the file.
    Returns:
        list of float: The data from the file.
    """
    with open(file, 'r') as target:
        return [float(entry) for entry in target]
|
import copy
import unittest
import requests_mock
import zeep.exceptions
from tbk.soap.requestor import SoapRequest
from tbk.soap.soap_client import SoapClient
from tbk.soap.exceptions import InvalidSignatureResponse, TypeDoesNotExist, SoapServerException, MethodDoesNotExist
from tbk.soap.utils import load_key_from_data
from tbk.soap.wsse import sign_envelope, verify_envelope
from tbk.soap.zeep_client import ZeepSoapClient, ZeepWsseSignature
from .utils import mock, get_fixture_url, get_fixture_data, assert_equal_xml, get_xml_envelope
@requests_mock.Mocker()
class ZeepClientTest(unittest.TestCase):
    """Tests for ZeepSoapClient against fixture WSDL/keys and mocked HTTP.

    The class-level requests_mock.Mocker() passes a mocker object as an extra
    argument to every test method (bound to `__`/`requests`/`___` below).
    """

    def setUp(self):
        # Fixture WSDL plus commerce key/cert and the Transbank certificate.
        self.wsdl_url = get_fixture_url('WsWebpayService.wsdl')
        self.key_data = get_fixture_data('597020000547.key')
        self.cert_data = get_fixture_data('597020000547.crt')
        self.tbk_cert_data = get_fixture_data('tbk.pem')
        self.zeep_client = ZeepSoapClient(self.wsdl_url, self.key_data, self.cert_data, self.tbk_cert_data)

    def test_init(self, __):
        self.assertIsInstance(self.zeep_client, SoapClient)

    def test_get_enum_value(self, __):
        for value in ('TR_NORMAL_WS', 'TR_NORMAL_WS_WPM', 'TR_MALL_WS'):
            enum_value = self.zeep_client.get_enum_value('wsTransactionType', value)
            self.assertEqual(value, enum_value)

    def test_get_enum_value_type_error(self, __):
        # Unknown enum type name must raise TypeDoesNotExist.
        self.assertRaises(TypeDoesNotExist, self.zeep_client.get_enum_value, 'does_not_exist', 'TR_NORMAL_WS')

    def test_create_object(self, __):
        client = self.zeep_client.client
        card_detail_type = client.get_type('ns0:cardDetail')
        expected = card_detail_type(cardNumber='1234', cardExpirationDate='12/20')
        new_object = self.zeep_client.create_object('cardDetail', cardNumber='1234', cardExpirationDate='12/20')
        self.assertEqual(expected, new_object)

    def test_create_object_type_error(self, __):
        self.assertRaises(
            TypeDoesNotExist,
            self.zeep_client.create_object, 'does_not_exist', cardNumber='1234', cardExpirationDate='12/20')

    def test_create_object_arguments_error(self, __):
        self.assertRaises(TypeError, self.zeep_client.create_object, 'cardDetail', does_not_exist='1234')

    def test_request_wrong_method(self, __):
        with self.assertRaises(MethodDoesNotExist):
            request = self.create_soap_request('wrong_method_name', 1)
            self.zeep_client.request(request)

    def test_request_server_exception(self, __):
        # A soap:Server Fault must be surfaced as SoapServerException with
        # the error text and numeric code parsed out of the fault comment.
        method = mock.Mock()
        method_name = 'methodName'
        setattr(self.zeep_client.client.service, method_name, method)
        message = '<!-- Invalid amount(304) -->'
        code = 'soap:Server'
        method.side_effect = zeep.exceptions.Fault(message, code)
        with self.assertRaises(SoapServerException) as context:
            request = self.create_soap_request(method_name)
            self.zeep_client.request(request)
        self.assertEqual(context.exception.error, 'Invalid amount')
        self.assertEqual(context.exception.code, 304)

    def test_request_verified(self, requests):
        expected_response = get_fixture_data('acknowledgeTransaction.response.xml').encode('utf-8')
        requests.register_uri(
            'POST',
            'https://webpay3g.transbank.cl:443/WSWebpayTransaction/cxf/WSWebpayService',
            content=expected_response)
        # The response envelope must be verified exactly once.
        with mock.patch('tbk.soap.zeep_client.verify_envelope', return_value=True) as verifier:
            request = self.create_soap_request('acknowledgeTransaction', 'token')
            self.zeep_client.request(request)
            self.assertEqual(1, verifier.call_count)

    @mock.patch('tbk.soap.zeep_client.verify_envelope', return_value=True)
    def test_request_with_signature(self, requests, ___):
        expected_response = get_fixture_data('acknowledgeTransaction.response.xml').encode('utf-8')
        requests.register_uri(
            'POST',
            'https://webpay3g.transbank.cl:443/WSWebpayTransaction/cxf/WSWebpayService',
            content=expected_response)
        # The outgoing request envelope must be signed exactly once.
        with mock.patch('tbk.soap.zeep_client.sign_envelope', return_value=None) as signer:
            request = self.create_soap_request('acknowledgeTransaction', 'token')
            self.zeep_client.request(request)
            self.assertEqual(1, signer.call_count)

    def test_request_not_verified(self, requests):
        expected_response = get_fixture_data('acknowledgeTransaction.response.xml').encode('utf-8')
        requests.register_uri(
            'POST',
            'https://webpay3g.transbank.cl:443/WSWebpayTransaction/cxf/WSWebpayService',
            content=expected_response)
        # A response that fails signature verification raises InvalidSignatureResponse.
        with mock.patch('tbk.soap.zeep_client.verify_envelope', return_value=False):
            request = self.create_soap_request('acknowledgeTransaction', 'token')
            self.assertRaises(InvalidSignatureResponse, self.zeep_client.request, request)

    @mock.patch('tbk.soap.zeep_client.verify_envelope', return_value=True)
    def test_request_sent_received_data(self, requests, __):
        expected_response = get_fixture_data('acknowledgeTransaction.response.xml').encode('utf-8')
        requests.register_uri(
            'POST',
            'https://webpay3g.transbank.cl:443/WSWebpayTransaction/cxf/WSWebpayService',
            content=expected_response)
        acknowledge_transaction_method = self.zeep_client.client.service.acknowledgeTransaction
        with mock.patch.object(self.zeep_client.client.service, 'acknowledgeTransaction') as method:
            method.side_effect = acknowledge_transaction_method
            request = self.create_soap_request('acknowledgeTransaction', 'token')
            # request() returns the result plus raw sent/received envelopes.
            result, last_sent, last_received = self.zeep_client.request(request)
            method.assert_called_once_with('token')
            assert_equal_xml(expected_response, last_received)
            assert_equal_xml(requests.last_request.text.encode('utf-8'), last_sent)

    def create_soap_request(self, method_name, *args, **kwargs):
        # Helper to build a SoapRequest for the method under test.
        return SoapRequest(method_name=method_name, args=args, kwargs=kwargs)
class ZeepWssePluginTest(unittest.TestCase):
    """Tests for the ZeepWsseSignature plugin: envelope signing and verification."""

    def setUp(self):
        # Commerce key/cert sign; the Transbank cert is a *different* signer,
        # used to exercise verification failures.
        signer_key_data = get_fixture_data('597020000547.key')
        signer_cert_data = get_fixture_data('597020000547.crt')
        tbk_cert_data = get_fixture_data('tbk.pem')
        self.tbk_cert = load_key_from_data(tbk_cert_data, key_format='CERT_PEM')
        self.signer_key = load_key_from_data(signer_key_data, signer_cert_data)
        self.signer_cert = load_key_from_data(signer_cert_data, key_format='CERT_PEM')
        self.envelope = get_xml_envelope('bare.acknowledgeTransaction.response.xml')
        # Keep both an unsigned and a signed copy of the same envelope.
        self.signed_envelope = copy.deepcopy(self.envelope)
        sign_envelope(self.signed_envelope, self.signer_key)

    def test_sign_request(self):
        # apply() must sign the outgoing envelope and pass headers through.
        plugin = ZeepWsseSignature(self.signer_key, None)
        headers = mock.Mock()
        result_envelope, result_headers = plugin.apply(self.envelope, headers)
        self.assertEqual(headers, result_headers)
        self.assertTrue(verify_envelope(result_envelope, self.signer_cert))

    def test_verify_response(self):
        # Verification succeeds with the certificate matching the signer.
        plugin = ZeepWsseSignature(None, self.signer_cert)
        plugin.verify(self.signed_envelope)

    def test_do_not_verify_response(self):
        # Verification with an unrelated certificate must fail.
        plugin = ZeepWsseSignature(None, self.tbk_cert)
        self.assertRaises(InvalidSignatureResponse, plugin.verify, self.signed_envelope)

    def test_do_not_verify_unsigned_response(self):
        # An unsigned envelope must also be rejected.
        plugin = ZeepWsseSignature(None, self.tbk_cert)
        self.assertRaises(InvalidSignatureResponse, plugin.verify, self.envelope)
|
"""3D cone shape settings."""
from ctypes import byref, c_float
from .fmodobject import _dll
from .utils import ckresult
class ConeSettings:
    """Convenience wrapper class to handle 3D cone shape settings for simulated
    occlusion which is based on direction.
    """

    def __init__(self, sptr, class_name):
        """Constructor.

        Creates ConeSettings for an FMOD object.

        Usually not called directly, but through the
        `cone_settings` or `threed_cone_settings` property of an FMOD object.

        The :py:class:`~pyfmodex.flags.MODE` flag THREED must be set on this
        object otherwise :py:const:`~pyfmodex.enums.RESULT.NEEDS3D` is
        returned.

        When
        :py:meth:`~pyfmodex.channel_control.ChannelControl.cone_orientation` is
        set and a 3D 'cone' is set up, attenuation will automatically occur for
        a sound based on the relative angle of the direction the cone is
        facing, vs the angle between the sound and the listener.

        - If the relative angle is within the :py:attr:`inside_angle`, the
          sound will not have any attenuation applied.
        - If the relative angle is between the :py:attr:`inside_angle` and
          :py:attr:`outside_angle`, linear volume attenuation (between 1 and
          :py:attr:`outside_volume`) is applied between the two angles until it
          reaches the :py:attr:`outside_angle`.
        - If the relative angle is outside of the :py:attr:`outside_angle`
          the volume does not attenuate any further.

        :param sptr: pointer of the object having cone settings.
        :param class_name: class of the object having cone settings (Channel or
            ChannelGroup)
        """
        self._sptr = sptr
        # C float slots filled in-place by the FMOD getter below.
        self._in = c_float()
        self._out = c_float()
        self._outvol = c_float()
        # Resolve the concrete FMOD C functions by class name.
        self._get_func = "FMOD_%s_Get3DConeSettings" % class_name
        self._set_func = "FMOD_%s_Set3DConeSettings" % class_name
        ckresult(
            getattr(_dll, self._get_func)(
                self._sptr, byref(self._in), byref(self._out), byref(self._outvol)
            )
        )

    @property
    def inside_angle(self):
        """Inside cone angle.

        This is the angle spread within which the sound is unattenuated.
        Between 0 and 360.

        :type: int
        """
        # NOTE(review): stored as c_float even though documented as int -- confirm.
        return self._in.value

    @inside_angle.setter
    def inside_angle(self, angle):
        self._in = c_float(angle)
        self._commit()

    @property
    def outside_angle(self):
        """Outside cone angle.

        This is the angle spread outside of which the sound is attenuated to
        its :py:attr:`outside_volume`. Between 0 and 360.

        :type: int
        """
        return self._out.value

    @outside_angle.setter
    def outside_angle(self, angle):
        self._out = c_float(angle)
        self._commit()

    @property
    def outside_volume(self):
        """Cone outside volume.

        Between 0 and 1.

        :type: float
        """
        return self._outvol.value

    @outside_volume.setter
    def outside_volume(self, vol):
        self._outvol = c_float(vol)
        self._commit()

    def _commit(self):
        """Apply a changed cone setting by pushing all three values to FMOD."""
        ckresult(
            getattr(_dll, self._set_func)(self._sptr, self._in, self._out, self._outvol)
        )
|
import pygame
from pprint import pprint
import glob
from progress.bar import Bar
from classes.deck import Deck
from classes.cards import Card
from classes.player import Player
from classes.dealer import Dealer
from classes.table import Table
from classes.game import Game
def load_imgs(folder):
    """Load every PNG in *folder*, scaled to 200x300, keyed by file stem.

    Returns:
        dict: image name (file name without directory or extension) mapped to
        the scaled pygame surface.
    """
    import os  # local import: used only for robust file-name parsing here
    files = glob.glob(folder + "/*.png")
    imgs = {}
    bar = Bar('Loading Images', max=len(files))
    for file in files:
        # Bug fix: the original used file.strip("imgs\\").strip(".png"), but
        # str.strip removes any characters of the *set* from both ends, which
        # mangles names such as "gS.png" or "5N.png". Parse the path instead.
        img_name = os.path.splitext(os.path.basename(file))[0]
        img = pygame.image.load(file)
        img = pygame.transform.scale(img, (200, 300))
        imgs[img_name] = img
        bar.next()
    bar.finish()
    return imgs
if __name__ == '__main__':
    import sys  # bug fix: sys.exit() below was called without importing sys

    pygame.init()
    # Loading
    imgs = load_imgs("imgs")
    s_heigh = 720
    s_width = 1200
    screen = pygame.display.set_mode((s_width, s_heigh))
    pygame.display.set_caption("Poker Game")
    clock = pygame.time.Clock()
    while (True):
        clock.tick(50)
        # Process events
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
        # get keys pressed
        keys = pygame.key.get_pressed()
        if keys[pygame.K_q]:
            # pygame.mixer.music.stop()
            pygame.quit()
            quit()
        # Clear the screen
        screen.fill((0, 0, 0))
        screen.blit(imgs['2C'], (0, 0))
        screen.blit(imgs['4H'], (0, 10))
        pygame.display.update()
    # game = Game()
    # game.build(2,['igor','bob'])
    # game.play_turn()
|
# from django.test import TestCase
# from .models import Project,Rating
# from django.contrib.auth.models import User
# # Create your tests here.
# class ProjectTestClass(TestCase):
# # Set up method
# def setUp(self):
# # Creating a new location and saving it
# self.new_user=User(username='mary',email='mmarynjerikamau@gmail.com',password='njeri2018')
# self.new_user.save()
# self.new_project= Project(user=self.new_user,title='Pizza Shop',url='https://localhost:8000',description='This ia a django test description',technologies='Django')
# self.new_project.save()
# # Tear Down method
# def tearDown(self):
# Project.objects.all().delete()
# User.objects.all().delete()
# # Testing instance
# def test_instance(self):
# self.assertTrue(isinstance(self.new_project,Project))
# # Testing Save Method
# def test_save_method(self):
# self.new_project1= Project(user=self.new_user,title='Pizza Shop',url='https://localhost:8000',description='This ia a django test description',technologies='Django')
# self.new_project1.save_project()
# projects = Project.objects.all()
# self.assertTrue(len(projects) == 2)
# # Testing get all images Method
# def test_get_all_projects_method(self):
# projects = Project.get_all_projects()
# self.assertTrue(len(projects) == 1)
# # Testing get all images Method
# def test_get_project_by_id_method(self):
# project = Project.get_project_by_id(self.new_project.id)
# self.assertEqual(project.id,self.new_project.id)
# # Testing delete method
# def test_delete_project(self):
# Project.delete_project(self.new_project.id)
# projects = Project.get_all_projects()
# self.assertTrue(len(projects) == 0)
# # Testing search project by title method
# def test_search_project(self):
# projects=Project.search_project('zza')
# projectss=Project.search_project('Taa')
# self.assertFalse(len(projectss) > 0)
# self.assertTrue(len(projects) > 0)
# # Testing filter by userid method
# def test_filter_by_userid(self):
# projects=Project.filter_by_userid(self.new_user.id)
# self.assertTrue(len(projects) > 0)
# class RatingTestClass(TestCase):
# # Set up method
# def setUp(self):
# # Creating a new location and saving it
# self.new_user=User(username='denno',email='a@gmail.com',password='qwerty1234')
# self.new_user.save()
# self.new_project= Project(user=self.new_user,title='Pizza Shop',url='https://localhost:8000',description='This ia a django test description',technologies='Django')
# self.new_project.save()
# self.new_rating=Rating(user=self.new_user,project=self.new_project,design=5,usability=8,content=7,score=6.67)
# self.new_rating.save()
# # Tear Down method
# def tearDown(self):
# Rating.objects.all().delete()
# Project.objects.all().delete()
# User.objects.all().delete()
# # Testing instance
# def test_instance(self):
# self.assertTrue(isinstance(self.new_rating,Rating))
# # Testing Save Method
# def test_save_method(self):
# self.new_rating1= Rating(user=self.new_user,project=self.new_project,design=5,usability=8,content=7,score=6.67)
# self.new_rating1.save_rating()
# ratings = Rating.objects.all()
# self.assertTrue(len(ratings) == 2)
# # Testing get_project_ratings Method
# def test_get_project_ratings_method(self):
# self.new_rating1= Rating(user=self.new_user,project=self.new_project,design=5,usability=8,content=7,score=6.67)
# self.new_rating1.save_rating()
# ratings = Rating.get_project_ratings(self.new_project.id)
# self.assertTrue(len(ratings) == 2) |
import numpy as np
class Part(object):
    """Locate a facial sub-region from dlib landmarks and expose its centre.

    After construction, ``region_list`` holds the int32 landmark coordinates
    of the chosen side and ``x``/``y`` hold the integer centroid; all stay
    ``None`` when *side* is not one of 0-3.
    """
    FACE_TOP = [21, 22]
    FACE_BOTTOM = [7, 8, 9]
    FACE_LEFT = [0, 1, 2, 3]
    FACE_RIGHT = [13, 14, 15, 16]

    def __init__(self, landmarks, side):
        self.frame = None
        self.region_list = None
        self.x = None
        self.y = None
        # frame is populated elsewhere from the webcam feed
        self._analyze(landmarks, side)

    def region(self, landmarks, points):
        """Return the (x, y) landmark coordinates for *points* as int32."""
        coords = [(landmarks.part(idx).x, landmarks.part(idx).y) for idx in points]
        return np.array(coords).astype(np.int32)

    def location(self, region_list):
        """Return the truncated integer centroid (x, y) of *region_list*."""
        count = len(region_list)
        centre_x = int(sum(pt[0] for pt in region_list) / count)
        centre_y = int(sum(pt[1] for pt in region_list) / count)
        return centre_x, centre_y

    def _analyze(self, landmarks, side):
        # Map the side code to its landmark indices; unknown codes are a no-op.
        side_points = {
            0: self.FACE_LEFT,
            1: self.FACE_RIGHT,
            2: self.FACE_TOP,
            3: self.FACE_BOTTOM,
        }
        points = side_points.get(side)
        if points is None:
            return
        self.region_list = self.region(landmarks, points)
        self.x, self.y = self.location(self.region_list)
# First install spidev:
# Enable SPI (sudo raspi-config)
# $ sudo apt-get update
# $ sudo apt-get upgrade
# $ sudo apt-get install python-dev
# $ sudo reboot
# $ wget https://github.com/doceme/py-spidev/archive/master.zip
# $ unzip master.zip
# $ cd py-spidev-master
# $ sudo python setup.py install
from spidev import SpiDev
class MCP3008:
    """Minimal spidev-based reader for an MCP3008 8-channel SPI ADC.

    NOTE(review): the command framing (cmd1 = start bit | single-ended bit |
    channel MSB, cmd2 = low channel bits in the top nibble of the second
    byte) and the ``adc[1] & 15`` result mask differ from the common
    ``& 3`` 10-bit variant — presumably this matches a shifted clocking
    scheme; verify against the MCP3008 datasheet before reuse.
    """
    def __init__(self, bus = 0, device = 0):
        # bus/device select /dev/spidev<bus>.<device>
        self.bus, self.device = bus, device
        self.spi = SpiDev()
        self.open()
        self.spi.max_speed_hz = 1000000 # 1MHz
    def open(self):
        """(Re)open the SPI device; the clock speed must be set after open."""
        self.spi.open(self.bus, self.device)
        self.spi.max_speed_hz = 1000000 # 1MHz
    def read(self, channel = 0):
        """Read and return the raw ADC value of *channel* (0-7)."""
        cmd1 = 4 | 2 | (( channel & 4) >> 2)
        cmd2 = (channel & 3) << 6
        # 3-byte full-duplex transfer; the result arrives in the last 2 bytes.
        adc = self.spi.xfer2([cmd1, cmd2, 0])
        data = ((adc[1] & 15) << 8) + adc[2]
        return data
    def close(self):
        """Release the SPI device."""
        self.spi.close()
|
import py, os
class NullPyPathLocal(py.path.local):
    """A py.path.local stand-in whose open() always targets os.devnull.

    Useful as a sink path in tests; reprs are tagged '[fake]' so they are
    recognisable in logs.
    """
    def join(self, *args):
        """Join path components, preserving the Null subclass."""
        return self.__class__(py.path.local.join(self, *args))
    def open(self, mode='r'):
        """Open os.devnull instead of the real path.

        BUG FIX: the original required *mode* explicitly, breaking callers
        that rely on py.path.local.open's default read mode; 'r' restores
        drop-in compatibility while keeping explicit-mode calls unchanged.
        """
        return open(os.devnull, mode)
    def __repr__(self):
        return py.path.local.__repr__(self) + ' [fake]'
|
import os
import random
import glob
import numpy as np
random.seed(1234)
data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'VCTK')
print(data_dir)
# NOTE(review): "**" only recurses when glob.glob(..., recursive=True) is
# passed; as written it matches exactly one directory level. Confirm the
# layout is data/VCTK/<speaker>/<file>.spec.npy.
pathlist = glob.glob(os.path.join(data_dir, "**", "*.spec.npy"))
pathlist = [path for path in pathlist if "whisperized" not in path]
# filter out bad items
#pathlist = [path for path in pathlist if \
#    os.path.exists(path.replace(".spec", ".mel")) and \
#    os.path.exists(path.replace("wav48", "txt").replace(".spec.npy", ".txt")) and \
#    os.path.exists(path.replace("wav48", "txt").replace(".spec.npy", ".phones")) \
#]
seen_speakers = ['p336', 'p240', 'p262', 'p333', 'p297', 'p339', 'p276', 'p269', 'p303', 'p260', 'p250', 'p345', 'p305', 'p283', 'p277', 'p302', 'p280', 'p295', 'p245', 'p227', 'p257', 'p282', 'p259', 'p311', 'p301', 'p265', 'p270', 'p329', 'p362', 'p343', 'p246', 'p247', 'p351', 'p263', 'p363', 'p249', 'p231', 'p292', 'p304', 'p347', 'p314', 'p244', 'p261', 'p298', 'p272', 'p308', 'p299', 'p234', 'p268', 'p271', 'p316', 'p287', 'p318', 'p264', 'p313', 'p236', 'p238', 'p334', 'p312', 'p230', 'p253', 'p323', 'p361', 'p275', 'p252', 'p374', 'p286', 'p274', 'p254', 'p310', 'p306', 'p294', 'p326', 'p225', 'p255', 'p293', 'p278', 'p266', 'p229', 'p335', 'p281', 'p307', 'p256', 'p243', 'p364', 'p239', 'p232', 'p258', 'p267', 'p317', 'p284', 'p300', 'p288', 'p341', 'p340', 'p279', 'p330', 'p360', 'p285']
# Every utterance from a speaker NOT in seen_speakers becomes test data.
test_list = [path for path in pathlist if not any(spkr in path for spkr in seen_speakers)]
test_set = set(test_list)  # O(1) membership instead of an O(n) list scan per path
remain_list = [path for path in pathlist if path not in test_set]
random.shuffle(remain_list)
# TODO: create test list by holding out all data of some specific speakers
# NOTE(review): the 80% cut is computed from len(pathlist), not
# len(remain_list); kept as-is to preserve the original split sizes.
cut = int(np.round(len(pathlist) * 0.8))
# Use context managers so the list files are flushed and closed (the
# original leaked the file handles from open(...).write(...)).
with open('data/VCTK/vctk_train.list', 'w') as f:
    f.write("\n".join(remain_list[:cut]))
with open('data/VCTK/vctk_eval.list', 'w') as f:
    f.write("\n".join(remain_list[cut:]))
with open('data/VCTK/vctk_test.list', 'w') as f:
    f.write("\n".join(test_list))
|
from itertools import count
# ideone.com/aVndFM
def postponed_sieve():  # postponed sieve, by Will Ness
    """Yield the primes 2, 3, 5, 7, 11, ... indefinitely.

    Incremental "postponed" sieve of Eratosthenes: a multiple stream for a
    base prime p is only entered into the dict once p*p is reached, keeping
    memory proportional to the primes below sqrt(current candidate).
    Original code David Eppstein / Alex Martelli, ActiveState recipe 2002.
    """
    yield 2; yield 3; yield 5; yield 7
    sieve = {}
    ps = postponed_sieve()              # a separate base primes supply
    p = next(ps) and next(ps)           # (3) a prime to add to dict
    q = p*p                             # (9) its square
    for c in count(9, 2):               # the candidate (odd numbers)
        if c in sieve:                  # c is a multiple of some base prime
            s = sieve.pop(c)
        elif c < q:
            yield c                     # a prime
            continue
        else:                           # (c == q): next base prime's square
            s = count(q + 2*p, 2*p)     # (9+6, by 6: 15, 21, 27, 33, ...)
            p = next(ps)                # (5)
            q = p*p                     # (25)
        for m in s:                     # advance to the next free multiple
            if m not in sieve:          # no duplicates
                break
        sieve[m] = s
def backwardsPrime(start, stop):
    """Return, in order, those of the first stop+1 primes whose digit
    reversal is a *different* prime within that same set.

    NOTE(review): `start` is unused, as in the original — for the
    "backwards read primes" kata one would expect range semantics
    [start, stop]; behavior is preserved pending confirmation.

    Improvements over the original: debug prints removed, and membership
    testing uses a set (the original scanned a list per candidate, O(n^2)).
    """
    gen = postponed_sieve()
    primes = [next(gen) for _ in range(stop + 1)]
    prime_set = set(primes)
    return [p for p in primes
            if int(str(p)[::-1]) in prime_set and int(str(p)[::-1]) != p]
if __name__ == '__main__':
    # Demo run, moved under a main guard so importing this module no longer
    # triggers the (expensive) computation and print.
    print(backwardsPrime(9900, 10000))
|
# coding: utf-8
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import absolute_import
import unittest
import datadog_api_client.v1
from datadog_api_client.v1.api.dashboard_lists_api import DashboardListsApi # noqa: E501
class TestDashboardListsApi(unittest.TestCase):
    """DashboardListsApi unit test stubs.

    Auto-generated placeholders: each test names the endpoint it should
    cover and currently passes without making any API call or assertion.
    """
    def setUp(self):
        # Client under test, built with default configuration.
        self.api = DashboardListsApi()  # noqa: E501
    def tearDown(self):
        pass
    def test_create_dashboard_list(self):
        """Test case for create_dashboard_list
        Create a dashboard list # noqa: E501
        """
        pass
    def test_delete_dashboard_list(self):
        """Test case for delete_dashboard_list
        Delete a dashboard list # noqa: E501
        """
        pass
    def test_get_dashboard_list(self):
        """Test case for get_dashboard_list
        Get a dashboard list # noqa: E501
        """
        pass
    def test_list_dashboard_lists(self):
        """Test case for list_dashboard_lists
        Get all dashboard lists # noqa: E501
        """
        pass
    def test_update_dashboard_list(self):
        """Test case for update_dashboard_list
        Update a dashboard list # noqa: E501
        """
        pass
# Allow running this stub module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
|
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
#
# @file pythia.pyre/meshio/MeshIOAscii.py
#
# @brief Python object for reading/writing finite-element mesh from
# simple ASCII file.
#
# Factory: mesh_io
from .MeshIOObj import MeshIOObj
from .meshio import MeshIOAscii as ModuleMeshIOAscii
def validateFilename(value):
    """Validate filename.

    :param value: candidate mesh filename
    :return: *value* unchanged when it is nonempty and readable
    :raises ValueError: when *value* is empty
    :raises IOError: when the file cannot be opened for reading
    """
    if 0 == len(value):
        msg = "Filename for ASCII input mesh not specified. " + \
            "To test PyLith, run an example as discussed in the manual."
        raise ValueError(msg)
    try:
        # Probe readability with a context manager so the handle is closed
        # (the original open() leaked the file object).
        with open(value, "r"):
            pass
    except IOError:
        raise IOError("ASCII input mesh '{}' not found.".format(value))
    return value
class MeshIOAscii(MeshIOObj, ModuleMeshIOAscii):
    """Python object for reading/writing finite-element mesh from simple
    ASCII file.

    Bridges the Pyre component framework (inventory below) and the C++
    MeshIOAscii implementation (ModuleMeshIOAscii base).

    Factory: mesh_io
    """
    import pythia.pyre.inventory
    # Pyre inventory: user-configurable properties/facilities.
    filename = pythia.pyre.inventory.str("filename", default="",
                                         validator=validateFilename)
    filename.meta['tip'] = "Name of mesh file"
    from spatialdata.geocoords.CSCart import CSCart
    coordsys = pythia.pyre.inventory.facility("coordsys", family="coordsys",
                                              factory=CSCart)
    coordsys.meta['tip'] = "Coordinate system associated with mesh."
    # PUBLIC METHODS /////////////////////////////////////////////////////
    def __init__(self, name="meshioascii"):
        """Constructor.

        :param name: component name registered with the Pyre framework.
        """
        MeshIOObj.__init__(self, name)
        return
    def preinitialize(self):
        """Do minimal initialization.

        Pushes the inventory filename down to the C++ object.
        """
        MeshIOObj.preinitialize(self)
        ModuleMeshIOAscii.filename(self, self.filename)
        return
    # PRIVATE METHODS ////////////////////////////////////////////////////
    def _configure(self):
        """Set members based using inventory.
        """
        MeshIOObj._configure(self)
        return
    def _createModuleObj(self):
        """Create C++ MeshIOAscii object.
        """
        ModuleMeshIOAscii.__init__(self)
        return
# FACTORIES ////////////////////////////////////////////////////////////
def mesh_io():
    """Factory associated with MeshIOAscii.

    Pyre facility factory: returns a fresh MeshIOAscii component instance.
    """
    return MeshIOAscii()
# End of file
|
# Prints the module docstring when one precedes this example.
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import LassoCV
from sklearn.linear_model import Lasso
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
# Use only the first 150 diabetes samples to keep the example fast.
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = Lasso(random_state=0, max_iter=10000)
# 30 candidate regularisation strengths, log-spaced over [1e-4, 10^-0.5].
alphas = np.logspace(-4, -0.5, 30)
tuned_parameters = [{'alpha': alphas}]
n_folds = 5
# Grid-search alpha with 5-fold CV; refit=False because only the CV scores
# are plotted, not a final refitted model.
clf = GridSearchCV(lasso, tuned_parameters, cv=n_folds, refit=False)
clf.fit(X, y)
scores = clf.cv_results_['mean_test_score']
scores_std = clf.cv_results_['std_test_score']
plt.figure().set_size_inches(8, 6)
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
std_error = scores_std / np.sqrt(n_folds)
plt.semilogx(alphas, scores + std_error, 'b--')
plt.semilogx(alphas, scores - std_error, 'b--')
# alpha=0.2 controls the translucency of the fill color
plt.fill_between(alphas, scores + std_error, scores - std_error, alpha=0.2)
plt.ylabel('CV score +/- std error')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
plt.xlim([alphas[0], alphas[-1]])
# #############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = LassoCV(alphas=alphas, cv=5, random_state=0, max_iter=10000)
# Outer 3-fold split: fit LassoCV on each training fold and report the alpha
# it selects plus its score on the held-out fold.
k_fold = KFold(3)
print("Answer to the bonus question:",
      "how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold.split(X, y)):
    lasso_cv.fit(X[train], y[train])
    print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
          format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
# _*_ coding:utf-8 _*_
import logging
import re
from pyquery import PyQuery as pq
from app.spider_store.utils.content_cleaner import cleaner
from app.spider_store.common import (get_content,)
"""***中华网爬虫***"""
def zhonghua_news_download(url):
    """Download a china.com news article, following its pagination.

    :param url: article URL (page 1); later pages are derived by replacing
                ".html" with "_<n>.html"
    :return: dict with type/title/source/content/thumbnail_urls/image_urls
    :raises Exception: on fetch timeout or missing title/source
    :raises AssertionError: when cleaning fails or the article has no images
    """
    i = 1
    content_list = []
    title = None
    source = None
    thumbnail_urls = None
    while True:
        # Page 1 uses the base URL; subsequent pages append _<n>.html.
        if i == 1:
            detail_url = url
        else:
            detail_url = url.replace(".html", '_{}.html'.format(i))
        try:
            html = get_content(detail_url, )
        except Exception:
            raise Exception("获取文章内容超时")
        doc = pq(html)
        if i == 1:
            # Title and source only exist on the first page.
            title = doc("div.pleft.mt10 div.article-header h1.title").text()
            source = doc('div.pleft.mt10 div.article-header div.info div.left small#article-source').text()
            # Raw body HTML for later cleaning.
            div = doc('div.pleft.mt10 div.viewbox div#main-content').html()
            content_list.append(str(div))
            i += 1
        else:
            div = doc('div.pleft.mt10 div.viewbox div#main-content').html()
            content_list.append(str(div))
            i += 1
        # Stop when there is no "next page" link; hard cap at 30 pages.
        if not re.search(r"下一页</a>", html):
            break
        if i >= 30:
            break
    try:
        content = ''.join(content_list)
        content = cleaner(content)
        logging.debug('清洗完成')
    except Exception:
        # BUG FIX: was a bare `except:`, which would also swallow
        # SystemExit/KeyboardInterrupt.
        raise AssertionError("获取文章内容失败")
    # Collect in-article image URLs (empty list when none are found).
    image_urls = re.findall(r'src=[\'|"](.*?)[\'|"]', content, re.S)
    assert image_urls, "文章中缺少图片"
    image_urls_final = []
    scheme_re = re.compile(r'http:|https:')  # hoisted out of the loop
    for img_url in image_urls:
        # BUG FIX: loop variable renamed so it no longer shadows the `url`
        # parameter.
        if scheme_re.match(img_url):
            image_urls_final.append(img_url)
        else:
            image_urls_final.append('http://kan.china.com' + img_url)
    # Thumbnail defaults to the first article image.
    if not thumbnail_urls:
        thumbnail_urls = [image_urls_final[0]]
    if (title and source):
        data = {
            "type": 'news',
            "title": title,
            "source": source,
            "content": content,
            "thumbnail_urls": thumbnail_urls,
            "image_urls": image_urls_final,
        }
    else:
        raise Exception("获取标题和来源失败")
    return data
def zhonghua_video_download(url):
    """Placeholder for china.com video downloads — not implemented yet.

    :param url: video page URL (must be truthy)
    """
    assert url
    pass
def zhonghua_spider(url):
    """Dispatch *url* to the matching downloader (video vs. news)."""
    handler = zhonghua_video_download if news_type(url) == "video" else zhonghua_news_download
    return handler(url)
def news_type(url):
    """Classify *url*: empty/None maps to "video", anything else to "news".

    Example news URL: https://kan.china.com/article/582319.html
    """
    return "video" if not url else "news"
# Module-level entry point expected by the spider registry.
download = zhonghua_spider
if __name__ == '__main__':
    # Manual smoke test against a live article.
    url = 'https://kan.china.com/article/582319.html'
    data = zhonghua_spider(url)
    for key, value in data.items():
        print(key+':'+'{}'.format(value))
|
'''
# Traffic light detection
'''
import cv2
import numpy as np
import time
from rmracerlib import config as cfg
from rmracerlib.cv.func import sign_direction, direction_check, valid_range
## FILTERS
# brightness filter
#brightness_lower = np.array([0, 0, 255])
#brightness_upper = np.array([180, 255, 255])
# HSV window passed to cv2.inRange: any hue, saturation 30-150, and value
# exactly 255 (lower == upper == 255), i.e. only fully bright pixels.
brightness_lower = np.array([0, 30, 255])
brightness_upper = np.array([180, 150, 255])
# For changing colour counter filter values in light_detect,
# go to the function where the counter is.
# set kernel for operations
# Rectangular structuring element used by the open/dilate steps below.
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, ksize = (cfg.TRAFFIC_KERNEL_SIZE, cfg.TRAFFIC_KERNEL_SIZE))
def light_signal(shape):
    """
    Expects: HSV Region of Interest (rows x cols x 3 array)
    Returns: "go", "stop" or None

    Vectorised replacement for the original per-pixel Python double loop:
    each pixel is classified with the same priority as the original
    if/elif chain (green first, then red, then amber), but at numpy speed.
    """
    # Channel views; the saturation channel was split but never used.
    h = shape[:, :, 0]
    v = shape[:, :, 2]
    # Per-pixel colour masks, mutually exclusive to mirror the elif chain.
    green = (60 <= h) & (h <= 85) & (v <= 248)
    red = ~green & (((170 <= h) & (h <= 180)) | ((0 <= h) & (h <= 14))) & (v <= 200)
    amber = ~green & ~red & (8 <= h) & (h <= 20) & (v <= 254)
    g_counter = int(np.count_nonzero(green))
    r_counter = int(np.count_nonzero(red))
    y_counter = int(np.count_nonzero(amber))
    if g_counter > cfg.COUNTER_THRESHOLD_GREEN:
        return "go"
    if r_counter > cfg.COUNTER_THRESHOLD_RED:
        return "stop"
    if y_counter > cfg.COUNTER_THRESHOLD_AMBER:
        # Amber is deliberately treated as "no actionable signal".
        return None
    return None
def detect_traffic(frame, hsv):
    """
    Expects: HSV image of any shape + current frame
    Returns: "go"/"stop"/None from the first sufficiently large bright
    contour, or None when no candidate is found.
    """
    #hsv = cv2.cvtColor(frame, cfg.COLOUR_CONVERT) # convert to HSV CS
    # perform filter and operations
    # Keep only bright pixels, then open to remove speckle and dilate to
    # merge nearby fragments of a lamp.
    mask = cv2.inRange(hsv, brightness_lower, brightness_upper)
    morph_open = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    final = cv2.dilate(morph_open, kernel,iterations=2)
    # contours detection
    contours, _ = cv2.findContours(final, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        # create a rect box around each object located
        x,y,w,h = cv2.boundingRect(cnt)
        # OPTION 1: extract Region of Interest (ROI) from original image
        #roi = frame[y-15:y+h+15, x-15:x+w+15]
        # OPTION 2: extract ROI from HSV colour space (more efficient)
        #roi = hsv[y-15:y+h+15, x-15:x+w+15]
        roi = hsv[y:y+h, x:x+w]
        # calculate area to determine if size worth looking at
        area = cv2.contourArea(cnt)
        if cfg.AREA_SIZE_TRAFFIC < area < cfg.MAX_AREA_SIZE:
            # give ROI to pixel detect functions
            result = light_signal(roi)
            # show output in Demo Mode
            if cfg.DEMO_MODE and result:
                #print(area)
                if result == "stop":
                    color = (0,0,255)
                else:
                    color = (0,255,0)
                cv2.rectangle(frame, (x,y), (x+w, y+h), color, 2)
                cv2.putText(frame, result, (x+w, y+h), cfg.FONT, 1, color)
            # NOTE(review): returns on the FIRST contour inside the size
            # window, even when its colour check yields None — later
            # contours are never inspected. Confirm this is intended.
            # return result
            return result
    # no colour significiant enough to be found
    return None
|
#!/usr/bin/env python
import os
import sys
from DIRAC import S_OK, S_ERROR, gConfig, gLogger, exit
from DIRAC.Core.Base import Script
import DIRAC
usageMsg = '''Get squids for a site.
{0} [option|cfgfile] site'''.format(Script.scriptName)
Script.setUsageMessage(usageMsg)
Script.parseCommandLine(ignoreErrors=False)
args = Script.getPositionalArgs()
switches = Script.getUnprocessedSwitches()
# Default to the local site name when no site argument is given.
if len(args) == 0:
    site = DIRAC.siteName()
else:
    site = args[0]
squidurl = ""
# NOTE(review): each iteration overwrites squidurl, so only the LAST
# configured squid is printed; the trailing space in the URL suggests
# concatenation (+=) may have been intended — confirm with maintainers.
for squid in gConfig.getValue( 'Resources/Squids/%s' % ( site ), [] ):
    squidurl = "http://" + squid + ":3128 "
# BUG FIX: print as a function — the original used the Python 2 print
# statement, which is a SyntaxError under Python 3.
print("%s" % squidurl)
|
class Solution(object):
    def trap(self, height):
        """Compute the units of rain water trapped over elevation map *height*.

        Finds the global maximum, then sweeps outward from it on each side,
        jumping between running-maximum indices and using prefix sums to
        account for whole plateaus at once; linear time and space.
        The four debug print() calls from the original have been removed.

        :param height: list of non-negative integer bar heights
        :return: total trapped water (0 for fewer than 3 bars)
        """
        if len(height) < 3:
            return 0
        # Index of the global maximum bar.
        max_idx = 0
        cur_max = 0
        for i in range(len(height)):
            if height[i] > cur_max:
                cur_max = height[i]
                max_idx = i
        # Prefix sums: sum_arr[k] == sum(height[:k]).
        sum_arr = [0]
        max_left_arr = []
        max_right_arr = []
        for item in height:
            sum_arr.append(sum_arr[-1] + item)
        # max_left_arr[i]: index of the tallest bar in height[:i+1].
        cur_max = 0
        cur_max_index = 0
        for i, item in enumerate(height):
            if item > cur_max:
                cur_max = item
                cur_max_index = i
            max_left_arr.append(cur_max_index)
        # max_right_arr[i]: index of the tallest bar in height[i:].
        cur_max = 0
        cur_max_index = len(height) - 1
        for i, item in reversed(list(enumerate(height))):
            if item > cur_max:
                cur_max = item
                cur_max_index = i
            max_right_arr.append(cur_max_index)
        max_right_arr = list(reversed(max_right_arr))
        total = 0
        # Walk left from the peak: water between consecutive running maxima
        # is bounded by the lower (outer) bar; subtract the bars in between.
        left_index = max_idx
        while left_index != 0:
            new_left_index = max_left_arr[left_index - 1]
            total += (left_index - new_left_index - 1) * height[new_left_index] - (sum_arr[left_index] - sum_arr[new_left_index + 1])
            left_index = new_left_index
        # Mirror walk to the right of the peak.
        right_index = max_idx
        while right_index != len(height) - 1:
            new_right_index = max_right_arr[right_index + 1]
            total += (new_right_index - right_index - 1) * height[new_right_index] - (sum_arr[new_right_index] - sum_arr[right_index + 1])
            right_index = new_right_index
        return total
if __name__ == "__main__":
    # Manual smoke test for Solution.trap.
    sol = Solution()
    a = sol.trap([0,1,0,2,1,0,1,3])
    print("result = {}".format(a))
|
#!/usr/bin/python
# ==============================================================================
# Author: Tao Li (taoli@ucsd.edu)
# Date: Jun 4, 2015
# Question: 043-Multiply-Strings
# Link: https://leetcode.com/problems/multiply-strings/
# ==============================================================================
# Given two numbers represented as strings, return multiplication of the
# numbers as a string.
#
# Note: The numbers can be arbitrarily large and are non-negative.
# ==============================================================================
# Method: build-in multiplication method
# Note: Lazy boy~ simulate the multiplication operation in regits next time
# ==============================================================================
class Solution:
    # @param {string} num1
    # @param {string} num2
    # @return {string}
    def multiply(self, num1, num2):
        """Return the product of two non-negative decimal number strings,
        relying on Python's arbitrary-precision integers."""
        product = int(num1) * int(num2)
        return str(product)
# Encoding: utf-8
""" Module to interact with the Matlab GUI for spine segmentation
and pulling out time courses from the data
"""
from __future__ import print_function
import copy
import logging
import os
import sys
import itertools
from builtins import zip
from builtins import map
import SimpleITK as sitk
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import plotly.graph_objs as go
import plotly.offline as py
import pandas as pd
import scipy.ndimage as ndimg
import skimage.external.tifffile as tf
import tables
import tqdm
from future.utils import iteritems
from networkx.algorithms.dag import descendants
from IPython.display import clear_output
from neurom.io import load_data
from scipy.interpolate import griddata
from scipy.ndimage.morphology import binary_dilation
from scipy.spatial import KDTree
from sklearn import linear_model
from prep.IO import loadmat, writeTiff
from prep.Log import add_logging_to_file
from prep.Utils import getStructure, log_progress, convert_8bit, angle
# Setup logging
# Module-level logger; existing handlers are cleared so repeated imports
# (e.g. in notebooks) do not stack duplicate stream handlers.
logger = logging.getLogger(__name__)
logger.handlers = []
logger.setLevel(logging.DEBUG)
# Console handler: INFO and above to stdout with a detailed format.
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(logging.Formatter("%(name)s @ %(asctime)s - [%(levelname)s] %(module)s::%(funcName)s: %(message)s"))
ch.setLevel(logging.INFO)
logger.addHandler(ch)
def initTimeDict(session, maskName='mask', um3bin=100, dilate1=0.5, dilate2=0.5, zStep=1, anatomyZstep=1.6):
    """ Initialize a time courses dictionary
    :param session: current session object
    :param maskName: mask name
    :param um3bin: microns^3 per bin in dendrite
    :param dilate1: first dilation of each spine to subtract from dendrite mask
    :param dilate2: second dilation of the combined mask
    :param zStep: zStep of extended stack
    :param anatomyZstep: Anatomical stack of FOV z spacing
    :return: a timeDict dictionary
    """
    timeDict = dict()
    # Paths and registration transforms copied off the session object.
    timeDict['path'] = session.path
    timeDict['fullPath'] = session.path + maskName
    timeDict['fieldsTform'] = session.embedDict['TFormExp']['fieldsTform']
    # Voxel size: x/y from the session (nm -> um), z from the stack step.
    timeDict['pixelSize'] = np.array([session.pixSizeXY / 1000, session.pixSizeXY / 1000, zStep])
    timeDict['um3bin'] = um3bin
    timeDict['dilate1'] = dilate1
    timeDict['dilate2'] = dilate2
    timeDict['anatomyZstep'] = anatomyZstep
    timeDict['Fs'] = session.volRate
    # Imaging geometry copied from the session / options struct.
    timeDict['xyPixNum'] = session.Sp['OptionsStruct']['RefPixels']
    timeDict['UM_1X'] = float(session.Sp['OptionsStruct']['UM_1X'])
    timeDict['fieldMask'] = session.fieldMaskBool
    timeDict['imagingLines'] = session.ySize
    timeDict['imagingPixels'] = session.xSize
    timeDict['flyLines'] = session.optFlyLines
    # Registration results.
    timeDict['nExp'] = session.regDict['nZShifts']
    timeDict['finalShifts'] = session.regDict['finalShifts']
    timeDict['grpZPos'] = session.regDict['grpZPos']
    timeDict['groupZ'] = session.regDict['groupZ']
    timeDict['fix_spines'] = False
    # path to database
    FOV = 'FOV' + str(int(session.meta['FOV'][0]))
    path = os.path.join(session.basePath, 'Database', session.animalID, FOV, session.date + session.run)
    timeDict['databasePath'] = path
    timeDict['SessionPath'] = os.path.join(path, 'Session.tif')
    # add_logging_to_file('prep.Timecourses', timeDict['fullPath'], 'Timecourses.log')
    logger.info('timeDict initialized with full path:' + timeDict['fullPath'] + '.mat')
    return timeDict
def load_hdf5(filename):
    """ loads new .mat file save using the -v7.3 flag in matlab.

    MATLAB -v7.3 files are HDF5 containers, read here via PyTables.

    :param filename: file to load
    :return: dict with labelimg, dendTable, dend, quality, note, dendNum
    """
    fh = tables.open_file(filename)
    try:
        data = dict()
        # MATLAB stores arrays column-major; transpose back to row-major.
        data['labelimg'] = fh.root.newCell.labelimg[:].T
        data['dendTable'] = fh.root.newCell.dendTable[:].T
        dend = fh.root.newCell.dend[:]
        dendFix = []
        for d in dend:
            dendFix.append(d[0].T)
        data['dend'] = dendFix
        data['quality'] = fh.root.newCell.quality[:]
        data['note'] = fh.root.newCell.note[:]
        data['dendNum'] = np.squeeze(fh.root.newCell.dendNum[:]).astype(np.uint8)
        return data
    finally:
        # BUG FIX: the original never closed the HDF5 handle, leaking one
        # open file per call. All arrays above are materialised copies, so
        # closing here is safe.
        fh.close()
def loadMask(timeDict):
    """ load a mask from a mat file
    :param timeDict: time course dictionary needs fullPath
    :return adds: all the data from the mat file
    """
    try:
        logger.info('Trying to load: %s' % timeDict['fullPath'])
        data = loadmat(timeDict['fullPath'][:-4])
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; narrowed to Exception.
        try:
            data = loadmat(timeDict['fullPath'])
        except (NotImplementedError, TypeError, ValueError):
            logger.info('Trying to load hdf5 at: %s' % timeDict['fullPath'])
            # new -v7.3 mat file is a hdf5 file
            data = load_hdf5(timeDict['fullPath'])
    timeDict['labelimg'] = data['labelimg']
    timeDict['dendTable'] = data['dendTable']
    # Dendrite radii from the companion SWC reconstruction.
    a = load_data(os.path.join(timeDict['databasePath'], 'dendrite.swc'))
    timeDict['dendR'] = a.data_block[:, 3]
    timeDict['dend'] = data['dend']
    # MATLAB numbering is 1-based; shift to 0-based.
    timeDict['dendNumAll'] = data['dendNum'] - 1
    timeDict['quality'] = data['quality']
    timeDict['notes'] = data['note']
    timeDict['dims'] = timeDict['labelimg'].shape
    # Label 0 is background, hence the -1.
    timeDict['spineMasks'] = len(np.unique(timeDict['labelimg'])) - 1
    timeDict['dendNum'] = len(timeDict['dend'])
    logger.info('imported mask with dims: ' + str(timeDict['labelimg'].shape) + ' ,branches: ' +
                str(timeDict['dendNum']) + ', spine masks:' + str(timeDict['spineMasks']))
def binDendrite(sc, timeDict):
    """ bin the dendrite in um2bin steps
    :param sc: Spark Context
    :param timeDict: time course dictionary needs data from mat file and um2bin param
    :return: adds: dendLabelImg, dendLabelTable (plus dendPathLength, dendSize),
        and writes the label image to disk as 'dendLabelImg'
    """
    dims = timeDict['dims']
    labelimg = timeDict['labelimg']
    pixelSize = timeDict['pixelSize']
    dend = timeDict['dend']
    dendTable = timeDict['dendTable']
    dendLabelImg = np.zeros(dims)
    dendLabelTable = np.array([], ndmin=2)
    # last dendrite mask
    # New segment labels continue after the highest spine label.
    counter = np.max(labelimg) + 1
    dendLabelImg = dendLabelImg.reshape(-1)
    dendPathLength = []
    flagFirst = True
    # for each dendrite
    segmentSize = list()
    for i in log_progress(range(timeDict['dendNum']), name='Dendrites'):
        if dend[i].shape[0] == 0:
            logger.info('Skipped dendrite %d' % (i + 1,))
            continue
        # MATLAB coordinates are 1-based (and row/col swapped vs. x/y).
        X = dend[i][:, 1] - 1
        Y = dend[i][:, 0] - 1
        Z = dend[i][:, 2] - 1
        # find radii
        currentDend = dendTable[dendTable[:, 7] == i + 1, :]
        if len(dend[i].shape) < 2:
            r = currentDend[:, 5] / 2
        else:
            r = griddata(currentDend[:, [2, 3, 4]] / 2, currentDend[:, 5] / 2, dend[i][:, [0, 1, 2]], 'nearest')
        # path length distance
        distPath = np.sqrt((np.diff(X.astype(np.float32) * pixelSize[0])) ** 2 +
                           (np.diff(Y.astype(np.float32)) * pixelSize[1]) ** 2 +
                           (np.diff(Z.astype(np.float32)) * pixelSize[2]) ** 2)
        # find the distance along the dendrite and segment into um2bin parts (10um default)
        dist = np.sqrt((np.diff(X.astype(np.float32))) ** 2 +
                       (np.diff(Y.astype(np.float32))) ** 2 +
                       (np.diff(Z.astype(np.float32))) ** 2)
        # Weight step length by local cross-section so bins hold ~um3bin volume.
        dist = dist * ((4.0 / 3.0) * np.pi * r[:-1])
        stopPt = 0
        # logger.info('Dendrite: %d' % (i + 1))
        sys.stdout.flush()
        # for each dendrite segment dilate and add to label image
        if len(X) == 2:
            # Two-point dendrite: a single segment spanning both points.
            starts = [0]
            ends = [2]
            lengths = [np.sum(distPath[0:2])]
        else:
            starts = []
            ends = []
            lengths = []
            # Greedily cut the point list where the accumulated (volume-
            # weighted) distance exceeds um3bin.
            while stopPt < len(X):
                startPt = stopPt
                stop1 = np.where(np.cumsum(dist[startPt:]) > timeDict['um3bin'])[0] + startPt
                if len(stop1) > 0:
                    stopPt = np.min(np.array([stop1[0], len(X)]))
                else:
                    stopPt = len(X)
                if startPt == stopPt:
                    stopPt += 2
                starts.append(startPt)
                ends.append(stopPt)
                d = np.sum(distPath[startPt:stopPt])
                if d == 0.0:
                    logger.error('Distance is 0!')
                    break
                else:
                    lengths.append(d)
        rdd = sc.parallelize(list(zip(range(len(starts)), starts, ends)))
        def mask(kse):
            # Runs on Spark workers: rasterise one segment's points and
            # dilate by the segment's mean radius.
            key, start, stop = kse
            dendLabelImgTemp = np.zeros(dims)
            x1 = X[start:stop].astype(int)
            y1 = Y[start:stop].astype(int)
            z1 = Z[start:stop].astype(int)
            # make sure no overflow
            x1[x1 >= dims[0]] = dims[0] - 1
            y1[y1 >= dims[1]] = dims[1] - 1
            z1[z1 >= dims[2]] = dims[2] - 1
            dendLabelImgTemp[x1, y1, z1] = 1
            sSize = np.mean(r[start:stop])
            dendLabelImgTemp = binary_dilation(dendLabelImgTemp > 0,
                                               getStructure(np.array([pixelSize[0], pixelSize[1], 2.5]), sSize))
            index_inner = np.ravel_multi_index(np.where(dendLabelImgTemp > 0), dims)
            return key, sSize, index_inner
        indexRdd = rdd.map(mask).collect()
        # Write each collected segment into the flat label image.
        for segNum, size, index in indexRdd:
            dendLabelImg[index] = counter
            if flagFirst:
                dendLabelTable = np.array([counter, i, segNum], ndmin=2)
                flagFirst = False
            else:
                dendLabelTable = np.append(dendLabelTable, np.array([counter, i, segNum], ndmin=2), axis=0)
            counter += 1
            segmentSize.append(size)
        dendPathLength.append(np.array(lengths))
    dendLabelImg = dendLabelImg.reshape(labelimg.shape)
    timeDict['dendLabelImg'] = dendLabelImg
    timeDict['dendLabelTable'] = dendLabelTable
    timeDict['dendPathLength'] = dendPathLength
    timeDict['dendSize'] = segmentSize
    writeTiff(timeDict['path'], dendLabelImg, 'dendLabelImg')
def binDendriteByDistance(sc, timeDict, um4bin, hasSoma=True):
    """ bin each dendrite trace into segments of roughly um4bin path length
    :param sc: Spark Context used to build the per-segment voxel masks in parallel
    :param timeDict: time course dictionary; needs dims, labelimg, pixelSize, dend,
        dendTable, dendNum and (when hasSoma) Masks plus cellIndex/cellIndexAll
    :param um4bin: target path length (um) of one dendrite segment
    :param hasSoma: if True, the dendrites touching the soma are stitched into one segment
    :return: adds dendLabelImgB, dendLabelTableB, dendPathLengthB, dendSizeB and writes dendLabelImgB.tif
    """
    dims = timeDict['dims']
    labelimg = timeDict['labelimg']
    pixelSize = timeDict['pixelSize']
    dend = timeDict['dend']
    dendTable = timeDict['dendTable']
    dendLabelImg = np.zeros(dims)
    dendLabelTable = np.array([], ndmin=2)
    # dendrite segment labels start right after the last spine label
    counter = np.max(labelimg) + 1
    dendLabelImg = dendLabelImg.reshape(-1)
    dendPathLength = []
    flagFirst = True
    # find which dendrite numbers belong to the soma masks
    if hasSoma:
        if 'cellIndexAll' in timeDict:
            logger.info('Using cellIndexAll')
            soma_dends = np.unique(timeDict['Masks'].loc[timeDict['cellIndexAll']].DendNum.values)
        else:
            logger.info('Using cellIndex')
            soma_dends = np.unique(timeDict['Masks'].loc[timeDict['cellIndex']].DendNum.values)
        logger.info('Found %s as soma dendrite number(s)' % soma_dends)
    else:
        soma_dends = []
    # keep only soma dendrites that actually have a 2-D trace array
    # NOTE(review): when hasSoma is False, soma_dends is a plain list and
    # soma_dends[valid] (boolean-list indexing) would raise TypeError — confirm this path is exercised
    valid = [len(dend[k].shape) == 2 for k in soma_dends]
    soma_dends = soma_dends[valid]
    timeDict['soma_dends'] = soma_dends
    segmentSize = list()
    for i in log_progress(range(timeDict['dendNum']), name='Dendrites'):
        # skip empty or malformed (non 2-D) traces
        if (dend[i].shape[0] == 0) | (len(dend[i].shape) < 2):
            logger.info('Skipped dendrite %d' % (i + 1,))
            continue
        # the first soma dendrite carries the stitched soma segment
        if len(soma_dends)>0:
            if i == soma_dends[0]:
                somaFlag = True
            else:
                somaFlag = False
        else:
            somaFlag = False
        if len(soma_dends) >= 2 and somaFlag:
            # soma mask needs stitching: concatenate the traces of all soma dendrites
            # (note the x/y swap: column 1 of dend is X, column 0 is Y; -1 undoes MATLAB 1-indexing)
            a = [(dend[k][:, 1] - 1) for k in soma_dends]
            X = np.hstack(a)
            a = [(dend[k][:, 0] - 1) for k in soma_dends]
            Y = np.hstack(a)
            a = [(dend[k][:, 2] - 1) for k in soma_dends]
            Z = np.hstack(a)
            # NOTE(review): only indexs[0] and indexs[1] are combined, so >2 soma dendrites
            # would be silently dropped here — confirm at most two are expected
            indexs = [dendTable[:, 7] == k + 1 for k in soma_dends]
            index = np.logical_or(indexs[0], indexs[1])
            currentDend = dendTable[index, :]
        else:
            # non-first soma dendrites were already consumed by the stitched segment
            if len(soma_dends) >= 2 and i in soma_dends:
                continue
            X = dend[i][:, 1] - 1
            Y = dend[i][:, 0] - 1
            Z = dend[i][:, 2] - 1
            # rows of dendTable for this dendrite (column 7 holds the 1-based dendrite number)
            currentDend = dendTable[dendTable[:, 7] == i + 1, :]
        # per-point radius: interpolate the tabulated radii onto the trace points
        if len(dend[i].shape) < 2:
            r = currentDend[:, 5] / 2
        else:
            r = griddata(currentDend[:, [2, 3, 4]] / 2, currentDend[:, 5] / 2, dend[i][:, [0, 1, 2]], 'nearest')
        # Euclidean step lengths (um) along the trace
        # NOTE(review): pixelSize[0] multiplies X *inside* np.diff while the Y/Z scales
        # multiply the diff — equivalent for a constant pixel size, but confirm intent
        dist = np.sqrt((np.diff(X.astype(np.float32) * pixelSize[0])) ** 2 +
                       (np.diff(Y.astype(np.float32)) * pixelSize[1]) ** 2 +
                       (np.diff(Z.astype(np.float32)) * pixelSize[2]) ** 2)
        if somaFlag:
            # the soma is a single segment covering the whole stitched trace
            starts = [0]
            ends = [len(X)]
            lengths = [np.sum(dist)]
        else:
            if len(X) == 2:
                # NOTE(review): looks like a leftover `continue` — starts/ends/lengths keep the
                # values from the previous loop iteration here, so the previous dendrite's
                # segmentation is re-applied to this trace; verify this is intended
                pass  # continue
            else:
                # walk along the trace, cutting a new segment every um4bin of path length
                stopPt = 0
                starts = []
                ends = []
                lengths = []
                while stopPt < len(X):
                    startPt = stopPt
                    stop1 = np.where(np.cumsum(dist[startPt:]) > um4bin)[0] + startPt
                    if len(stop1) > 0:
                        stopPt = np.min(np.array([stop1[0], len(X)]))
                    else:
                        # remaining tail is shorter than um4bin and is dropped
                        break
                    starts.append(startPt)
                    ends.append(stopPt)
                    d = np.sum(dist[startPt:stopPt])
                    if d == 0.0:
                        logger.error('Distance is 0!')
                        break
                    else:
                        lengths.append(d)
        # build the voxel mask of each segment on the executors; the closure captures
        # the current X/Y/Z/r arrays for this dendrite
        rdd = sc.parallelize(list(zip(range(len(starts)), starts, ends)))
        def mask(kse):
            key, start, stop = kse
            dendLabelImgTemp = np.zeros(dims)
            x1 = X[start:stop].astype(int)
            y1 = Y[start:stop].astype(int)
            z1 = Z[start:stop].astype(int)
            # clamp trace points that fall outside the volume
            x1[x1 >= dims[0]] = dims[0] - 1
            y1[y1 >= dims[1]] = dims[1] - 1
            z1[z1 >= dims[2]] = dims[2] - 1
            dendLabelImgTemp[x1, y1, z1] = 1
            # dilate the centerline by the mean radius of the segment
            sSize = np.mean(r[start:stop])
            dendLabelImgTemp = binary_dilation(dendLabelImgTemp > 0,
                                               getStructure(np.array([pixelSize[0], pixelSize[1], 2.5]), sSize))
            index_inner = np.ravel_multi_index(np.where(dendLabelImgTemp > 0), dims)
            return key, sSize, index_inner
        indexRdd = rdd.map(mask).collect()
        # paint each collected segment into the flat label image with a fresh label
        for segNum, size, index in indexRdd:
            if len(index)>0:
                dendLabelImg[index] = counter
            if flagFirst:
                dendLabelTable = np.array([counter, i, segNum], ndmin=2)
                flagFirst = False
            else:
                dendLabelTable = np.append(dendLabelTable, np.array([counter, i, segNum], ndmin=2), axis=0)
            counter += 1
            segmentSize.append(size)
        dendPathLength.append(np.array(lengths))
    dendLabelImg = dendLabelImg.reshape(labelimg.shape)
    timeDict['dendLabelImgB'] = dendLabelImg
    timeDict['dendLabelTableB'] = dendLabelTable
    timeDict['dendPathLengthB'] = dendPathLength
    timeDict['dendSizeB'] = segmentSize
    writeTiff(timeDict['path'], dendLabelImg, 'dendLabelImgB')
def getMasks(timeDict, makeB=False):
    """ get spine masks by dilating twice
    :param timeDict: time course dictionary; needs labelimg, dendLabelImg (or
        dendLabelImgB when makeB is True), pixelSize, dilate1 and dilate2
    :param makeB: if True use the B (distance-binned) dendrite label image
    :return: adds labelimgAll (or labelimgAllB): spine labels plus dendrite labels
        restricted to voxels away from the grown spines
    """
    labelimg = timeDict['labelimg']
    dendLabelImg = timeDict['dendLabelImgB'] if makeB else timeDict['dendLabelImg']
    pixelSize = timeDict['pixelSize']
    # flat working copy that accumulates the dilated spine labels
    grownSpines = copy.deepcopy(labelimg).reshape(-1)
    nLabels = np.max(labelimg) + 1
    for spineId in log_progress(range(1, nLabels), name='Spines'):
        # binary mask of the current spine, dilated by dilate1
        spineMask = np.zeros(labelimg.shape).reshape(-1)
        spineMask[np.ravel_multi_index(np.where(labelimg == spineId), labelimg.shape)] = True
        spineMask = binary_dilation(spineMask.reshape(labelimg.shape),
                                    getStructure(pixelSize, timeDict['dilate1']))
        grownSpines[np.ravel_multi_index(np.where(spineMask), labelimg.shape)] = spineId
    grownSpines = grownSpines.reshape(labelimg.shape)
    # keep dendrite voxels only where they are clear of the (further dilated) spines
    keepDend = np.invert(binary_dilation(grownSpines > 0, getStructure(pixelSize, timeDict['dilate2'])))
    labelimgAll = dendLabelImg * keepDend + labelimg
    if makeB:
        timeDict['labelimgAllB'] = labelimgAll
    else:
        timeDict['labelimgAll'] = labelimgAll
def getMasksDF(timeDict, makeB=False, plot=True):
    """ get Masks DataFrame and fix dendrite assignments
    :param timeDict: time course dictionary; needs labelimgAll(B), dendLabelTable(B),
        dendPathLength(B), pixelSize, dendNumAll and fix_spines
    :param makeB: if True build MasksB from the B (distance-binned) labels
    :param plot: if True overlay mask numbers on a max projection of the label image
    :return: adds the Masks (or MasksB) DataFrame
    """
    # fix: if True, reassign each spine's dendrite number to the nearest dendrite segment
    fix = timeDict['fix_spines']
    # todo: check for nans
    if not makeB:
        labelimgAll = timeDict['labelimgAll']
        dendLabelTable = timeDict['dendLabelTable']
        dendPathLength = timeDict['dendPathLength']
    else:
        labelimgAll = timeDict['labelimgAllB']
        dendLabelTable = timeDict['dendLabelTableB']
        dendPathLength = timeDict['dendPathLengthB']
    mask = (labelimgAll > 0).astype(int)
    label = labelimgAll
    nMasks = label.max().astype(int)
    # center of mass of every labeled mask (spines first, then dendrite segments)
    centers = np.array(ndimg.center_of_mass(mask, label, range(1, nMasks + 1)))
    pixelSize = timeDict['pixelSize'][0]
    # convert x/y to um; z stays in plane index units
    centers[:, 0:2] = centers[:, 0:2] * pixelSize
    Centers = pd.Series([tuple(row) for row in centers])
    types = ['Spine'] * timeDict['dendNumAll'].shape[0] + ['Dendrite'] * dendLabelTable.shape[0]
    MaskType = pd.Series(types)
    index = dendLabelTable[:, 0]
    DendNum = pd.Series(np.concatenate((timeDict['dendNumAll'], dendLabelTable[:, 1].T)))
    # DendSegment is indexed by the 0-based label so it aligns with the other columns
    DendSegment = pd.Series(dendLabelTable[:, 2], index=index - 1)
    Masks = pd.DataFrame({'Centers': Centers,
                          'MaskType': MaskType,
                          'DendNum': DendNum,
                          'DendSegment': DendSegment})
    # cache the segment centers of every dendrite for the distance search below
    dendrites = Masks[(Masks['MaskType'] == 'Dendrite')]['DendNum'].unique()
    dendCenters = dict()
    for dend in dendrites:
        dendCenters[dend] = np.asarray(
            list(Masks[(Masks['MaskType'] == 'Dendrite') & (Masks['DendNum'] == dend)]['Centers'].values))
    spines = Masks[(Masks['MaskType'] == 'Spine')]['DendNum'].unique()
    if fix:
        # remember which rows belong to each spine group before rewriting DendNum
        indexReplace = [None] * len(spines)
        for index, S_dend in enumerate(spines):
            indexReplace[index] = Masks[(Masks['MaskType'] == 'Spine') & (Masks['DendNum'] == S_dend)].index
        new_values = [None] * len(spines)
        for index, S_dend in enumerate(spines):
            SpineCenters = np.asarray(
                list(Masks[(Masks['MaskType'] == 'Spine') & (Masks['DendNum'] == S_dend)]['Centers'].values))
            # minDist[s, d]: squared distance from spine s to the closest segment of dendrite d
            minDist = np.zeros((SpineCenters.shape[0], dendrites.shape[0]))
            for indexS, spine in enumerate(SpineCenters):
                for indexD, dend in enumerate(dendrites):
                    currentDend = dendCenters[dend]
                    minDist[indexS, indexD] = np.min(np.sum((currentDend - spine) ** 2, axis=1))
            # indexReplace = Masks[(Masks['MaskType'] == 'Spine') & (Masks['DendNum'] == S_dend)].index
            # the whole spine group moves to the dendrite with the smallest mean distance
            new_values[index] = dendrites[np.argmin(minDist.mean(axis=0))].astype(int)
            # Masks['DendNum'][indexReplace[index]] = new_values
            logger.info('Old: %d, new: %d' % (S_dend, new_values[index]))
        # NOTE(review): chained assignment (Masks['DendNum'][...]) triggers
        # SettingWithCopyWarning on modern pandas; Masks.loc[rows, 'DendNum'] is the safe form
        for index, S_dend in enumerate(spines):
            Masks['DendNum'][indexReplace[index]] = new_values[index]
    # NOTE(review): this attaches an ad-hoc attribute, not a column — pandas does not
    # preserve such attributes through copies/serialization
    Masks.dendPathLength = np.hstack(dendPathLength)
    if not makeB:
        timeDict['Masks'] = Masks
    else:
        # B spines inherit the (possibly fixed) dendrite numbers of the A Masks
        for index in Masks[(Masks['MaskType'] == 'Spine')].index:
            Masks['DendNum'][index] = timeDict['Masks']['DendNum'][index]
        timeDict['MasksB'] = Masks
    if plot:
        SpineCenters = np.asarray(list(Masks[(Masks.MaskType == 'Spine')]['Centers']))
        SpineDend = np.asarray(list(Masks[(Masks.MaskType == 'Spine')]['DendNum']))
        plt.figure(figsize=(13, 13))
        plt.imshow(labelimgAll.max(axis=2).transpose(1, 0))
        for center, dend in zip(SpineCenters, SpineDend):
            plt.text(center[0] / pixelSize, center[1] / pixelSize, str(dend))
        dendNums = np.unique(dendLabelTable[:, 1].T)
        for num in dendNums:
            current = Masks[(Masks.MaskType == 'Dendrite') & (Masks.DendNum == num)].Centers
            XY = (np.array(list(map(list, current.values)))[:, [0, 1]].mean(axis=0) / pixelSize).astype(int)
            plt.text(XY[0], XY[1], str(num), size=20, color='white')
def getFieldMasks(timeDict, makeB=False):
    """ assigns the masks back to the original data format
    :param timeDict: time course dictionary need labelimgAll, fieldsTform
    :param makeB: if True use labelimgAllB and write labelListB
    :return: adds: labelList — one (2 x n) array per label holding, per pixel,
        the within-field flat index and the field number
    """
    if not makeB:
        labelimgAll = timeDict['labelimgAll']
    else:
        labelimgAll = timeDict['labelimgAllB']
    fieldsTform = timeDict['fieldsTform']
    # column-major flattening to match the MATLAB-derived transform indices
    linearLabelimg = labelimgAll.flatten(order='F')
    nLabel = np.max(linearLabelimg).astype(np.int64)
    labelInfoAll = [None] * len(fieldsTform)
    for i in range(0, len(fieldsTform)):
        Tform = fieldsTform[i].T
        # rows: [label at transformed voxel, original field pixel index, field number]
        labelInfo = np.zeros((Tform.shape[0], 3))
        for j in range(0, Tform.shape[0]):
            try:
                labelInfo[j, 0] = linearLabelimg[int(Tform[j, 0])]
            except Exception:
                # NOTE(review): the handler logs the location and then re-executes the
                # same failing lookup, so the original exception is raised again —
                # looks intentional (log-then-fail), but confirm
                logger.error(str(i) + ' ' + str(j))
                labelInfo[j, 0] = linearLabelimg[int(Tform[j, 0])]
            labelInfo[j, 1] = Tform[j, 1]
            labelInfo[j, 2] = i
        labelInfoAll[i] = labelInfo.T
    labelArray = np.hstack(labelInfoAll)
    # group the per-pixel rows by label (labels are 1-based in labelArray)
    labelList = [None] * nLabel
    for i in range(0, nLabel):
        labelList[i] = labelArray[1:3, labelArray[0, :] == (i + 1)]
    if not makeB:
        timeDict['labelList'] = labelList
    else:
        timeDict['labelListB'] = labelList
def getRegionData(sc, data, timeDict, makeB=False):
    """ get time course data
    :param sc: SparkContext
    :param data: Thunder Images object (one volume per time point)
    :param timeDict: time course dictionary; needs labelList(B), finalShifts,
        grpZPos and groupZ
    :param makeB: if True read labelListB and write TCB/TCMotionB/TCPixelsB
    :return: adds: TC, TCPixels, TCMotion (masks x time arrays)
    """
    # todo: if tracing out of bounds!
    if not makeB:
        labelList = copy.deepcopy(timeDict['labelList'])
    else:
        labelList = copy.deepcopy(timeDict['labelListB'])
    # maxIndex = np.prod(data.shape[1:]) * len(grpZPos)
    # for i, label in enumerate(labelList):
    # index = label[0, :] < maxIndex
    # labelList[i][0, :] =labelList[i][0, :][index]
    # index = label[1, :] < maxIndex
    # labelList[i][1, :] =labelList[i][1, :][index]
    finalShifts = timeDict['finalShifts']
    grpZPos = timeDict['grpZPos']
    groupZ = timeDict['groupZ']
    # broadcast the large lookup structures once instead of shipping them per task
    labelListBC = sc.broadcast(labelList)
    shiftsBC = sc.broadcast(finalShifts)
    RegMean = groupZ.transpose(1, 2, 0, 3).flatten(order='F')
    RegMeanBC = sc.broadcast(RegMean)
    # old = np.seterr(all='raise')
    # logger.info('Set numpy errors to raise')
    def offsetVol(vol, pos, grpZPos2):
        # place each plane of vol into the z-slot closest to pos; unfilled slots stay NaN
        np.seterr(all='raise')
        out = np.zeros((vol.shape[0], vol.shape[1], grpZPos2.shape[0] * vol.shape[2]))
        out[:] = np.NAN
        offset = np.argmin(np.absolute(grpZPos2 - pos))
        for i in range(0, vol.shape[2]):
            newFieldID = i * grpZPos2.shape[0] + offset
            out[:, :, newFieldID] = vol[:, :, i]
        return out.flatten(order='F').astype('float32')
    def offsetVolPar(kv):
        # per-time-point wrapper: look up the z shift for this volume's key
        np.seterr(all='raise')
        key, ary = kv
        return offsetVol(ary, shiftsBC.value[np.array(key).astype(int), 0, 2], grpZPos).astype('float32')
    def nanMeanByRegions(kv):
        # weighted nan-mean of the volume over each label's pixels
        # (grp[1, :] = flat pixel indices, grp[0, :] = pixel weights)
        np.seterr(all='raise')
        key, ary = kv
        mean_values = []
        for grp in labelListBC.value:
            # NOTE(review): numerator casts indices to uint32 but denominator to uint64 —
            # confirm both are intended to address the same pixels
            a = np.nansum(ary[np.array(grp[1, :], dtype='uint32')] * grp[0, :].flatten())
            b = np.sum(~np.isnan(ary[np.array(grp[1, :], dtype='uint64')]) * grp[0, :].flatten())
            if b == 0.0:
                mean_values.append(np.nan)
            else:
                mean_values.append(a / b)
        # mean_values = [np.nansum(ary[np.array(grp[1, :], dtype='uint32')] * grp[0, :].flatten()) / np.sum(
        #     ~np.isnan(ary[np.array(grp[1, :], dtype='uint64')]) * grp[0, :].flatten()) for grp in labelListBC.value]
        return np.array(mean_values, dtype=ary.dtype).reshape((1, 1, -1))
    def nanMeanByRegionsMotion(kv):
        # same regional mean but over the registered mean image masked to the
        # pixels present in this time point — a motion/occupancy reference signal
        np.seterr(all='raise')
        key, ary = kv
        compMean = RegMeanBC.value * np.absolute(np.sign(ary))
        norm_values = []
        for grp in labelListBC.value:
            a = np.nansum(compMean[np.array(grp[1, :], dtype='uint32')] * grp[0, :].flatten())
            b = np.sum(~np.isnan(compMean[np.array(grp[1, :], dtype='uint32')]) * grp[0, :].flatten())
            if b == 0.0:
                norm_values.append(np.nan)
            else:
                norm_values.append(a / b)
        return np.array(norm_values, dtype='float32').reshape((1, 1, -1))
    def nanMeanByRegionsPixels(kv):
        # weighted count of valid (non-NaN) pixels per label at this time point
        np.seterr(all='raise')
        key, ary = kv
        pixels = [np.sum(~np.isnan(ary[np.array(grp[1, :], dtype='uint64')]) * grp[0, :].flatten()) for grp in
                  labelListBC.value]
        return np.array(pixels, dtype='float32').reshape((1, 1, -1))
    RegDataExp = data.map(offsetVolPar, with_keys=True)
    # cache the z-aligned volumes: they are reused by three reductions below
    RegDataExp.cache()
    RegDataExp.count()
    if not makeB:
        timeDict['TC'] = RegDataExp.map(nanMeanByRegions, with_keys=True).toarray().T
        logger.info('Got TC')
        timeDict['TCMotion'] = RegDataExp.map(nanMeanByRegionsMotion, with_keys=True).toarray().T
        logger.info('Got TCMotion')
        timeDict['TCPixels'] = RegDataExp.map(nanMeanByRegionsPixels, with_keys=True).toarray().T
        logger.info('Got TCPixels')
    else:
        timeDict['TCB'] = RegDataExp.map(nanMeanByRegions, with_keys=True).toarray().T
        logger.info('Got TCB')
        timeDict['TCMotionB'] = RegDataExp.map(nanMeanByRegionsMotion, with_keys=True).toarray().T
        logger.info('Got TCMotionB')
        timeDict['TCPixelsB'] = RegDataExp.map(nanMeanByRegionsPixels, with_keys=True).toarray().T
        logger.info('Got TCPixelsB')
    RegDataExp.uncache()
    # np.seterr(**old)
    # logger.info('Set numpy errors to: %s' % old)
def getBaseline(sc, timeDict, regWindow=1000, maxZscore=2.0, step=8, makeB=False):
    """ estimates baseline
    :param sc: Spark Context
    :param timeDict: time course dictionary needs: TC, TCMotion and TCPixels
    :param regWindow: number of time points in the sliding regression window
    :param maxZscore: maximum z score for a point to count as baseline
    :param step: evaluate the regression every `step` time points and interpolate between
    :param makeB: if True use the B time courses and write TCBaselineB/TCdivB
    :return: adds: TCBaseline and TCdiv (= TC / TCBaseline - 1)
    """
    from sklearn import linear_model
    # t = time.time()
    timeDict['regWindow'] = regWindow
    timeDict['maxZscore'] = maxZscore
    timeDict['step'] = step
    def estBaseline(key, model_inner):
        # fit y ~ coef * x over a window centered at `key`, using only
        # low-activity ("baseline") points; executed on Spark workers.
        # Reads x_BC/y_BC/not_nan_BC — broadcasts rebound for each mask row below.
        np.seterr(all='warn')
        from scipy.stats import gaussian_kde
        start = int(max((0, key - regWindow / 2)))
        stop = int(min((len(x), key + regWindow / 2)))
        x1 = x_BC.value[start:stop]
        y1 = y_BC.value[start:stop]
        # p1 = p[start:stop]
        x2 = x1[np.logical_not(np.isnan(x1))]
        y2 = y1[np.logical_not(np.isnan(y1))]
        if np.any(y2):
            # estimate the mode of y via a KDE over the interquartile range
            if len(y2) > 100:
                kernel = gaussian_kde(y2)
                low, high = np.percentile(y2, [25, 75]).astype(int)
                step_inner = (high - low) / 100.
                testRange = low + np.arange(start=1, stop=101, dtype=int) * step_inner
                estMode = testRange[np.argmax(kernel(testRange))]
            else:
                estMode = np.median(y2)
            # symmetric std estimate from the sub-mode tail only
            y3 = y2[(y2 - estMode) < 0] - estMode
            std = np.std(np.hstack((y3, -y3)))
            zscore = (y1 - estMode) / std
            goodPts = np.logical_and((zscore < maxZscore), not_nan_BC.value[start:stop])
        else:
            goodPts = []
        if np.any(goodPts):
            model_inner = model_inner.fit(x1[goodPts].reshape(-1, 1), y1[goodPts].reshape(-1, 1))
            coef_inner = model_inner.coef_
            # guard against a degenerate near-zero slope; fall back to a ratio of means
            if coef_inner < 0.1:
                coef_inner = np.nanmean(y2) / np.nanmean(x2)
        else:
            coef_inner = np.NAN
        return key, np.squeeze(coef_inner)
    # no intercept: baseline is modeled as a pure scaling of the motion reference
    model = linear_model.LinearRegression(fit_intercept=False)
    if not makeB:
        TC = timeDict['TC']
        TCMotion = timeDict['TCMotion']
        TCPixels = timeDict['TCPixels']
    else:
        TC = timeDict['TCB']
        TCMotion = timeDict['TCMotionB']
        TCPixels = timeDict['TCPixelsB']
    TCBaselineDict = dict()
    inter_x = np.arange(0, TC.shape[1], 1, int)
    inter_xp = np.arange(0, TC.shape[1], step, int)
    for i in tqdm.tqdm(range(TC.shape[0])):
        x = TCMotion[i, :]
        y = TC[i, :]
        p = TCPixels[i, :]
        not_nan = np.logical_and(np.logical_not(np.isnan(x)), np.logical_not(np.isnan(y)))
        not_nan = np.logical_and(not_nan, np.logical_not(np.isnan(p)))
        # rebroadcast per mask row; estBaseline resolves these at task time
        x_BC = sc.broadcast(x)
        y_BC = sc.broadcast(y)
        not_nan_BC = sc.broadcast(not_nan)
        coefDict = sc.parallelize(range(0, len(x), step)).map(lambda x2: estBaseline(x2, model)).collectAsMap()
        coef = np.array([coefDict[idx] for idx in range(0, len(x), step)])
        # linearly interpolate the coarse (every `step`) coefficients to every time point
        coef = np.interp(inter_x, inter_xp, coef)
        TCBaselineDict[i] = x * np.squeeze(coef)
        x_BC.unpersist()
        y_BC.unpersist()
        not_nan_BC.unpersist()
        # current = time.time() - t
        # m, s = divmod(current, 60)
        # logger.info('i: %d, %02d:%02d' % (i, m, s))
        # sys.stdout.flush()
    TCBaseline = np.array([np.squeeze(TCBaselineDict[idx]) for idx in TCBaselineDict.keys()])
    if not makeB:
        timeDict['TCBaseline'] = TCBaseline
        old = np.seterr(all='warn')
        timeDict['TCdiv'] = TC / TCBaseline - 1
    else:
        timeDict['TCBaselineB'] = TCBaseline
        old = np.seterr(all='warn')
        timeDict['TCdivB'] = TC / TCBaseline - 1
def getNoise(sc, timeDict, makeB=False):
    """ estimate noise
    :param sc: Spark Context
    :param timeDict: time course dictionary needs: TCdiv, TCPixels and TCBaseline
    :param makeB: if True use the B time courses and write TCNoiseB/TCzscoreB
    :return: adds TCNoise and TCZscore (TCdiv scaled by the estimated noise)
    """
    def fitNoise(key):
        # fit a shot-noise-like model to the negative excursions of one mask's
        # dF/F trace; runs on Spark workers, `key` is a 1-tuple (row index,)
        from scipy.optimize import curve_fit
        def model_noise(x2, Ndiv, Nscale, offset):
            # expected relative noise for a Poisson-like signal of intensity x2
            lambdaAct = ((x2 + offset) ** Nscale) / Ndiv
            return (lambdaAct ** 0.5) / lambdaAct
        # negative dF/F time points are assumed to be pure noise
        idx_inner = (TCdiv_BC.value[key[0], :] < 0).nonzero()[0]
        TCPixels2 = copy.deepcopy(TCPixels_BC.value[key[0], :])
        TCPixels2[TCPixels_BC.value[key[0], :] == 0] = np.NAN
        # x: total collected signal (pixels * baseline); y: magnitude of the dip
        x = TCPixels2[idx_inner] * TCBaseline_BC.value[key[0], idx_inner]
        y = -TCdiv_BC.value[key[0], idx_inner]
        validTps = np.isfinite(x) & np.isfinite(y)
        x = x[validTps]
        y = y[validTps]
        x2 = x[x > 0]
        y2 = y[x > 0]
        try:
            opt_parameters, parm_cov = curve_fit(model_noise, x2, y2 ** 2, maxfev=10000, method='trf')
            if np.any(np.logical_not(np.isfinite(opt_parameters))):
                # non-finite fit: fall back to a constant RMS noise level
                TC_noise = np.ones_like(TCPixels2) * np.mean(y2 ** 2) ** 0.5
            else:
                TC_noise = model_noise(TCPixels2 * TCBaseline_BC.value[key[0], :], opt_parameters[0],
                                       opt_parameters[1], opt_parameters[2]) ** 0.5
        except:
            # curve_fit failed to converge: same constant fallback
            TC_noise = np.ones_like(TCPixels2) * np.mean(y2 ** 2) ** 0.5
        return key, TC_noise
    if not makeB:
        TCdiv_BC = sc.broadcast(timeDict['TCdiv'])
        TCPixels_BC = sc.broadcast(timeDict['TCPixels'])
        TCBaseline_BC = sc.broadcast(timeDict['TCBaseline'])
        idxList = list(zip(range(0, timeDict['TCdiv'].shape[0])))
    else:
        TCdiv_BC = sc.broadcast(timeDict['TCdivB'])
        TCPixels_BC = sc.broadcast(timeDict['TCPixelsB'])
        TCBaseline_BC = sc.broadcast(timeDict['TCBaselineB'])
        idxList = list(zip(range(0, timeDict['TCdivB'].shape[0])))
    # idxList holds 1-tuples so fitNoise keys stay hashable for collectAsMap
    fitNoiseDict = sc.parallelize(idxList).map(fitNoise).collectAsMap()
    TCNoise = np.array([fitNoiseDict[idx] for idx in idxList])
    if not makeB:
        timeDict['TCNoise'] = TCNoise
        timeDict['TCzscore'] = timeDict['TCdiv'] / TCNoise
    else:
        timeDict['TCNoiseB'] = TCNoise
        timeDict['TCzscoreB'] = timeDict['TCdivB'] / TCNoise
def loadInfo(timeDict):
    """ load the .mat files with session info
    :param timeDict: time course dictionary; needs databasePath and path
    :return: adds InfoSWC and Info (the 'Info' struct of each mat file)
    """
    for targetKey, matName in (('InfoSWC', 'prepareMasksAutoSWC.mat'),
                               ('Info', 'prepareMasksAuto.mat')):
        matContents = loadmat(os.path.join(timeDict['databasePath'], matName))
        timeDict[targetKey] = matContents['Info']
        logger.info('Loaded ' + timeDict['path'] + matName)
def getTransform(timeDict, outputFile='InvTransform.h5', getAligned=True, do_8bit=True, sat=1):
    """ calculates a affine transformation from session space to anatomy stack space
    :param timeDict: time course dict; needs pixelSize, UM_1X, xyPixNum, SessionPath,
        databasePath, anatomyZstep and path
    :param outputFile: name of transform file to save to
    :param getAligned: flag to apply the transformation and return 'Aligned' to timeDict
    :param do_8bit: also write an 8-bit version of the aligned stack
    :param sat: saturation percentile passed to convert_8bit
    :return: inverse transformation (from session space to anatomical stack space)
    """
    # the three callbacks below communicate through module-level globals because
    # SimpleITK's AddCommand takes zero-argument callables
    # callback invoked when the StartEvent happens, sets up our new data
    def start_plot():
        global metric_values, multires_iterations
        metric_values = []
        multires_iterations = []
    # callback invoked when the EndEvent happens, do cleanup of data and figure
    def end_plot():
        global metric_values, multires_iterations
        try:
            del metric_values
            del multires_iterations
            # close figure, we don't want to get a duplicate of the plot latter on
            plt.close()
        except Exception:
            pass
    # callback invoked when the IterationEvent happens, update our data and display new figure
    def plot_values(registration_method_inner):
        global metric_values, multires_iterations
        val = registration_method_inner.GetMetricValue()
        if np.isfinite(val):
            metric_values.append(registration_method_inner.GetMetricValue())
            # clear the output area (wait=True, to reduce flickering), and plot current data
            clear_output(wait=True)
            # plot the similarity metric values
            plt.plot(metric_values, 'r')
            plt.plot(multires_iterations, [metric_values[index] for index in multires_iterations], 'b*')
            plt.xlabel('Iteration Number', fontsize=12)
            plt.ylabel('Metric Value', fontsize=12)
            plt.show()
    # callback invoked when the sitkMultiResolutionIterationEvent happens, update the index into the
    # metric_values list.
    def update_multires_iterations():
        global metric_values, multires_iterations
        multires_iterations.append(len(metric_values))
    pxSize = timeDict['pixelSize'][0]
    # xy step (um/pixel) of the anatomy stack
    xyStep = timeDict['UM_1X'] / timeDict['xyPixNum']
    aMaskSmooth = tf.imread(timeDict['SessionPath']).transpose(1, 2, 0)
    sMaskSmooth = tf.imread(os.path.join(timeDict['databasePath'], 'expended_new.tif')).transpose(2, 1, 0)
    # threshold the moving image at its median (ignoring NaNs) and zero-fill NaNs
    sMaskSmooth2 = sMaskSmooth.flatten()[np.logical_not(np.isnan(sMaskSmooth.flatten()))]
    t = np.median(sMaskSmooth2)
    sMaskSmooth[sMaskSmooth < t] = 0
    sMaskSmooth = np.nan_to_num(sMaskSmooth)
    zNum = aMaskSmooth.shape[2]
    timeDict['zNum'] = zNum
    timeDict['sMaskSmooth'] = sMaskSmooth
    timeDict['aMaskSmooth'] = aMaskSmooth
    logger.info('Prepared images')
    sys.stdout.flush()
    # fixed = anatomy stack, moving = session stack; spacing carries the physical units
    target = sitk.GetImageFromArray(aMaskSmooth)
    moving = sitk.GetImageFromArray(sMaskSmooth)
    target.SetSpacing((timeDict['anatomyZstep'], xyStep, xyStep))
    moving.SetSpacing((timeDict['pixelSize'][2], pxSize, pxSize))
    registration_method = sitk.ImageRegistrationMethod()
    registration_method.SetMetricAsCorrelation()
    registration_method.SetInterpolator(sitk.sitkLinear)
    # optimizer settings
    registration_method.SetOptimizerAsGradientDescent(learningRate=0.5, numberOfIterations=20,
                                                      convergenceMinimumValue=1e-6, convergenceWindowSize=10,
                                                      estimateLearningRate=sitk.ImageRegistrationMethod.EachIteration)
    registration_method.SetOptimizerScalesFromPhysicalShift()
    # setup for the multi-resolution framework
    registration_method.SetShrinkFactorsPerLevel(shrinkFactors=[4, 2, 1])
    registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2, 1, 0])
    registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
    # connect all of the observers so that we can perform plotting during registration
    registration_method.AddCommand(sitk.sitkStartEvent, start_plot)
    registration_method.AddCommand(sitk.sitkEndEvent, end_plot)
    registration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent, update_multires_iterations)
    registration_method.AddCommand(sitk.sitkIterationEvent, lambda: plot_values(registration_method))
    # initialize the affine transform from the images' moments
    transform = sitk.CenteredTransformInitializer(target, moving, sitk.AffineTransform(3),
                                                  sitk.CenteredTransformInitializerFilter.MOMENTS)
    registration_method.SetInitialTransform(transform)
    registration_method.Execute(target, moving)
    logger.info('Final metric value: {0}'.format(registration_method.GetMetricValue()))
    logger.info('Optimizer\'s stopping condition, {0}'.format(
        registration_method.GetOptimizerStopConditionDescription()))
    if getAligned:
        # resample the moving (session) stack into anatomy-stack space for inspection
        reSampler = sitk.ResampleImageFilter()
        reSampler.SetTransform(transform)
        reSampler.SetSize(target.GetSize())
        reSampler.SetOutputSpacing(target.GetSpacing())
        aligned = sitk.GetArrayFromImage(reSampler.Execute(moving))
        timeDict['aligned'] = aligned
        path = os.path.join(timeDict['path']+'View', '')
        writeTiff(path, aligned, 'aligned')
        if do_8bit:
            aligned_8bit = convert_8bit(aligned, sat_percent=sat, ignore_nans=False, ignore_zero=True)
            writeTiff(path, aligned_8bit, 'aligned_8bit', dtype='uint8')
    # persist the inverse (session -> anatomy) transform for later point mapping
    transformInv = transform.GetInverse()
    sitk.WriteTransform(transformInv, timeDict['path'] + outputFile)
    logger.info('Saved inverse transform to ' + timeDict['path'] + outputFile)
    return transformInv
def transformPoints(timeDict, transformInv, useSWC=True, makeB=False):
    """ transform the center points of all masks to anatomy space in um
    :param timeDict: time course dictionary; needs InfoSWC (or Info), SessionPath,
        UM_1X, xyPixNum, anatomyZstep and Masks (or MasksB)
    :param transformInv: the SimpleITK transformation
    :param useSWC: load data from swc reconstruction file
    :param makeB: if True operate on MasksB
    :return: adds AnatomyCenters, Segment, ConnectingPoint, ConnectingDist and
        ParentId to the Masks DataFrame (mutated in place)
    """
    # transform point to anatomy space
    if useSWC:
        Info = timeDict['InfoSWC']
        All = Info['AllSWC']
    else:
        Info = timeDict['Info']
        All = Info['All']
    img = tf.imread(timeDict['SessionPath']).astype(int).transpose(1, 2, 0)
    imgSize = img.shape
    xyStep = timeDict['UM_1X'] / timeDict['xyPixNum']
    if not makeB:
        Masks = timeDict['Masks']
    else:
        Masks = timeDict['MasksB']
    AnatomyCenters = list([])
    # transform each mask center; note the axis reorder (z, x, y) in / (y, z, x) out
    for center in Masks.Centers:
        point = transformInv.TransformPoint((center[2], center[0], center[1]))
        AnatomyCenters.append((point[1], point[2], point[0]))
    Masks['AnatomyCenters'] = AnatomyCenters
    # find closest point in interpolated space: unravel every reconstruction
    # segment's flat indices and convert them to um coordinates
    Connecting = list([])
    segList = list([])
    for segNum, segment in enumerate(All):
        x, y, z = np.unravel_index(segment, imgSize, 'F')
        # subtract one for matlab 1 indexed problem
        x = x - 1
        y = y - 1
        z = z - 1
        # move to real space coordinates (um)
        x = x * xyStep
        y = y * xyStep
        z = z * timeDict['anatomyZstep']
        if len(x.shape) > 0:
            Connecting.extend(list(zip(x, y, z)))
            segList.extend([segNum] * x.shape[0])
        else:
            # scalar segment: a single point
            Connecting.append((x, y, z))
            segList.append(segNum)
    Connecting = np.array(Connecting)
    # nearest reconstruction point for every mask center
    tree = KDTree(Connecting)
    distances, indexes = tree.query(AnatomyCenters)
    loc = Connecting[indexes, :]
    seg = np.array(segList)[indexes]
    loc2 = list(map(tuple, loc))
    Masks['Segment'] = seg
    Masks['ConnectingPoint'] = loc2
    Masks['ConnectingDist'] = distances
    # find closest segment
    if useSWC:
        Table2 = np.array(timeDict['InfoSWC']['TableSWC'])
    else:
        Table2 = np.array(timeDict['Info']['Table'])
    Table = Table2[:, 2:5]
    # NOTE(review): the unit scaling of the table coordinates is commented out —
    # confirm the table is already in the same units as AnatomyCenters
    Table[:, 0] = Table[:, 0]  # * xyStep
    Table[:, 1] = Table[:, 1]  # * xyStep
    Table[:, 2] = Table[:, 2]  # * timeDict['anatomyZstep']
    tree2 = KDTree(Table)
    distances2, indexes2 = tree2.query(AnatomyCenters)
    # column 0 of the table holds the SWC node id of the nearest reconstruction node
    Ids = Table2[indexes2, 0]
    Masks['ParentId'] = Ids
    # timeDict['Masks'] = Masks
def loadTransform(path, filename='InvTransform.h5'):
    """ loads a SimpleITK transformation object
    :param path: path to file (session.path)
    :param filename: transformation filename (*.h5)
    :return: the transformation
    """
    transform_file = path + filename
    return sitk.ReadTransform(transform_file)
def getExcludeIndexB(timeDict):
    """ map the manually excluded mask indices onto the B (distance-binned) labels
    :param timeDict: needs labelimgAll, labelimgAllB, excludeIndex, Masks and MasksB
    :return: adds excludeIndexB (int array of excluded rows in the B Masks)
    """
    labelA = timeDict['labelimgAll']
    labelB = timeDict['labelimgAllB']
    excluded = timeDict['excludeIndex']
    Masks = timeDict['Masks']
    spineRows = np.asarray(list(Masks[(Masks.MaskType == 'Spine')].index))
    dendRows = np.asarray(list(Masks[(Masks.MaskType == 'Dendrite')].index))
    # spine exclusions carry over unchanged; dendrite exclusions must be remapped
    excludedSpines = np.intersect1d(spineRows, excluded)
    excludedDends = np.intersect1d(dendRows, excluded)
    if np.any(np.isfinite(excludedDends)) and len(excludedDends) > 0:
        # voxel mask covering every excluded dendrite segment in the A image
        voxelMask = np.zeros(labelA.shape)
        for rowIdx in excludedDends:
            voxelMask[labelA == (rowIdx + 1)] = 1
        # B labels overlapping those voxels (background dropped), back to 0-based rows
        dendRowsB = np.setdiff1d(np.unique(labelB[voxelMask.astype('bool')]), 0) - 1
        # keep only B segments whose dendrite number matches an excluded A dendrite
        dendNumsA = timeDict['Masks'].DendNum[excludedDends]
        dendNumsB = timeDict['MasksB'].DendNum[dendRowsB]
        sharedNums = np.intersect1d(dendNumsB, dendNumsA)
        dendRowsB = dendRowsB[[np.any(sharedNums == num) for num in dendNumsB]]
        timeDict['excludeIndexB'] = np.hstack([excludedSpines, dendRowsB])
    else:
        timeDict['excludeIndexB'] = excludedSpines
    timeDict['excludeIndexB'] = timeDict['excludeIndexB'].astype(int)
def get_graph_from_swc(timeDict, session):
    """ build a networkx graph of the SWC reconstruction, edge-weighted by segment length (um)
    :param timeDict: time course dictionary; needs databasePath, UM_1X and xyPixNum
    :param session: session object; its Sp['OptionsStruct']['filename'] names the SWC file
    :return: adds 'graph' to timeDict
    """
    # the stored filename uses Windows separators; keep only the basename
    name = session.Sp['OptionsStruct']['filename']
    name = str(name[name.rfind('\\') + 1:])
    FOV = timeDict['databasePath'][:timeDict['databasePath'].rfind('/') + 1]
    table = load_data(os.path.join(FOV, name))
    logger.info('loaded SWC from %s: ' % os.path.join(FOV, name))
    table2 = pd.DataFrame(data=table.data_block, columns=['x', 'y', 'z', 'r', 'type', 'id', 'pID'])
    # convert to um: NOTE(review) the z step 1.6 is hard-coded here while x/y use
    # UM_1X/xyPixNum — confirm 1.6 matches this dataset's anatomy z step
    table2.z = table2.z * 1.6
    table2.x = table2.x * (timeDict['UM_1X'] / timeDict['xyPixNum'])
    table2.y = table2.y * (timeDict['UM_1X'] / timeDict['xyPixNum'])
    table2 = table2[['id', 'type', 'x', 'y', 'z', 'r', 'pID']]
    weight = []
    start = []
    end = []
    # one edge per SWC node to its parent, weighted by the Euclidean distance
    for line in table2.iterrows():
        Id = int(line[0]) + 1
        p1 = np.array(line[1][2:5])
        pID = int(line[1][6])
        if Id == 0 or pID == -1:
            # root node: add a zero-weight placeholder edge (-1 -> 1)
            weight.append(0)
            start.append(-1)
            end.append(1)
            continue
        p2 = np.array(table2.loc[int(pID - 1)][2:5])
        length = np.linalg.norm(p2 - p1)
        start.append(pID)
        end.append(Id)
        weight.append(length)
    table3 = pd.DataFrame(data=np.array(list(zip(start, end, weight))), columns=['start', 'end', 'weight'])
    G = nx.from_pandas_edgelist(table3, source='start', target='end', edge_attr=['weight'])
    # a valid reconstruction should be a tree (no cycles, fully connected)
    isTree = nx.algorithms.tree.recognition.is_tree(G)
    logger.info('Is tree: %s' % isTree)
    timeDict['graph'] = G
def get_path_length(timeDict, makeB=False, plot=True):
    """ compute the path length from the cell body to every mask along the SWC graph
    :param timeDict: time course dictionary; needs graph, Masks (or MasksB) with a
        ParentId column, labelimgAll (or labelimgAllB) and pixelSize
    :param makeB: if True operate on the B (distance-binned) masks
    :param plot: if True overlay each dendrite's path length on the label image
    :return: adds CellBodyPath, pathLengthAll and pathIndex/pathLength (or ...B);
        also adds a PathLength column to the Masks DataFrame in place
    """
    G = timeDict['graph']
    # all-pairs weighted shortest path lengths; node 1.0 is the soma/root
    pathLen = dict(nx.shortest_path_length(G, weight='weight'))
    CellBodyPath = pathLen[1.0]
    timeDict['CellBodyPath'] = CellBodyPath
    if not makeB:
        Masks = timeDict['Masks']
        labelimgAll = timeDict['labelimgAll']
    else:
        Masks = timeDict['MasksB']
        labelimgAll = timeDict['labelimgAllB']
    # distance from the soma to each mask's nearest SWC node.
    # DataFrame.set_value was deprecated in pandas 0.21 and removed in 1.0;
    # label-based .loc assignment is the supported equivalent.
    for i, mask in enumerate(Masks.ParentId):
        if mask != 1.0:
            Masks.loc[i, 'PathLength'] = CellBodyPath[mask]
        else:
            Masks.loc[i, 'PathLength'] = 0
    # dense node-to-node path length matrix (skipping the -1 placeholder node)
    pathLenArray = np.zeros((len(pathLen) - 1, len(pathLen) - 1))
    for key, value in iteritems(pathLen):
        if key != -1.0:
            for key2 in sorted(value):
                if key2 != -1.0:
                    pathLenArray[int(key - 1), int(key2 - 1)] = value[key2]
    timeDict['pathLengthAll'] = pathLenArray
    if not makeB:
        timeDict['pathIndex'] = np.array(np.argsort(Masks.PathLength))
        timeDict['pathLength'] = np.sort(np.array(Masks.PathLength))
    else:
        timeDict['pathIndexB'] = np.array(np.argsort(Masks.PathLength))
        timeDict['pathLengthB'] = np.sort(np.array(Masks.PathLength))
    if plot:
        plt.figure(figsize=(12, 12))
        plt.imshow(labelimgAll.max(axis=2).transpose(1, 0))
        DendCenters = np.asarray(list(Masks[(Masks.MaskType == 'Dendrite')]['Centers']))
        DendPath = np.asarray(list(Masks[(Masks.MaskType == 'Dendrite')]['PathLength']))
        pixelSize = timeDict['pixelSize'][0]
        for center, dend in zip(DendCenters, DendPath):
            plt.text(center[0] / pixelSize + 18, center[1] / pixelSize, str(int(dend)), color='r')
def get_soma_dendrites(timeDict):
    """ record the dendrite numbers covered by the soma masks
    :param timeDict: needs Masks and cellIndexAll (preferred) or cellIndex
    :return: adds soma_dends
    """
    if 'cellIndexAll' in timeDict:
        logger.info('Using cellIndexAll')
        cell_key = 'cellIndexAll'
    else:
        logger.info('Using cellIndex')
        cell_key = 'cellIndex'
    soma_dends = np.unique(timeDict['Masks'].loc[timeDict[cell_key]].DendNum.values)
    logger.info('Found %s as soma dendrite number(s)' % soma_dends)
    timeDict['soma_dends'] = soma_dends
def get_branch_id(timeDict):
    """ assign a branch id to every SWC node by walking the reconstruction tree
    from the soma; at each bifurcation the "shaft" continues the parent's id and
    every other child starts a new branch
    :param timeDict: needs graph, CellBodyPath, UM_1X, xyPixNum, anatomyZstep and InfoSWC
    :return: adds branchID (one id per SWC node) and directed_graph
    """
    G = timeDict['graph']
    CellBodyPath = timeDict['CellBodyPath']
    xyStep = timeDict['UM_1X'] / timeDict['xyPixNum']
    # node coordinates in um, used to pick the straightest child at bifurcations
    Table2 = np.array(timeDict['InfoSWC']['TableSWC'])
    Table = Table2[:, 2:5]
    Table[:, 0] = Table[:, 0] * xyStep
    Table[:, 1] = Table[:, 1] * xyStep
    Table[:, 2] = Table[:, 2] * timeDict['anatomyZstep']
    GD = G.to_directed()
    # NOTE(review): this dist dict is never read after being built — dead code?
    dist = {}
    for node in GD:
        dist[node] = GD.number_of_edges(1, node)
    # orient all edges away from the soma: drop every edge that points toward
    # a node closer to (or as close to) the cell body
    for node in GD:
        pred_nodes = [x for x in GD.predecessors(node)]
        for pred_node in pred_nodes:
            if CellBodyPath[pred_node] >= CellBodyPath[node]:
                GD.remove_edge(pred_node, node)
    dfs = nx.algorithms.traversal.dfs_preorder_nodes(GD, source=1.0)
    counterID = 1
    branchID = np.zeros(len(GD))
    for node in dfs:
        if node == 1:
            # every primary dendrite leaving the soma starts its own branch
            for nodeID in GD.successors(node):
                if nodeID != -1:
                    branchID[int(nodeID)] = counterID
                    counterID += 1
        else:
            succ = np.array(list(GD.successors(node)))
            if len(succ) == 1:
                # simple continuation: child inherits the parent's branch id
                branchID[int(succ[0])] = branchID[int(node)]
            elif len(succ) >= 2:
                # bifurcation: the child whose subtree has the most internal
                # branch points is the shaft candidate
                out_deg = []
                for nodeID in succ:
                    GS = GD.subgraph(descendants(GD, nodeID))
                    ODS = GS.out_degree()
                    out_deg.append(np.sum([x[1] > 1 for x in list(ODS)]))
                max_deg = np.max(out_deg)
                putShaft = succ[(out_deg == max_deg).nonzero()[0]]
                if len(putShaft) == 1:
                    shaft_node = putShaft
                else:
                    # tie-break by geometry: the child continuing most nearly
                    # straight (smallest turn angle) is the shaft
                    angles = []
                    vec_in = Table[int(node) - 1, :] - Table[int(list(GD.predecessors(node))[0]) - 1, :]
                    for psn in putShaft:
                        vec_out = Table[int(psn) - 1, :] - Table[int(node) - 1, :]
                        angles.append(angle(vec_in, vec_out))
                    shaft_node = putShaft[np.argmin(angles)]
                for nodeID in succ:
                    if nodeID == shaft_node:
                        branchID[int(nodeID)] = branchID[int(node)]
                    else:
                        branchID[int(nodeID)] = counterID
                        counterID += 1
    # drop the slot for the placeholder node so branchID aligns with node ids 1..N
    branchID = branchID[1:]
    timeDict['branchID'] = branchID
    timeDict['directed_graph'] = GD
def get_branch_to_branch(timeDict, makeB=False):
    """ get connecting points, distance, relative branch order between branches
    :param timeDict:
    :param makeB: use the *B variants of Masks / excludeIndex and store the
        results under the *B keys
    :return: dend_info
    dend_info axis 0: mask in dendrite 1, mask in dendrite 2, distance, soma traverse, relative branch order
    dend_info axis 1, 2: number of dendrites in session, the soma, number of terminal leafs
    """
    pathLenArray = timeDict['pathLengthAll']
    if makeB:
        excludeIndex = timeDict['excludeIndexB']
        Masks = timeDict['MasksB']
    else:
        excludeIndex = timeDict['excludeIndex']
        Masks = timeDict['Masks']
    if np.any(np.isnan(excludeIndex)):
        excludeIndex=[]
    G = timeDict['graph']
    branchID = timeDict['branchID']
    soma_dends = timeDict['soma_dends']
    exclude_dend = np.unique(Masks.loc[excludeIndex].DendNum.values)
    dend = timeDict['dend']
    num_dend = len(dend)
    transformInv = loadTransform(timeDict['path'])
    pixelSizeSession = timeDict['pixelSize']
    # terminal nodes of the reconstruction graph (degree 1, not the -1 marker)
    leafs = [x for x in G.nodes() if G.degree(x) == 1 and x != -1]
    # one entry per imaged dendrite, plus the soma, plus each terminal leaf
    n_total = num_dend + len(leafs)
    results = np.zeros((5, n_total, n_total))
    Table2 = np.array(timeDict['InfoSWC']['TableSWC'])
    Table = Table2[:, 2:5]
    Table[:, 0] = Table[:, 0]
    Table[:, 1] = Table[:, 1]
    Table[:, 2] = Table[:, 2]
    tree2 = KDTree(Table)
    swc_endpoints = Table2[np.array(leafs).astype(int) - 1, -1]
    index_list = []
    ids_list = []
    distance_list = []
    is_terminal = []
    branch_modal_node = []
    # map every imaged dendrite trace onto the SWC reconstruction points
    for i in range(num_dend):
        dend1 = copy.deepcopy(dend[i])
        dend1_fov = []
        if len(dend1.shape) < 2:
            dend1 = [dend1]
        for center in dend1:
            # transform session pixel coordinates into reconstruction space
            point = transformInv.TransformPoint(
                (float(center[2]), center[1] * pixelSizeSession[0], center[0] * pixelSizeSession[0]))
            dend1_fov.append((point[1], point[2], point[0]))
        distances, Indexes = tree2.query(dend1_fov)
        if np.median(distances) > 10:
            logger.info('Median dist 1 high: = %f' % np.median(distances))
        distances_dend1, Indexes_dend1 = tree2.query(dend1_fov)
        index_list.append(Indexes_dend1)
        distance_list.append(distances_dend1)
        Ids_dend1 = Table2[Indexes_dend1, 0]
        # drop reconstruction points across discontinuities
        Ids_dend1 = get_excluded_index(Ids_dend1, pathLenArray, i)
        # node on the branch this dendrite most frequently hits ("modal" branch)
        branch_ids, branch_ids_count = np.unique(branchID[np.array(Ids_dend1).astype(int) - 1], return_counts=True)
        high_id = branch_ids[np.argmax(branch_ids_count)]
        high_id2 = np.where(branchID[np.array(Ids_dend1).astype(int) - 1] == high_id)[0][0]
        branch_modal_node.append(Ids_dend1[high_id2])
        Ids_dend1 = np.unique(Ids_dend1)
        ids_list.append(Ids_dend1)
        is_terminal.append(len(np.intersect1d(Ids_dend1, swc_endpoints)) > 0)
    for x, y in itertools.product(range(n_total), range(n_total)):
        if x in soma_dends or y in soma_dends or x in exclude_dend or y in exclude_dend or x == y:
            continue
        # print(x, y)
        if x < num_dend:
            # x is an imaged dendrite
            distances_dend1 = distance_list[x]
            Indexes_dend1 = index_list[x]
            Ids_dend1 = ids_list[x]
            branch_modal_node_x = branch_modal_node[x]
        elif x == num_dend:
            # x is the soma (node 1)
            Ids_dend1 = np.array([1])
            distances_dend1 = np.array([0])
            Indexes_dend1 = np.array([0])
            branch_modal_node_x = 1.0
        else:
            # x is a terminal leaf
            Ids_dend1 = np.array([leafs[x - num_dend]])
            distances_dend1 = np.array([0])
            Indexes_dend1 = np.array([leafs[x - num_dend] - 1]).astype(int)
            branch_modal_node_x = Ids_dend1[0]
        if y < num_dend:
            distances_dend2 = distance_list[y]
            Indexes_dend2 = index_list[y]
            Ids_dend2 = ids_list[y]
            branch_modal_node_y = branch_modal_node[y]
        elif y == num_dend:
            Ids_dend2 = np.array([1])
            distances_dend2 = np.array([0])
            Indexes_dend2 = np.array([0])
            branch_modal_node_y = 1.0
        else:
            Ids_dend2 = np.array([leafs[y - num_dend]])
            distances_dend2 = np.array([0])
            Indexes_dend2 = np.array([leafs[y - num_dend] - 1]).astype(int)
            branch_modal_node_y = Ids_dend2[0]
        # find the pair of reconstruction points with the smallest path length
        dist = np.inf
        dend1_close = 0
        dend2_close = 0
        for x2, y2 in itertools.product(Ids_dend1, Ids_dend2):
            if timeDict['pathLengthAll'][int(x2 - 1), int(y2 - 1)] < dist:
                dist = timeDict['pathLengthAll'][int(x2 - 1), int(y2 - 1)]
                dend1_close = x2
                dend2_close = y2
        if np.isfinite(dist):
            # index (within each dendrite trace) of the point closest to the connection
            Ids_dend1_best = np.where(Table2[Indexes_dend1, 0] == dend1_close)[0]
            dend1_best_dist = distances_dend1[Ids_dend1_best]
            dend1_best_offset = np.argmin(dend1_best_dist)
            dend1_best_point = Ids_dend1_best[dend1_best_offset]
            Ids_dend2_best = np.where(Table2[Indexes_dend2, 0] == dend2_close)[0]
            dend2_best_dist = distances_dend2[Ids_dend2_best]
            dend2_best_offset = np.argmin(dend2_best_dist)
            dend2_best_point = Ids_dend2_best[dend2_best_offset]
            # does the connecting path traverse the soma (node 1.0)?
            node_list = nx.shortest_path(G, dend1_close, dend2_close)
            crossing_soma = 1.0 in node_list
            node_list2 = nx.shortest_path(G, branch_modal_node_x, branch_modal_node_y)
            relative_branch_order = len(np.unique(branchID[np.array(node_list2).astype(int) - 1])) - 1
            results[:, x, y] = np.array(
                [dend1_best_point, dend2_best_point, dist, crossing_soma, relative_branch_order])
        else:
            raise ValueError('Distance between %d and %d not finite' % (x, y))
    if makeB:
        timeDict['dend_infoB'] = results
        timeDict['dend_is_terminalB'] = is_terminal
    else:
        timeDict['dend_info'] = results
        timeDict['dend_is_terminal'] = is_terminal
def get_mask_to_mask(timeDict, makeB=False, smoothWin=50):
    """Compute pairwise along-dendrite path lengths between all masks, plus
    path lengths to the soma and to every terminal leaf.

    Masks on the same dendrite trace are measured along the (smoothed) trace;
    masks on different traces are bridged through the connecting points found
    by get_branch_to_branch ('dend_info').

    :param timeDict: time course dictionary
    :param makeB: use/store the *B variants
    :param smoothWin: smoothing window (points) for the arc-length measurement
    :return: None; stores pathLengthGrid, pathLengthSoma, branchOrder,
        pathLengthTerminals, pathLengthTerminalsTrav, somaTravGrid and
        relBranchOrder (with a 'B' suffix when makeB is True)
    """
    All = []
    for x in timeDict['dend']:
        # reorder trace columns to the (x, y, z) convention used below
        if len(x.shape) > 1:
            All.append(x[:, [1, 0, 2]])
        else:
            All.append(x[[1, 0, 2]])
    if makeB:
        Masks = timeDict['MasksB']
        excludeIndex = timeDict['excludeIndexB']
        results = timeDict['dend_infoB']
    else:
        Masks = timeDict['Masks']
        excludeIndex = timeDict['excludeIndex']
        results = timeDict['dend_info']
    G = timeDict['graph']
    AnatomyCenters = np.array([np.array(x) for x in Masks.Centers])
    # find closest point in interpolated space
    segList = []
    Dist = []
    Indexes = []
    maskOrder = []
    for segNum, segment in enumerate(All):
        maskIdx = (Masks.DendNum == segNum).values.nonzero()[0].astype(int)
        AnatomyCentersSeg = AnatomyCenters[maskIdx]
        if len(segment.shape) < 2:
            segment = segment[np.newaxis, :]
        x = segment[:, 0]
        y = segment[:, 1]
        z = segment[:, 2]
        # subtract one for matlab 1 indexed problem
        x = x - 1
        y = y - 1
        z = z - 1
        # move to real space coordinates (um)
        x = x * timeDict['pixelSize'][0]
        y = y * timeDict['pixelSize'][1]
        z = z * timeDict['pixelSize'][2]
        sx = meanSmooth(x, smoothWin)
        sy = meanSmooth(y, smoothWin)
        sz = meanSmooth(z, smoothWin)
        # cumulative arc length along the smoothed trace
        dist = np.hstack([0, np.cumsum(np.sqrt(np.diff(sx) ** 2 + np.diff(sy) ** 2 + np.diff(sz) ** 2))])
        tree = KDTree(np.array(list(zip(x, y, z))))
        distances, indexes = tree.query(AnatomyCentersSeg)
        # concatenate per-segment results; offset indexes by points seen so far
        Indexes = np.hstack([Indexes, len(Dist) + np.array(indexes)])
        Dist = np.hstack([Dist, dist])
        maskOrder = np.hstack([maskOrder, maskIdx])
        segList = np.hstack([segList, segNum * np.ones(len(dist))])
    result2 = copy.copy(results)
    Indexes = Indexes.astype(int)
    segList = segList.astype(int)
    maskOrder = maskOrder.astype(int)
    nDend = len(All)
    # shift per-dendrite point indices in result2 to the concatenated indexing
    cumIdx = 0
    for i in range(result2.shape[1]):
        result2[0, i, :] = result2[0, i, :] + cumIdx
        result2[1, :, i] = result2[1, :, i] + cumIdx
        if i < nDend:
            if len(All[i].shape)>1:
                cumIdx = cumIdx + len(All[i])
            else:
                cumIdx = cumIdx + 1
    nMasks = len(Indexes)
    pathLengthGrid = np.ones([nMasks, nMasks]) * np.NAN
    somaTravGrid = np.ones([nMasks, nMasks]) * np.NAN
    relBranchOrder = np.ones([nMasks, nMasks]) * np.NAN
    for i in range(nMasks):
        for j in range(nMasks):
            segx = segList[Indexes[i]]
            segy = segList[Indexes[j]]
            if segx == segy:
                # same dendrite: distance directly along the trace
                pathLengthGrid[maskOrder[i], maskOrder[j]] = np.absolute(Dist[Indexes[i]] - Dist[Indexes[j]])
            else:
                # different dendrites: go through the closest connecting points
                distx = np.absolute(Dist[Indexes[i]] - Dist[int(result2[0, segx, segy])])
                disty = np.absolute(Dist[Indexes[j]] - Dist[int(result2[1, segx, segy])])
                pathLengthGrid[maskOrder[i], maskOrder[j]] = distx + disty + result2[2, segx, segy]
            somaTravGrid[maskOrder[i], maskOrder[j]] = result2[3, segx, segy]
            relBranchOrder[maskOrder[i], maskOrder[j]] = result2[4, segx, segy]
    pathLengthSoma = np.ones(nMasks) * np.NAN
    branchOrder = np.ones(nMasks) * np.NAN
    # column nDend of result2 holds the soma entry
    for i in range(nMasks):
        segx = segList[Indexes[i]]
        distx = np.absolute(Dist[Indexes[i]] - Dist[int(result2[0, segx, nDend])])
        pathLengthSoma[maskOrder[i]] = distx + result2[2, segx, nDend]
        branchOrder[maskOrder[i]] = result2[4, segx, nDend] - 1
    leafs = [x for x in G.nodes() if G.degree(x) == 1 and x != -1]
    nTerminal = len(leafs)
    pathLengthTerminals = np.ones([nMasks, nTerminal]) * np.NAN
    pathLengthTerminalsTrav = np.ones([nMasks, nTerminal]) * np.NAN
    # columns nDend+1 .. nDend+nTerminal of result2 hold the leaf entries
    for i in range(nMasks):
        for j in range(nTerminal):
            segx = segList[Indexes[i]]
            distx = np.absolute(Dist[Indexes[i]] - Dist[int(result2[0, segx, nDend + j])])
            pathLengthTerminals[maskOrder[i], j] = distx + result2[2, segx, nDend + j]
            pathLengthTerminalsTrav[maskOrder[i], j] = result2[3, segx, nDend + j]
    # blank out excluded masks
    if np.any(np.isfinite(excludeIndex)):
        pathLengthGrid[:, excludeIndex] = np.NAN
        pathLengthGrid[excludeIndex, :] = np.NAN
        pathLengthSoma[excludeIndex] = np.NAN
        branchOrder[excludeIndex] = np.NAN
        pathLengthTerminals[excludeIndex, :] = np.NAN
        pathLengthTerminalsTrav[excludeIndex, :] = np.NAN
        somaTravGrid[:, excludeIndex] = np.NAN
        somaTravGrid[excludeIndex, :] = np.NAN
        relBranchOrder[:, excludeIndex] = np.NAN
        relBranchOrder[excludeIndex, :] = np.NAN
    if makeB:
        timeDict['pathLengthGridB'] = pathLengthGrid
        timeDict['pathLengthSomaB'] = pathLengthSoma
        timeDict['branchOrderB'] = branchOrder
        timeDict['pathLengthTerminalsB'] = pathLengthTerminals
        timeDict['pathLengthTerminalsTravB'] = pathLengthTerminalsTrav
        timeDict['somaTravGridB'] = somaTravGrid
        timeDict['relBranchOrderB'] = relBranchOrder
    else:
        timeDict['pathLengthGrid'] = pathLengthGrid
        timeDict['pathLengthSoma'] = pathLengthSoma
        timeDict['branchOrder'] = branchOrder
        timeDict['pathLengthTerminals'] = pathLengthTerminals
        timeDict['pathLengthTerminalsTrav'] = pathLengthTerminalsTrav
        timeDict['somaTravGrid'] = somaTravGrid
        timeDict['relBranchOrder'] = relBranchOrder
def findPathLength(timeDict, session, makeB=False, plot=True, smoothWin=50):
    """ calculates the path length along the dendrite to all masks 0 is cell body
    Runs the full pipeline: build the graph from the SWC reconstruction,
    compute per-node path lengths, assign branch IDs, find the soma
    dendrites, then compute branch-to-branch and mask-to-mask distances.
    :param timeDict: time course dictionary
    :param session: session identifier used to locate the swc reconstruction
        (passed to get_graph_from_swc)
    :param makeB: use/store the *B variants in the downstream steps
    :param plot: diagnostic plotting flag passed to get_path_length
    :param smoothWin: smoothing window (points) passed to get_mask_to_mask
    :return: None; all results are stored in timeDict
    """
    get_graph_from_swc(timeDict, session)
    get_path_length(timeDict, makeB=makeB, plot=plot)
    get_branch_id(timeDict)
    get_soma_dendrites(timeDict)
    get_branch_to_branch(timeDict, makeB=makeB)
    get_mask_to_mask(timeDict, makeB=makeB, smoothWin=smoothWin)
def meanSmooth(x, winSize):
    """Centered running mean of *x* with symmetric handling at the edges.

    A (len(x), winSize + 1) matrix is filled with shifted copies of *x*
    (lags -winSize/2 .. +winSize/2) and each row is averaged with
    ``np.nanmean``.  Near either edge, the column at the mirrored lag is
    NaN-ed as well, so the effective window stays symmetric.
    """
    half = int(winSize / 2)
    shifted = np.ones([len(x), winSize + 1]) * np.NAN
    # past lags: column i holds x delayed by (half - i) samples
    for i in range(half):
        lag = half - i
        shifted[lag:, i] = x[:-lag]
        shifted[-lag:, i] = np.NAN  # mirror the missing future samples at the tail
    shifted[:, half] = x
    # future lags: column half + i holds x advanced by i samples
    for i in range(1, half + 1):
        shifted[:-i, i + half] = x[i:]
        shifted[:i, i + half] = np.NAN  # mirror the missing past samples at the head
    return np.nanmean(shifted, axis=1)
def get_excluded_index(dendrite_ids, path_length, dendrite_number, dist_threshold=10):
    """ checks for discontinuities in distance between reconstruction points along a dendrites and tries to solve them
    by either cutting from the first one onwards or cutting from the beginning
    :param dendrite_ids: reconstruction points for current dendrite
    :param path_length: path length along the dendrite between reconstruction points
    :param dendrite_number: dendrite number in session
    :param dist_threshold: two reconstruction points with a distance larger then dist count as a discontinuity
    :return: reconstruction points after deleting the discontinuities
    """
    # get distances between consecutive points
    dist_dend = []
    for i in range(len(dendrite_ids) - 1):
        dist_dend.append(path_length[int(dendrite_ids[i]-1), int(dendrite_ids[i + 1]-1)])
    large_dist = (np.array(dist_dend) > dist_threshold).nonzero()[0]
    n_pairs = len(large_dist) // 2
    # option 1: cut between consecutive discontinuity pairs, starting at the first one
    exclude_index_1 = []
    for i in range(n_pairs):
        exclude_index_1.extend(range(large_dist[i * 2] + 1, large_dist[i * 2 + 1] + 1))
    if len(large_dist) % 2:
        # odd number of discontinuities: also cut from the last one to the end.
        # BUGFIX: upper bound must be len(dendrite_ids) (exclusive) — the
        # previous `len(dendrite_ids) + 1` produced the out-of-range index
        # len(dendrite_ids), which np.delete rejects with an IndexError.
        exclude_index_1.extend(range(large_dist[-1] + 1, len(dendrite_ids)))
    large_dist2 = np.hstack((0, large_dist))
    n_pairs2 = len(large_dist2) // 2
    # option 2: treat the start of the dendrite as the first discontinuity
    exclude_index_2 = []
    for i in range(n_pairs2):
        exclude_index_2.extend(range(large_dist2[i * 2] + 1, large_dist2[i * 2 + 1] + 1))
    if len(large_dist2) % 2:
        # BUGFIX: same off-by-one as above
        exclude_index_2.extend(range(large_dist2[-1] + 1, len(dendrite_ids)))
    # The better one is the longer one (smaller exclusion points)
    if len(exclude_index_1) > 0 or len(exclude_index_2) > 0:
        if len(exclude_index_1) < len(exclude_index_2):
            if len(exclude_index_1) > 0:
                logger.info('Excluded dend Dist 1 %d: %s' % (dendrite_number, exclude_index_1))
                dendrite_ids = np.delete(dendrite_ids, exclude_index_1)
        elif len(exclude_index_2) > 0:
            logger.info('Excluded dend Dist 2 %d: %s' % (dendrite_number, exclude_index_2))
            dendrite_ids = np.delete(dendrite_ids, exclude_index_2)
    return dendrite_ids
def showDendrites(timeDict, showB=False, exclude=None):
    """ helper function to plot the dendrite numbers
    :param timeDict: time course dictionary
    :param showB: use the *B variants of the masks / label image
    :param exclude: optional collection of dendrite numbers to skip
    """
    if showB:
        Masks = timeDict['MasksB']
        labelimgAll = timeDict['labelimgAllB']
        dendLabelTable = timeDict['dendLabelTableB']
    else:
        Masks = timeDict['Masks']
        labelimgAll = timeDict['labelimgAll']
        dendLabelTable = timeDict['dendLabelTable']
    plt.figure(figsize=(12, 12))
    plt.imshow(labelimgAll.max(axis=2).transpose(1, 0))
    dendrite_rows = Masks[(Masks.MaskType == 'Dendrite')]
    dendrite_centers = np.asarray(list(dendrite_rows['Centers']))
    dendrite_ids = np.asarray(list(dendrite_rows.index))
    pixelSize = timeDict['pixelSize'][0]
    # label each dendrite mask with its index, slightly offset to the right
    for c, mask_id in zip(dendrite_centers, dendrite_ids):
        plt.text(c[0] / pixelSize + 18, c[1] / pixelSize, str(int(mask_id)), color='r')
    # label each dendrite number at the mean position of its masks
    for num in np.unique(dendLabelTable[:, 1].T):
        if exclude is not None and num in exclude:
            continue
        centers = Masks[(Masks.MaskType == 'Dendrite') & (Masks.DendNum == num)].Centers
        XY = (np.array(list(map(list, centers.values)))[:, [0, 1]].mean(axis=0) / pixelSize).astype(int)
        plt.text(XY[0], XY[1], str(num), size=20, color='white')
def showSpines(timeDict, selected=None, pixSize=376.666, showB=False):
    """Show the max projection of the label image with the selected masks
    circled and annotated with their index.

    :param timeDict: time course dictionary
    :param selected: iterable of mask indices to highlight
    :param pixSize: pixel size used to convert mask centers to plot coordinates
    :param showB: use the MasksB / labelimgAllB entries
    """
    masks = timeDict['MasksB'] if showB else timeDict['Masks']
    labelimgAll = timeDict['labelimgAllB'] if showB else timeDict['labelimgAll']
    shapes = []
    annotations = []
    for mark in selected:
        cx = masks.loc[mark].Centers[0] * 1000.0 / pixSize
        cy = masks.loc[mark].Centers[1] * 1000.0 / pixSize
        # green circle around the mask center
        circle = {
            'type': 'circle',
            'xref': 'x',
            'yref': 'y',
            'x0': cx - 4,
            'y0': cy - 4,
            'x1': cx + 4,
            'y1': cy + 4,
            'line': {
                'color': 'rgba(50, 171, 96, 1)',
            },
        }
        shapes.append(circle)
        # arrow annotation with the mask index just above the circle
        label = dict(
            x=cx,
            y=cy + 5,
            xref='x',
            yref='y',
            text=str(mark),
            font=dict(
                family='Courier New, monospace',
                size=16,
                color='#ffffff'),
            showarrow=True,
            arrowhead=7,
            ax=0,
            ay=-40
        )
        annotations.append(label)
    layout = go.Layout(height=labelimgAll.shape[1] * 2,
                       width=labelimgAll.shape[0] * 2,
                       shapes=shapes,
                       annotations=annotations)
    trace = go.Heatmap(z=labelimgAll.max(axis=2).transpose(1, 0))
    py.iplot(dict(data=[trace], layout=layout))
def get_path_length_grid(timeDict):
    """Build an all-masks x all-masks path-length matrix from the per-node
    path lengths in ``timeDict['pathLengthAll']``.

    :param timeDict: time course dictionary; reads 'Masks', 'pathLengthAll'
        and 'excludeIndex'
    :return: None; stores 'pathLengthGrid' in timeDict (and converts a
        dict-of-dicts 'pathLengthAll' into a dense numpy array in place)
    """
    Masks = timeDict['Masks']
    if isinstance(timeDict['pathLengthAll'], dict):
        # legacy format {node -> {node2 -> dist}}: convert to a dense array.
        # BUGFIX: use .items() — .iteritems() is Python-2-only and raises
        # AttributeError on Python 3 dicts.
        a = np.zeros((len(timeDict['pathLengthAll']) + 1, len(timeDict['pathLengthAll']) + 1))
        for key, value in timeDict['pathLengthAll'].items():
            for key2 in sorted(value):
                a[int(key), int(key2)] = value[key2]
        timeDict['pathLengthAll'] = a
    pathLength = timeDict['pathLengthAll']
    pathLengthGrid = np.zeros((len(Masks), len(Masks)))
    for x in range(len(Masks)):
        xNode = int(Masks.loc[x].ParentId)
        for y in range(len(Masks)):
            yNode = int(Masks.loc[y].ParentId)
            if x != y:
                # ParentId is 1-based, the path-length array is 0-based
                pathLengthGrid[x, y] = pathLength[xNode - 1, yNode - 1]
    excludeIndex = timeDict['excludeIndex']
    # NOTE(review): somaPathLength is computed but never stored anywhere and
    # the spine assignment below is a self-assignment (no-op); kept unchanged
    # for behavioral compatibility.
    somaPathLength = np.array(timeDict['Masks']['PathLength'])
    spineIdx = np.array(timeDict['Masks']['MaskType'] == 'Spine').nonzero()[0]
    somaPathLength[spineIdx] = somaPathLength[spineIdx]
    if np.any(np.isfinite(excludeIndex)):
        pathLengthGrid[:, excludeIndex] = np.NAN
        # pathLengthGrid[excludeIndex, :] = np.NAN
    timeDict['pathLengthGrid'] = pathLengthGrid
def get_high_res_path_length_grid(timeDict, smoothWin=50):
    """Compute pairwise along-dendrite path lengths between masks on the same
    dendrite segment, using the high-resolution dendrite traces.

    Masks on different segments get NaN.  Stores 'pathLengthGridHR'.

    :param timeDict: time course dictionary; reads 'dend', 'Masks',
        'pixelSize' and 'excludeIndex'
    :param smoothWin: window size (points) for smoothing the trace before
        measuring arc length
    :return: None; stores 'pathLengthGridHR' in timeDict
    """
    def meanSmooth(x, winSize):
        # centered running mean with symmetric NaN handling at the edges.
        # BUGFIX: use integer arithmetic (//) — the previous `winSize/2`
        # produced floats, making range() and the indexing below raise
        # TypeError on Python 3.
        halfWin = winSize // 2
        X = np.ones([len(x), winSize + 1]) * np.NAN
        for i in range(halfWin):
            X[(halfWin - i):, i] = x[:-(halfWin - i)]
            X[-(halfWin - i):, i] = np.NAN
        X[:, halfWin] = x
        for i in range(1, halfWin + 1):
            X[:-i, i + halfWin] = x[i:]
            X[:i, i + halfWin] = np.NAN
        return np.nanmean(X, axis=1)
    All = []
    for x in timeDict['dend']:
        # reorder trace columns to the (x, y, z) convention used below
        if len(x.shape) > 1:
            All.append(x[:, [1, 0, 2]])
        else:
            All.append(x[[1, 0, 2]])
    Masks = timeDict['Masks']
    nMasks = len(Masks)
    AnatomyCenters = np.array([np.array(x) for x in Masks.Centers])
    # find closest point in interpolated space
    segList = np.zeros(nMasks)
    Dist = np.zeros(nMasks)
    for segNum, segment in enumerate(All):
        maskIdx = (Masks.DendNum == segNum).values.nonzero()[0]
        AnatomyCentersSeg = AnatomyCenters[maskIdx]
        if len(segment.shape) < 2:
            # single-point segment: arc length is zero for all its masks
            Dist[maskIdx] = np.zeros(len(maskIdx))
            segList[maskIdx] = segNum * np.ones(len(maskIdx))
            continue
        x = segment[:, 0]
        y = segment[:, 1]
        z = segment[:, 2]
        # subtract one for matlab 1 indexed problem
        x = x - 1
        y = y - 1
        z = z - 1
        # move to real space coordinates (um)
        x = x * timeDict['pixelSize'][0]
        y = y * timeDict['pixelSize'][1]
        z = z * timeDict['pixelSize'][2]
        sx = meanSmooth(x, smoothWin)
        sy = meanSmooth(y, smoothWin)
        sz = meanSmooth(z, smoothWin)
        # cumulative arc length along the smoothed trace
        dist = np.hstack([0, np.cumsum(np.sqrt(np.diff(sx) ** 2 + np.diff(sy) ** 2 + np.diff(sz) ** 2))])
        tree = KDTree(np.array(list(zip(x, y, z))))
        distances, indexes = tree.query(AnatomyCentersSeg)
        Dist[maskIdx] = dist[indexes]
        segList[maskIdx] = segNum * np.ones(len(maskIdx))
    pathLengthGrid = np.ones([nMasks, nMasks]) * np.NAN
    for i in range(nMasks):
        for j in range(nMasks):
            if segList[i] == segList[j]:
                pathLengthGrid[i, j] = np.absolute(Dist[i] - Dist[j])
    excludeIndex = timeDict['excludeIndex']
    if np.any(np.isfinite(excludeIndex)):
        pathLengthGrid[:, excludeIndex] = np.NAN
    timeDict['pathLengthGridHR'] = pathLengthGrid
def get_branch_dist(timeDict, plot=False, warn_dist=10):
    """For every pair of imaged dendrites, find the closest connecting
    reconstruction points and the path length between them.

    :param timeDict: time course dictionary
    :param plot: if True, show the resulting distance matrix
    :param warn_dist: log when the median distance between a dendrite trace
        and the reconstruction exceeds this value
    :return: results array of shape (3, num_dend, num_dend) holding
        (point in dend x, point in dend y, distance) per dendrite pair
    """
    # Note: Not working most of the time
    dend = timeDict['dend']
    num_dend = len(dend)
    transformInv = loadTransform(timeDict['path'])
    pixelSizeSession = timeDict['pixelSize']
    xyStep = timeDict['UM_1X'] / timeDict['xyPixNum']
    # find closest point in interpolated space
    img = tf.imread(timeDict['SessionPath']).astype(int).transpose(1, 2, 0)
    imgSize = img.shape
    Info = timeDict['InfoSWC']
    All = Info['AllSWC']
    Connecting = list([])
    segList = list([])
    # unravel the SWC voxel indices into real-space coordinates per segment
    for segNum, segment in enumerate(All):
        x, y, z = np.unravel_index(segment, imgSize, 'F')
        # subtract one for matlab 1 indexed problem
        x = x - 1
        y = y - 1
        z = z - 1
        # move to real space coordinates (um)
        x = x * xyStep
        y = y * xyStep
        z = z * timeDict['anatomyZstep']
        if len(x.shape) > 0:
            Connecting.extend(list(zip(x, y, z)))
            segList.extend([segNum] * x.shape[0])
        else:
            Connecting.append((x, y, z))
            segList.append(segNum)
    Connecting = np.array(Connecting)
    tree = KDTree(Connecting)
    Table2 = np.array(timeDict['InfoSWC']['TableSWC'])
    Table = Table2[:, 2:5]
    Table[:, 0] = Table[:, 0]  # * xyStep
    Table[:, 1] = Table[:, 1]  # * xyStep
    Table[:, 2] = Table[:, 2]  # * timeDict['anatomyZstep']
    tree2 = KDTree(Table)
    # results in ((point in dend x, point in dend y, dist), dend x, dend y)
    results = np.zeros((3, num_dend, num_dend))
    for x, y in itertools.product(range(num_dend), range(num_dend)):
        dend1 = copy.deepcopy(dend[x]).astype('float')
        dend2 = copy.deepcopy(dend[y]).astype('float')
        dend1_fov = []
        dend2_fov = []
        # transform session pixel coordinates into reconstruction space
        for center in dend1:
            point = transformInv.TransformPoint(
                (center[2], center[1] * pixelSizeSession[1], center[0] * pixelSizeSession[0]))
            dend1_fov.append((point[1], point[2], point[0]))
        for center in dend2:
            point = transformInv.TransformPoint(
                (center[2], center[1] * pixelSizeSession[1], center[0] * pixelSizeSession[0]))
            dend2_fov.append((point[1], point[2], point[0]))
        distances, indexes = tree.query(dend1_fov)
        if np.median(distances) > warn_dist:
            logger.info('Median dist high: = %f' % np.median(distances))
        distances_dend1, indexes_dend1 = tree2.query(dend1_fov)
        Ids_dend1 = Table2[indexes_dend1, 0]
        distances_dend2, indexes_dend2 = tree2.query(dend2_fov)
        Ids_dend2 = Table2[indexes_dend2, 0]
        dendNum_dend2 = Table2[indexes_dend2, -1]
        dendNum_dend1 = Table2[indexes_dend1, -1]
        # keep only points on the most frequently hit reconstruction dendrite
        # when the trace jumps between reconstruction dendrites
        test = copy.deepcopy(dendNum_dend1)
        uD, counts = np.unique(test, return_counts=True)
        exIdx = []
        for k in uD:
            check = (test == k).nonzero()[0]
            if len(check) > 1:
                if np.max(np.diff(check)) > 1:
                    exIdx = (test != uD[np.argmax(counts)]).nonzero()[0]
            else:
                exIdx = (test != uD[np.argmax(counts)]).nonzero()[0]
        if len(exIdx) > 0:
            logger.info('Excluded dend1: %s' % exIdx)
            Ids_dend1 = np.delete(Ids_dend1, exIdx)
        test = copy.deepcopy(dendNum_dend2)
        uD, counts = np.unique(test, return_counts=True)
        exIdx = []
        for k in uD:
            check = (test == k).nonzero()[0]
            if len(check) > 1:
                if np.max(np.diff(check)) > 1:
                    exIdx = (test != uD[np.argmax(counts)]).nonzero()[0]
            else:
                exIdx = (test != uD[np.argmax(counts)]).nonzero()[0]
        if len(exIdx) > 0:
            logger.info('Excluded dend2: %s' % exIdx)
            Ids_dend2 = np.delete(Ids_dend2, exIdx)
        Ids_dend2 = np.unique(Ids_dend2).astype('int')
        Ids_dend1 = np.unique(Ids_dend1).astype('int')
        # find the pair of reconstruction points with the smallest path length
        dist = np.inf
        dend1_close = 0
        dend2_close = 0
        for x2, y2 in itertools.product(Ids_dend1, Ids_dend2):
            if timeDict['pathLengthAll'][x2][y2] < dist:
                dist = timeDict['pathLengthAll'][x2][y2]
                dend1_close = x2
                dend2_close = y2
        if np.isfinite(dist):
            # index (within each trace) of the point closest to the connection
            Ids_dend1_best = np.where(Table2[indexes_dend1, 0] == dend1_close)[0]
            dend1_best_dist = distances_dend1[Ids_dend1_best]
            dend1_best_offset = np.argmin(dend1_best_dist)
            dend1_best_point = Ids_dend1_best[dend1_best_offset]
            Ids_dend2_best = np.where(Table2[indexes_dend2, 0] == dend2_close)[0]
            dend2_best_dist = distances_dend2[Ids_dend2_best]
            dend2_best_offset = np.argmin(dend2_best_dist)
            dend2_best_point = Ids_dend2_best[dend2_best_offset]
            results[:, x, y] = np.array([dend1_best_point, dend2_best_point, dist])
        else:
            results[:, x, y] = np.array([np.nan, np.nan, dist])
    if plot:
        plt.imshow(results[2, :, :])
        plt.colorbar(label='Distance')
        plt.xlabel('Dend#')
        plt.ylabel('Dend#')
    return results
def get_mask_index(timeDict, mask='Spine', use_B=False, noise_th=None):
    """Return indices of masks of a given type, without the excluded masks.

    :param timeDict: timeDict to use
    :param mask: options are 'Spine' and 'Dendrite'
    :param use_B: use the MasksB / excludeIndexB / TCNoiseB entries
    :param noise_th: if None will return all mask index if float will return mean noise < then threshold
    :return: index of masks
    """
    suffix = 'B' if use_B else ''
    mask_table = timeDict['Masks' + suffix]
    excluded = timeDict['excludeIndex' + suffix]
    candidates = (mask_table.MaskType == mask).values.nonzero()[0]
    kept = np.setdiff1d(candidates, excluded)
    if noise_th is None:
        return kept
    # additionally require the mean noise of the mask to be below threshold
    mean_noise = np.nanmean(timeDict['TCNoise' + suffix], axis=1)
    low_noise = (mean_noise < noise_th).nonzero()[0]
    return np.intersect1d(kept, low_noise)
|
"""
Preprocessing worker class for parallel text processing.
"""
import multiprocessing as mp
import re
import logging
from ..utils import merge_dict_sequences_inplace
from ._common import ngrams, vocabulary, vocabulary_counts, doc_frequencies, sparse_dtm, \
glue_tokens, remove_chars, transform, _build_kwic, expand_compounds, clean_tokens, filter_tokens, \
filter_documents, filter_documents_by_name, filter_for_pos, filter_tokens_by_mask, filter_tokens_with_kwic
logger = logging.getLogger('tmtoolkit')
logger.addHandler(logging.NullHandler())
pttrn_metadata_key = re.compile(r'^meta_(.+)$')
class PreprocWorker(mp.Process):
    """Worker process holding a shard of documents for parallel preprocessing.

    A task named ``'foo'`` received on ``tasks_queue`` is dispatched to the
    method ``_task_foo``; a ``None`` sentinel shuts the worker down.  Results,
    when a task produces any, are put on ``results_queue``.
    """
    def __init__(self, worker_id, language, tasks_queue, results_queue, tokenizer, stemmer, lemmatizer, pos_tagger,
                 group=None, target=None, name=None, args=(), kwargs=None):
        """
        :param worker_id: numeric ID of this worker
        :param language: language of the documents (passed to the tokenizer)
        :param tasks_queue: joinable queue delivering (task name, kwargs) pairs
        :param results_queue: queue on which task results are put
        :param tokenizer: tokenizer function
        :param stemmer: stemmer function
        :param lemmatizer: lemmatizer function
        :param pos_tagger: POS tagger instance (must have a callable attribute `tag`)
        """
        super().__init__(group, target, name, args, kwargs or {}, daemon=True)
        logger.debug('worker `%s`: init with worker ID %d' % (name, worker_id))
        self.worker_id = worker_id
        self.language = language
        self.tasks_queue = tasks_queue
        self.results_queue = results_queue
        # set a tokenizer
        self.tokenizer = tokenizer      # tokenizer function
        # set a stemmer
        self.stemmer = stemmer          # stemmer function
        # set a lemmatizer
        self.lemmatizer = lemmatizer    # lemmatizer function
        # set a POS tagger
        self.pos_tagger = pos_tagger    # POS tagger instance (must have a callable attribute `tag`)
        self.pattern_module = None      # dynamically loaded CLiPS pattern library module
        self.germalemma = None          # GermaLemma instance
        self.wordnet_lemmatizer = None  # nltk.stem.WordNetLemmatizer instance
        self._doc_labels = []    # list of document labels for self._tokens
        self._tokens = []        # tokens for this worker at the current processing stage;
                                 # list of token string lists, one per document
        self._tokens_meta = []   # dict of lists with metadata for each token in each document {meta_... -> list}
        self._metadata_keys = []  # metadata keys without the "meta_" prefix
        self._ngrams = []        # generated ngrams as list of token strings
    def run(self):
        """Main worker loop: consume tasks until the ``None`` sentinel arrives."""
        logger.debug('worker `%s`: run' % self.name)
        for next_task, task_kwargs in iter(self.tasks_queue.get, None):
            logger.debug('worker `%s`: received task `%s`' % (self.name, next_task))
            # BUGFIX: pass a default of None so that an unknown task name
            # reaches the explicit NotImplementedError below instead of
            # raising AttributeError inside getattr()
            exec_task_fn = getattr(self, '_task_' + next_task, None)
            if exec_task_fn:
                exec_task_fn(**task_kwargs)
            else:
                raise NotImplementedError("Task not implemented: `%s`" % next_task)
            self.tasks_queue.task_done()
        logger.debug('worker `%s`: shutting down' % self.name)
        self.tasks_queue.task_done()   # acknowledge the sentinel itself
    def _task_init(self, docs, docs_are_tokenized):
        """Load raw or pre-tokenized documents into this worker."""
        logger.debug('worker `%s`: docs = %s' % (self.name, str(set(docs.keys()))))
        self._doc_labels = list(docs.keys())
        self._ngrams = []
        if docs_are_tokenized:
            logger.info('got %d already tokenized documents' % len(docs))
            self._tokens = [doc['token'] for doc in docs.values()]
            meta_keys = None
            self._tokens_meta = []
            for dl, doc in docs.items():
                doc_meta = {k: metadata for k, metadata in doc.items() if k.startswith('meta_')}
                self._tokens_meta.append(doc_meta)
                if not all(k.startswith('meta_') for k in doc_meta.keys()):
                    raise ValueError('all meta data keys must start with "meta_"'
                                     ' but this is not the case in document `%s`' % dl)
                # all documents must agree on the set of metadata keys
                if meta_keys is None:
                    meta_keys = set(doc_meta.keys())
                else:
                    if meta_keys != set(doc_meta.keys()):
                        raise ValueError('all documents must contain the same meta data keys')
            self._metadata_keys = [k[5:] for k in meta_keys]  # strip "meta_"
        else:
            # directly tokenize documents
            logger.info('tokenizing %d documents' % len(docs))
            self._tokens = self.tokenizer(list(docs.values()), language=self.language)
            self._tokens_meta = [{} for _ in range(len(docs))]
    def _task_get_doc_labels(self):
        """Put this worker's document labels in the result queue."""
        self.results_queue.put(self._doc_labels)
    def _task_get_tokens(self):
        # tokens with metadata: {label -> {token: [...], meta_...: [...]}}
        self.results_queue.put(dict(zip(self._doc_labels,
                                        (dict(meta, token=t) for t, meta in zip(self._tokens, self._tokens_meta)))))
    def _task_replace_tokens(self, tokens):
        """Replace this worker's token lists per document label."""
        assert set(tokens.keys()) == set(self._doc_labels)
        for dl, dt in tokens.items():
            self._tokens[self._doc_labels.index(dl)] = dt
    def _task_get_available_metadata_keys(self):
        self.results_queue.put(self._metadata_keys)
    def _task_get_vocab(self):
        """Put this worker's vocabulary in the result queue."""
        self.results_queue.put(vocabulary(self._tokens))
    def _task_get_vocab_counts(self):
        self.results_queue.put(vocabulary_counts(self._tokens))
    def _task_get_vocab_doc_frequencies(self):
        self.results_queue.put(doc_frequencies(self._tokens))
    def _task_get_ngrams(self):
        self.results_queue.put(dict(zip(self._doc_labels, self._ngrams)))
    def _task_get_dtm(self):
        """
        Put this worker's document-term-matrix (DTM), the document labels and sorted vocabulary in the result queue.
        """
        # create a sparse DTM in COO format
        logger.info('creating sparse DTM for %d documents' % len(self._doc_labels))
        dtm, vocab = sparse_dtm(self._tokens)
        # put tuple in queue with:
        # DTM, document labels that correspond to DTM rows and vocab that corresponds to DTM columns
        self.results_queue.put((dtm, self._doc_labels, vocab))
    def _task_get_state(self):
        """Put a picklable snapshot of this worker's processing state in the result queue."""
        logger.debug('worker `%s`: getting state' % self.name)
        state_attrs = (
            'language',
            '_doc_labels',
            '_tokens',
            '_tokens_meta',
            '_ngrams',
            '_metadata_keys'
        )
        state = {attr: getattr(self, attr) for attr in state_attrs}
        logger.debug('worker `%s`: got state with %d items' % (self.name, len(state)))
        self.results_queue.put(state)
    def _task_set_state(self, **state):
        """Restore a state snapshot previously produced by _task_get_state."""
        logger.debug('worker `%s`: setting state' % self.name)
        for attr, val in state.items():
            setattr(self, attr, val)
    def _task_add_metadata_per_token(self, key, data, default):
        """Add a metadata column mapping token string -> value (with fallback `default`)."""
        logger.debug('worker `%s`: adding metadata per token' % self.name)
        col = 'meta_' + key
        for dt, dmeta in zip(self._tokens, self._tokens_meta):
            dmeta[col] = []
            for t in dt:
                dmeta[col].append(data.get(t, default))
        if key not in self._metadata_keys:
            self._metadata_keys.append(key)
    def _task_add_metadata_per_doc(self, key, data):
        """Add a metadata column with one list of values per document label."""
        logger.debug('worker `%s`: adding metadata per document' % self.name)
        col = 'meta_' + key
        for dl, tmeta in zip(self._doc_labels, self._tokens_meta):
            tmeta[col] = data[dl]
        if key not in self._metadata_keys:
            self._metadata_keys.append(key)
    def _task_remove_metadata(self, key):
        """Remove a metadata column from all documents."""
        logger.debug('worker `%s`: removing metadata column' % self.name)
        if key in self._metadata_keys:
            col = 'meta_' + key
            for tmeta in self._tokens_meta:
                del tmeta[col]
            self._metadata_keys.pop(self._metadata_keys.index(key))
    def _task_generate_ngrams(self, n):
        self._ngrams = ngrams(self._tokens, n, join=False)
    def _task_use_joined_ngrams_as_tokens(self, join_str):
        self._tokens = [list(map(lambda g: join_str.join(g), dngrams)) for dngrams in self._ngrams]
        # do reset because meta data doesn't match any more:
        self._clear_metadata()
        # reset ngrams as they're used as normal tokens now.
        # BUGFIX: reset to a list (not a dict) for consistency with __init__,
        # _task_init and _task_get_state
        self._ngrams = []
    def _task_transform_tokens(self, transform_fn, **kwargs):
        self._tokens = transform(self._tokens, transform_fn, **kwargs)
    def _task_tokens_to_lowercase(self):
        self._tokens = transform(self._tokens, str.lower)
    def _task_stem(self):
        self._tokens = self.stemmer(self._tokens)
    def _task_remove_chars(self, chars):
        self._tokens = remove_chars(self._tokens, chars=chars)
    def _task_pos_tag(self):
        """POS-tag all documents and merge the tags into the token metadata."""
        pos_tags = self.pos_tagger(self._tokens)
        merge_dict_sequences_inplace(self._tokens_meta, pos_tags)
        if 'pos' not in self._metadata_keys:
            self._metadata_keys.append('pos')
    def _task_lemmatize(self):
        self._tokens = self.lemmatizer(self._tokens, self._tokens_meta)
    def _task_expand_compound_tokens(self, split_chars=('-',), split_on_len=2, split_on_casechange=False):
        """
        Note: This function will reset the token dataframe `self._tokens` to the newly created tokens. This means
        all token metadata will be gone.
        """
        self._tokens = expand_compounds(self._tokens, split_chars=split_chars, split_on_len=split_on_len,
                                        split_on_casechange=split_on_casechange)
        # do reset because meta data doesn't match any more:
        self._clear_metadata()
    def _task_clean_tokens(self, tokens_to_remove, remove_shorter_than, remove_longer_than, remove_numbers):
        # punctuation, empty token and stopwords may already be included in `tokens_to_remove`
        self._tokens, self._tokens_meta = clean_tokens(self._tokens, self._tokens_meta, remove_punct=False,
                                                       remove_stopwords=tokens_to_remove, remove_empty=False,
                                                       remove_shorter_than=remove_shorter_than,
                                                       remove_longer_than=remove_longer_than,
                                                       remove_numbers=remove_numbers)
    def _task_get_kwic(self, search_tokens, highlight_keyword, with_metadata, with_window_indices, context_size,
                       match_type, ignore_case, glob_method, inverse):
        """Build keyword-in-context windows and put them in the result queue."""
        docs = list(zip(self._tokens, self._tokens_meta)) if self._metadata_keys else self._tokens
        kwic = _build_kwic(docs, search_tokens,
                           highlight_keyword=highlight_keyword,
                           with_metadata=with_metadata,
                           with_window_indices=with_window_indices,
                           context_size=context_size,
                           match_type=match_type,
                           ignore_case=ignore_case,
                           glob_method=glob_method,
                           inverse=inverse)
        # result is a dict with doc label -> list of kwic windows, where each kwic window is dict with
        # token -> token list and optionally meta_* -> meta data list
        self.results_queue.put(dict(zip(self._doc_labels, kwic)))
    def _task_glue_tokens(self, patterns, glue, match_type, ignore_case, glob_method, inverse):
        """Join runs of matching tokens into single tokens; put the glued tokens in the result queue."""
        new_tokens_and_meta, glued_tokens = glue_tokens(list(zip(self._tokens, self._tokens_meta)), patterns,
                                                        glue=glue, match_type=match_type, ignore_case=ignore_case,
                                                        glob_method=glob_method, inverse=inverse,
                                                        return_glued_tokens=True)
        if new_tokens_and_meta:
            # BUGFIX: convert to lists — zip(*...) yields tuples, which would
            # break index assignment in _task_replace_tokens later on
            self._tokens, self._tokens_meta = map(list, zip(*new_tokens_and_meta))
        # result is a set of glued tokens
        self.results_queue.put(glued_tokens)
    def _task_filter_tokens_by_mask(self, mask, inverse):
        mask_list = [mask[dl] for dl in self._doc_labels]
        self._tokens, self._tokens_meta = filter_tokens_by_mask(self._tokens, mask_list, self._tokens_meta,
                                                                inverse=inverse)
    def _task_filter_tokens(self, search_tokens, match_type, ignore_case, glob_method, inverse, by_meta):
        if by_meta:
            by_meta = 'meta_' + by_meta
        self._tokens, self._tokens_meta = filter_tokens(self._tokens, search_tokens, self._tokens_meta,
                                                        match_type=match_type, ignore_case=ignore_case,
                                                        glob_method=glob_method, inverse=inverse, by_meta=by_meta)
    def _task_filter_tokens_with_kwic(self, search_tokens, context_size, match_type, ignore_case,
                                      glob_method, inverse):
        self._tokens, self._tokens_meta = filter_tokens_with_kwic(self._tokens, search_tokens, self._tokens_meta,
                                                                  context_size=context_size, match_type=match_type,
                                                                  ignore_case=ignore_case, glob_method=glob_method,
                                                                  inverse=inverse)
    def _task_filter_documents(self, search_tokens, by_meta, matches_threshold, match_type, ignore_case, glob_method,
                               inverse_result, inverse_matches):
        if by_meta:
            by_meta = 'meta_' + by_meta
        self._tokens, self._tokens_meta, self._doc_labels = filter_documents(
            self._tokens, search_tokens, by_meta=by_meta, docs_meta=self._tokens_meta, doc_labels=self._doc_labels,
            matches_threshold=matches_threshold, match_type=match_type, ignore_case=ignore_case,
            glob_method=glob_method, inverse_result=inverse_result, inverse_matches=inverse_matches
        )
    def _task_filter_documents_by_name(self, name_patterns, match_type, ignore_case, glob_method, inverse):
        self._tokens, self._doc_labels, self._tokens_meta = filter_documents_by_name(self._tokens, self._doc_labels,
                                                                                     name_patterns, self._tokens_meta,
                                                                                     match_type=match_type,
                                                                                     ignore_case=ignore_case,
                                                                                     glob_method=glob_method,
                                                                                     inverse=inverse)
    def _task_filter_for_pos(self, required_pos, pos_tagset, simplify_pos, inverse):
        self._tokens, self._tokens_meta = filter_for_pos(self._tokens, self._tokens_meta,
                                                         required_pos=required_pos,
                                                         tagset=pos_tagset,
                                                         simplify_pos=simplify_pos,
                                                         inverse=inverse)
    def _clear_metadata(self):
        # drop all per-token metadata (used when tokens were re-created)
        self._tokens_meta = [{} for _ in range(len(self._tokens))]
        self._metadata_keys = []
|
#!/usr/bin/env python
# coding: utf-8
# # LCR in series
# 
# Resonating frequency
# \begin{equation}
# f_0=\frac{1}{2\pi\sqrt{LC}}
# \end{equation}
# Quality factor
# \begin{equation}
# Q=\frac{1}{R}\sqrt{\frac{L}{C}}
# \end{equation}
# Inductive reactance
# \begin{equation}
# X_L=2\pi fL
# \end{equation}
# Capacitive reactance
# \begin{equation}
# X_C=\frac{1}{2\pi fC}
# \end{equation}
# Impedance
# \begin{equation}
# z=\sqrt{(X_L-X_C)^2+R^2}
# \end{equation}
# Current
# \begin{equation}
# I=\frac{V}{z}
# \end{equation}
# Phase angle
# \begin{equation}
# \phi=\tan^{-1}(\frac{X_L-X_C}{R})
# \end{equation}
# Power consumed
# \begin{equation}
# P=VI\cos\phi
# \end{equation}
# P.d. across L
# \begin{equation}
# V_L=IX_L
# \end{equation}
# P.d. across C
# \begin{equation}
# V_C=IX_C
# \end{equation}
# P.d. across R
# \begin{equation}
# V_R=IR
# \end{equation}
# P.d. across LC
# \begin{equation}
# V_{LC}=I(X_L -X_C)
# \end{equation}
# In[1]:
import numpy as np
# In[2]:
def fo(L, C):
    """Return the resonant frequency f0 = 1 / (2*pi*sqrt(L*C)) of a series LCR circuit."""
    return 1.0 / (2.0 * np.pi * np.sqrt(L * C))
# In[3]:
def Q(L, C, R):
    """Return the quality factor Q = (1/R) * sqrt(L/C) of a series LCR circuit."""
    return np.sqrt(L / C) / R
# In[4]:
def inductiveReactance(L, f):
    """Return the inductive reactance X_L = 2*pi*f*L."""
    return 2.0 * np.pi * f * L
# In[5]:
def capacitiveReactance(C, f):
    """Return the capacitive reactance X_C = 1 / (2*pi*f*C)."""
    return 1.0 / (2.0 * np.pi * C * f)
# In[6]:
def impedance(L, C, R, f):
    """Return the impedance z = sqrt((X_L - X_C)^2 + R^2) of a series LCR circuit.

    ``C == 0`` is treated as "no capacitor": the capacitive reactance term is
    dropped entirely, which also avoids a division by zero in X_C.
    """
    # BUG FIX (readability): the helpers were called as inductiveReactance(f, L) /
    # capacitiveReactance(f, C), the reverse of their declared (L, f) / (C, f)
    # signatures. The result happened to be identical because both formulas are
    # plain products, but the calls were misleading; they now match the signatures.
    XL = inductiveReactance(L, f)
    if C == 0:
        # purely R-L circuit
        return np.sqrt(XL ** 2 + R ** 2)
    XC = capacitiveReactance(C, f)
    return np.sqrt((XL - XC) ** 2 + R ** 2)
# In[7]:
def phase(L, C, R, f):
    """Return the phase angle phi = atan((X_L - X_C) / R) of a series LCR circuit, in degrees.

    ``C == 0`` is treated as "no capacitor": the capacitive reactance term is dropped.
    """
    # BUG FIX (readability): helper calls used swapped argument order (f, L)/(f, C)
    # versus the declared (L, f)/(C, f) signatures; harmless only because the
    # formulas are commutative products. Calls now match the signatures.
    # The local previously named `pi` (confusing next to np.pi) is now `reactance`.
    XL = inductiveReactance(L, f)
    if C == 0:
        reactance = XL
    else:
        reactance = XL - capacitiveReactance(C, f)
    return np.arctan(reactance / R) * 180 / np.pi
# In[8]:
def power(L, C, R, f, v):
    """Return the power P = V*I*cos(phi) consumed by a series LCR circuit driven at voltage v."""
    phi_deg = phase(L, C, R, f)
    current = v / impedance(L, C, R, f)
    return v * current * np.cos(np.pi * phi_deg / 180)
# In[ ]:
|
import json
class BaseNamespace(object):
    """Base websocket namespace: tracks connected clients and provides
    broadcast / targeted send helpers. Subclasses override the ``on_*`` hooks."""

    # namespace identifier; subclasses set this
    name = None

    def __init__(self, server):
        self.server = server
        self.clients = []  # all currently connected clients of this namespace
        super(BaseNamespace, self).__init__()

    def get_name(self):
        """ Get namespace name """
        return self.name

    def _register_client(self, client):
        """ Add client to list of connected clients """
        self.clients.append(client)

    def _unregister_client(self, client):
        """ Remove client from list of connected clients """
        self.clients.remove(client)

    def on_start(self, client):
        """ Client starts connection (hook, no-op by default) """
        pass

    def client_connected(self, client):
        """ Client connected """
        self._register_client(client)

    def on_stop(self, code, reason):
        """ Server stopped (hook, no-op by default) """
        pass

    def client_disconnected(self, client):
        """ Client disconnected """
        self._unregister_client(client)

    def on_message(self, client, message):
        """ Message received from client (hook, no-op by default) """
        pass

    def _format_outbound_data(self, message):
        """ Ensure character encoding """
        return message.encode('utf8')

    def emit(self, message):
        """ Send message to all connected clients """
        data = self._format_outbound_data(message)
        for client in self.clients:
            client.sendMessage(data)

    def emit_to(self, client, message):
        """ Send message to single client """
        data = self._format_outbound_data(message)
        client.sendMessage(data)

    def emit_except(self, client, message):
        """ Send message to all clients except single """
        data = self._format_outbound_data(message)
        for client_connected in self.clients:
            if client != client_connected:
                # BUG FIX: previously this sent to the *excluded* `client` once per
                # other peer, and the other peers received nothing at all
                client_connected.sendMessage(data)
class EventMixin(object):
    """Mixin layering a JSON event protocol onto a namespace: inbound messages
    are parsed as JSON and dispatched to registered callbacks by their 'event'
    key; outbound messages are serialized as JSON."""

    def __init__(self, server):
        super(EventMixin, self).__init__(server)
        # event name -> callable(client, **payload)
        self.callbacks = self.register_callbacks()

    def register_callbacks(self):
        """Return the event callback map; subclasses extend this."""
        return {}

    def _handle_inbound_message(self, client, message):
        """ Take an inbound message, parse and fire any callbacks """
        payload = self._parse_inbound_message(message)
        if payload:
            event = payload.pop('event')
            self._fire_callback(client, event, **payload)

    def _fire_callback(self, client, event, **kwargs):
        """ Check for registered callbacks and fire """
        if event in self.callbacks:
            try:
                self.callbacks[event](client, **kwargs)
            except TypeError:
                # NOTE(review): this also swallows TypeErrors raised *inside* the
                # callback, not only signature mismatches -- confirm intent
                print('An Error occurred calling event [%s]' % event)

    def _parse_inbound_message(self, message):
        """ Parse inbound JSON message; return the payload dict only when it
        carries an 'event' key, otherwise None. """
        payload = json.loads(message)
        # BUG FIX: previously `output` was only assigned inside the `if`, so a
        # payload without an 'event' key raised UnboundLocalError instead of
        # returning None as the caller (_handle_inbound_message) expects.
        if 'event' in payload:
            return payload
        return None

    def on_message(self, client, message):
        """ Message received from client """
        self._handle_inbound_message(client, message)

    def _format_outbound_data(self, event, **kwargs):
        """ Format outbound message as JSON """
        message = {'event': event}
        message.update(kwargs)
        return json.dumps(message).encode('utf8')

    def emit(self, event, **kwargs):
        """ Send message to all connected clients """
        data = self._format_outbound_data(event, **kwargs)
        for client in self.clients:
            client.sendMessage(data)

    def emit_to(self, client, event, **kwargs):
        """ Send message to single client """
        client.sendMessage(self._format_outbound_data(event, **kwargs))

    def emit_except(self, client, event, **kwargs):
        """ Send message to all clients except single """
        data = self._format_outbound_data(event, **kwargs)
        for other in self.clients:
            if other != client:
                # BUG FIX: previously this sent to the *excluded* `client`
                # instead of each other connected client
                other.sendMessage(data)
class RoomMixin(EventMixin):
    """Mixin adding room (channel) management on top of the event protocol:
    clients can create, destroy, join, leave and broadcast to named rooms."""

    def __init__(self, server):
        super(RoomMixin, self).__init__(server)
        # room name -> {'name': <room name>, 'clients': {peer id -> client}}
        self.rooms = {}

    def register_callbacks(self):
        callbacks = super(RoomMixin, self).register_callbacks()
        callbacks.update({
            'room/create': self.create_room,
            'room/destroy': self.destroy_room,
            'room/join': self.join_room,
            'room/leave': self.leave_room,
            'room/broadcast': self.broadcast_room,
            'room/peers': self.get_room_peers,
        })
        return callbacks

    def create_room(self, client, **kwargs):
        """Create a new room; optionally join it right away (join=True)."""
        name = kwargs.get('name')
        join = kwargs.get('join', False)
        if name and name not in self.rooms:
            self.rooms[name] = {'name': name, 'clients': {}}
            self.emit_to(client, 'room/created', name=name)
            if join:
                self.join_room(client, name=name)

    def destroy_room(self, client, **kwargs):
        """Remove every client from a room, then delete the room."""
        name = kwargs.get('name')
        if name and name in self.rooms:
            # BUG FIX: take a snapshot of the clients before iterating, because
            # leave_room() deletes entries from the dict we are iterating over
            # (RuntimeError "dictionary changed size during iteration" on Python 3)
            for room_client in list(self.rooms[name]['clients'].values()):
                self.leave_room(room_client, name=name)
            del self.rooms[name]
            self.emit_to(client, 'room/destroyed', name=name)
        else:
            self.emit_to(client, 'room/error', message='Room %s does not exist' % name)

    def get_room_peers(self, client, **kwargs):
        """Send the requesting client the peer ids of everyone in the room."""
        name = kwargs.get('name')
        if name and name in self.rooms:
            clients = [c.peer for c in self.rooms[name]['clients'].values()]
            self.emit_to(client, 'room/peers', peers=clients)
        else:
            self.emit_to(client, 'room/error', message='Room %s does not exist' % name)

    def join_room(self, client, **kwargs):
        """Add a client to an existing room (keyed by its peer id)."""
        name = kwargs.get('name')
        if name and name in self.rooms and not self.rooms[name]['clients'].get(client.peer):
            self.rooms[name]['clients'][client.peer] = client
            self.emit_to(client, 'room/joined', name=name)
        else:
            # NOTE(review): this message is also sent when the room exists but the
            # client already joined it -- kept byte-identical for protocol compatibility
            self.emit_to(client, 'room/error', message='Room %s does not exist' % name)

    def leave_room(self, client, **kwargs):
        """Remove a client from a room; confirm unless respond=False."""
        name = kwargs.get('name')
        respond = kwargs.get('respond', True)
        if name and name in self.rooms and self.rooms[name]['clients'].get(client.peer):
            del self.rooms[name]['clients'][client.peer]
            if respond:
                self.emit_to(client, 'room/left', name=name)

    def broadcast_room(self, client, **kwargs):
        """Broadcast a payload to a room; exclude the sender unless exclude=False."""
        name = kwargs.get('name')
        payload = kwargs.get('payload')
        exclude = kwargs.get('exclude', True)
        if name and name in self.rooms:
            for room_client in list(self.rooms[name]['clients'].values()):
                if not exclude or room_client.peer != client.peer:
                    self.emit_to(
                        room_client, 'room/broadcast', name=name, payload=payload)
        else:
            self.emit_to(client, 'room/error', message='Room %s does not exist' % name)
|
"""
.. module:: test_offers_list
"""
from django.test import Client
from django.test import TestCase
from apps.volontulo.factories import OfferFactory
from apps.volontulo.factories import OrganizationFactory
from apps.volontulo.factories import UserFactory
class TestOffersList(TestCase):
    """Class responsible for testing offers' list."""

    def setUp(self):
        """Set up each test."""
        self.client = Client()

    def _make_offers(self, published, unpublished, rejected):
        """Create a mix of published, unpublished and rejected offers."""
        OfferFactory.create_batch(
            published,
            offer_status='published',
            finished_at=None,
            recruitment_end_date=None,
        )
        OfferFactory.create_batch(unpublished, offer_status='unpublished')
        OfferFactory.create_batch(rejected, offer_status='rejected')

    def _test_offers_list(self, response):
        """Test offers' list."""
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'offers/offers_list.html')
        self.assertIn('offers', response.context)

    def _assert_visible_offers(self, expected_count):
        """Request the list page and assert how many offers are shown."""
        response = self.client.get('/o/offers')
        self._test_offers_list(response)
        self.assertEqual(len(response.context['offers']), expected_count)

    def test_offer_list_for_anonymous_user(self):
        """Test offers' list for anonymous user."""
        self._make_offers(31, 48, 52)
        self._assert_visible_offers(31)

    def test_offers_list_for_volunteer(self):
        """Test offers' list for account of volunteer."""
        self._make_offers(67, 73, 89)
        self.client.force_login(UserFactory())
        self._assert_visible_offers(67)

    def test_offers_list_for_organization(self):
        """Test offers' list for account of organization."""
        self._make_offers(96, 17, 22)
        self.client.force_login(UserFactory(
            userprofile__organizations=[OrganizationFactory()]
        ))
        self._assert_visible_offers(96)

    def test_offers_list_for_admin(self):
        """Test offers' list for account of admin."""
        OfferFactory.create_batch(37)
        self.client.force_login(UserFactory(
            userprofile__is_administrator=True
        ))
        self._assert_visible_offers(37)
|
import unittest
import numpy as np
import time
from src.classes.ElasticsearchManager import ElasticsearchManager
from src.classes.Evaluator import Evaluator
from src.classes.TestPoint import TestPoint
class EvaluationTest(unittest.TestCase):
elasticsearch_manager = ElasticsearchManager()
data2index = [{'song_id': 1, 'order': 3, 'text': 'Sobre la misma columna, abrazados sueño y tiempo, '
'cruza el gemido del niño la lengua rota del viejo.',
'sum': np.array([ 3.8112035 , 6.461136 , -13.84816 , 5.04407 ,
-1.6740245 , -1.2053071 , 3.2149568 , -3.644608 ,
-1.7899718 , -2.0110445 , 0.25535575, 0.49017105,
0.71916217, -4.750382 , 8.204537 , 0.7010732 ,
-2.8565137 , 1.083416 , -14.311666 , 4.689531 ,
4.4421973 , -1.8226001 , 0.3795364 , 3.5189137 ,
-1.9924208 , -3.8792305 , 8.910662 , 34.019318 ,
2.5567718 , -47.496086 , 7.73366 , -0.80188185,
2.6975303 , -6.585491 , 0.5928852 , 11.253685 ,
-0.6661468 , 1.3343561 , 0.05017442, 1.493859 ,
-1.3678993 , 2.9184039 , 3.879466 , -0.4928972 ,
-1.3107916 , 5.591168 , 1.7346828 , 17.410196 ,
-6.0068493 , -4.821767 ], dtype=np.float32),
'avg': np.array([ 0.18148582, 0.30767313, -0.65943605, 0.24019372, -0.07971542,
-0.05739557, 0.15309316, -0.17355275, -0.08523673, -0.09576401,
0.01215978, 0.02334148, 0.03424581, -0.2262086 , 0.39069217,
0.03338442, -0.1360244 , 0.05159124, -0.6815077 , 0.22331098,
0.21153311, -0.08679048, 0.01807315, 0.16756728, -0.09487714,
-0.18472524, 0.4243171 , 1.619967 , 0.12175096, -2.261718 ,
0.36826944, -0.03818485, 0.1284538 , -0.31359473, 0.02823262,
0.5358897 , -0.03172124, 0.06354076, 0.00238924, 0.07113614,
-0.06513806, 0.13897161, 0.18473643, -0.02347128, -0.06241862,
0.266246 , 0.08260392, 0.8290568 , -0.28604037, -0.22960788], dtype=np.float32),
'avg_lr': np.array([ 0.19392723, 0.11801735, -0.32044923, 0.04462085, -0.07447376,
0.04306156, 0.10905372, -0.05030317, -0.36574993, -0.07990918,
-0.08962523, -0.16386826, -0.22537605, -0.01042583, 0.4620869 ,
-0.01444411, -0.25359532, 0.18288183, -0.7531914 , 0.25275943,
-0.12297437, -0.1265708 , -0.5352364 , -0.12142786, -0.05804923,
-0.05342373, 0.17092949, 1.5374788 , -0.33195567, -1.4517102 ,
0.08242837, -0.2842704 , 0.08815224, -0.2780639 , -0.2764373 ,
0.26829422, -0.20610271, -0.03368074, -0.3960486 , 0.07934909,
0.14359465, 0.3164819 , 0.41278106, 0.11252502, 0.0325764 ,
-0.3338063 , -0.29342914, 0.4313024 , -0.06514245, 0.18526307], dtype=np.float32),
'avg_rl': np.array([ 0.14985615, 0.19946195, -0.34792173, 0.28069377, -0.2582239 ,
-0.17086765, 0.01033928, -0.11159404, -0.13844237, -0.04625382,
0.11482433, 0.00899755, -0.02789113, -0.17785698, 0.09764477,
0.23684515, 0.0615494 , -0.24898489, -0.49848396, 0.06383756,
0.14771557, -0.01351789, 0.08085184, 0.01663818, -0.11678985,
0.20054413, 0.09569419, 0.9939368 , 0.10276383, -1.1752803 ,
0.08172406, 0.10021856, 0.17324036, -0.03508691, -0.01389044,
0.11442792, -0.14002573, 0.0332142 , 0.0456631 , -0.03575406,
0.13788545, -0.01529012, 0.12483697, 0.25208715, -0.2671963 ,
0.26442116, 0.48853353, 0.17468967, -0.28593445, -0.39851677], dtype=np.float32),
'ind_lr': np.array([ 0.83816063, 2.84916 , -4.384205 , 1.7014989 ,
-0.44728348, -1.0541391 , 1.311351 , -2.4516284 ,
-0.65873885, -0.99333566, -0.9858063 , 0.21791673,
0.06653951, -0.94553435, 2.9614515 , 0.28354403,
-0.5358636 , 0.65762573, -5.866606 , 2.087831 ,
0.8027413 , -1.2485006 , -0.86632156, 1.1674827 ,
-0.6909008 , -1.797154 , 3.418196 , 11.933183 ,
-0.6288728 , -15.270812 , 2.7424464 , -1.5730083 ,
1.3667605 , -2.3041918 , -0.07635482, 3.5318084 ,
0.23779015, 0.12798578, -1.2832134 , 0.69262743,
-0.33595443, 2.197013 , 1.6172109 , 0.20472884,
-0.681797 , 0.8127152 , -0.40612757, 6.1103 ,
-2.0347562 , -0.7756372 ], dtype=np.float32),
'ind_rl': np.array([ 1.7998058e+00, 1.6621950e+00, -4.9755545e+00, 1.7493361e+00,
-7.1265119e-01, -1.3975640e-01, 4.3191579e-01, -5.0100833e-02,
-1.1383892e+00, -3.8136491e-01, 1.2367058e+00, 5.9856609e-03,
1.4455952e-01, -1.9424580e+00, 2.5219853e+00, 3.2558990e-01,
-1.3634257e+00, -4.3966094e-01, -4.1938901e+00, 9.5286202e-01,
2.3097346e+00, 1.1955422e-02, 7.5862998e-01, 8.7034398e-01,
-8.5799533e-01, -4.3146366e-01, 2.4223366e+00, 1.1383011e+01,
2.1920116e+00, -1.6425179e+01, 2.4184883e+00, 8.4616745e-01,
4.4815612e-01, -2.1926739e+00, 9.1712147e-02, 3.9924145e+00,
-9.6326357e-01, 4.0882218e-01, 1.2426647e+00, 1.1072733e-01,
-2.3166960e-01, -3.9750887e-03, 1.2818067e+00, -2.3328599e-03,
-5.2195448e-01, 2.7477942e+00, 2.0889249e+00, 5.2451692e+00,
-2.2399297e+00, -2.7674379e+00], dtype=np.float32),
'jnt_lr': np.array([ 0.3233068 , 1.0093342 , -1.4547672 , 0.43732688, -0.13290155,
-0.38975897, 0.44655964, -0.90518075, -0.51377463, -0.3918801 ,
-0.54990953, -0.11193292, -0.24339348, -0.07500584, 1.2583451 ,
0.06097966, -0.28588307, 0.35410485, -2.4396284 , 0.8731539 ,
0.05175538, -0.5512834 , -0.8475285 , 0.20698242, -0.2800762 ,
-0.6226798 , 1.1011168 , 4.6666946 , -0.8266682 , -5.1415057 ,
0.8203634 , -0.9316503 , 0.5135308 , -0.90999657, -0.3724221 ,
1.2156518 , -0.06894905, -0.08949597, -0.929269 , 0.24325064,
0.07265904, 1.1465993 , 0.862668 , 0.25654992, -0.19128697,
-0.29890436, -0.49141517, 2.0410845 , -0.6133869 , 0.11361574], dtype=np.float32),
'jnt_rl': np.array([ 0.7989331 , 0.721257 , -1.9651432 , 0.89229673, -0.4566897 ,
-0.31471473, -0.14836027, 0.03957848, -0.72650737, -0.10834058,
0.7638733 , -0.03483372, -0.03953627, -0.7272708 , 0.74324405,
0.34846336, -0.40325707, -0.67631614, -1.793362 , 0.1955108 ,
1.0828704 , 0.09661449, 0.36322337, 0.14888965, -0.4735605 ,
0.2696389 , 0.7805857 , 4.504867 , 1.0460072 , -6.1981277 ,
0.8686409 , 0.4839213 , 0.18244408, -0.7846143 , -0.05805688,
1.3828493 , -0.5360329 , 0.02916135, 0.7182476 , -0.1576386 ,
0.14566483, -0.08702 , 0.47208953, 0.3734332 , -0.51130646,
1.2819313 , 1.5802686 , 1.655284 , -1.1562059 , -1.6153266 ], dtype=np.float32)},
{'song_id': 1, 'order': 4, 'text': 'El sueño va sobre el tiempo flotando como un velero. Nadie puede abrir semillas en el corazón del sueño.',
'sum': np.array([-2.89330602e+00, 2.07143211e+00, -1.43969145e+01, 8.74479198e+00,
-1.05252724e+01, 5.00645685e+00, 4.15580177e+00, -9.00595951e+00,
-3.53039205e-01, -1.78322947e+00, -2.70735547e-02, -2.63288110e-01,
1.18394232e+00, -8.56595612e+00, 1.28102837e+01, 5.90152502e+00,
1.95395303e+00, 7.33172131e+00, -1.33936214e+01, 7.10451508e+00,
-1.79425943e+00, -5.03592777e+00, -7.74336755e-01, -7.95431137e-01,
3.63477874e+00, -3.01156735e+00, 7.11308193e+00, 4.46435509e+01,
-4.23541498e+00, -5.55025177e+01, 9.94962811e-01, -4.10589874e-01,
5.50712585e+00, -5.53023529e+00, 4.03232276e-01, 4.13736200e+00,
1.65698922e+00, 5.08512354e+00, -6.68517542e+00, -2.81402767e-01,
2.60318422e+00, -2.13262844e+00, 7.25675726e+00, -1.16735804e+00,
2.50720000e+00, 3.32147622e+00, 4.25464916e+00, 1.25266132e+01,
-5.32372999e+00, 9.54083264e-01], dtype=np.float32),
'avg': np.array([-1.3777646e-01, 9.8639622e-02, -6.8556732e-01, 4.1641858e-01,
-5.0120342e-01, 2.3840271e-01, 1.9789530e-01, -4.2885521e-01,
-1.6811388e-02, -8.4915675e-02, -1.2892367e-03, -1.2537515e-02,
5.6378193e-02, -4.0790266e-01, 6.1001348e-01, 2.8102499e-01,
9.3045369e-02, 3.4912962e-01, -6.3779134e-01, 3.3831024e-01,
-8.5440904e-02, -2.3980604e-01, -3.6873180e-02, -3.7877671e-02,
1.7308465e-01, -1.4340797e-01, 3.3871818e-01, 2.1258831e+00,
-2.0168641e-01, -2.6429768e+00, 4.7379181e-02, -1.9551899e-02,
2.6224408e-01, -2.6334453e-01, 1.9201543e-02, 1.9701724e-01,
7.8904226e-02, 2.4214874e-01, -3.1834161e-01, -1.3400137e-02,
1.2396113e-01, -1.0155370e-01, 3.4555984e-01, -5.5588473e-02,
1.1939045e-01, 1.5816556e-01, 2.0260239e-01, 5.9650534e-01,
-2.5351089e-01, 4.5432527e-02], dtype=np.float32),
'avg_lr': np.array([ 0.15221712, 0.01680356, -0.27159917, 0.05978177, -0.1294413 ,
0.13677162, 0.05283751, 0.02087875, -0.3912576 , -0.05795757,
0.00639257, -0.08509801, -0.22624512, -0.03958573, 0.4056398 ,
-0.03583173, -0.27597028, 0.19036712, -0.6177855 , 0.17364144,
-0.17924121, -0.1192288 , -0.5132951 , -0.21492776, -0.02338856,
0.06183354, 0.20081927, 1.5143685 , -0.23609233, -1.5326194 ,
0.07134164, -0.18566804, 0.09517938, -0.2366734 , -0.25543636,
0.21957585, -0.19186178, -0.03386737, -0.31405905, 0.02609778,
0.15898977, 0.17519134, 0.46928844, 0.08060586, 0.03494119,
-0.32736334, -0.29203704, 0.38820857, -0.10169771, 0.1885377 ], dtype=np.float32),
'avg_rl': np.array([ 0.02474649, 0.05819888, -0.573494 , 0.42665127, -0.24005497,
0.28954008, 0.09553529, -0.21929392, 0.29730704, 0.10720031,
0.08352794, 0.17329699, 0.04786861, -0.19757096, 0.23520862,
0.05047477, 0.06384695, 0.3810828 , -0.42088643, 0.31991526,
-0.00657294, -0.1602312 , 0.01934432, -0.05308695, 0.15261653,
-0.18768208, 0.2468504 , 1.1301225 , 0.18912414, -1.6818371 ,
-0.0048908 , -0.1138081 , 0.19941933, -0.33451518, 0.32685688,
0.16142951, 0.19340065, 0.29646033, -0.22019152, 0.00283915,
-0.11352554, 0.06445522, 0.16041861, -0.22228748, 0.12385077,
0.1235975 , 0.09817057, 0.49406344, -0.01718818, -0.14590521], dtype=np.float32),
'ind_lr': np.array([ -0.9560043 , 0.74864763, -3.765377 , 2.4052265 ,
-2.77335 , 1.0839007 , 1.2075448 , -2.1991012 ,
-0.6914869 , -0.76155233, 0.09039193, -0.10694574,
0.03332234, -1.7913584 , 3.5899918 , 1.1811031 ,
0.34719622, 2.8550045 , -4.058856 , 1.9895933 ,
-0.9538663 , -1.4185168 , -1.3774501 , -0.70839 ,
0.8904926 , -0.4290378 , 2.50113 , 13.62771 ,
-2.04847 , -16.60081 , 1.122193 , -0.45304167,
1.788585 , -1.783916 , -0.6683887 , 1.7882961 ,
0.7354394 , 1.23965 , -1.9961023 , 0.402608 ,
0.9159312 , -0.31116977, 2.2644942 , 0.1399888 ,
0.6378886 , 0.22950995, 0.14772351, 4.650332 ,
-2.1952698 , 0.632337 ], dtype=np.float32),
'ind_rl': np.array([ -0.581247 , 0.39402282, -4.9604454 , 2.9153156 ,
-3.266195 , 2.0353367 , 0.98867047, -2.6893725 ,
0.62404895, -0.10151213, -0.08678932, 0.48683947,
0.42926547, -2.726957 , 3.5541263 , 1.9105643 ,
0.38475624, 1.7816558 , -4.0001183 , 2.3178246 ,
-0.04037939, -1.6770916 , 0.4089873 , 0.14326122,
1.2136039 , -1.2672318 , 2.1164768 , 13.180422 ,
-0.20970072, -16.815361 , -0.3536604 , 0.05701282,
1.6455649 , -1.7376695 , 1.2953197 , 0.9829821 ,
0.36719003, 2.045711 , -1.7104778 , -0.7316018 ,
0.25689864, -0.29095447, 2.463865 , -0.9082185 ,
0.5469501 , 1.4106001 , 1.8059125 , 3.192147 ,
-1.0163623 , -0.06175064], dtype=np.float32),
'jnt_lr': np.array([-0.07275456, 0.17217042, -1.1263629 , 0.59132475, -0.6857762 ,
0.3987299 , 0.2849644 , -0.3324792 , -0.5218429 , -0.22449493,
-0.03418753, 0.03964447, -0.27699992, -0.21312396, 1.0892309 ,
0.07211531, -0.310736 , 0.927751 , -1.513601 , 0.6188457 ,
-0.4136719 , -0.44481295, -0.9485517 , -0.42960307, 0.18169609,
-0.0129453 , 0.9944089 , 4.7210374 , -0.72786057, -5.4232216 ,
0.4896481 , -0.3219961 , 0.5593054 , -0.6756606 , -0.40184957,
0.7977413 , 0.06200888, 0.29339567, -0.67070514, 0.07990008,
0.30586833, 0.26401204, 1.0705179 , 0.08937053, 0.0528974 ,
-0.41401106, -0.46073523, 1.6320033 , -0.6971418 , 0.4227604 ], dtype=np.float32),
'jnt_rl': np.array([-0.10433966, 0.11216903, -1.6568075 , 1.0434439 , -0.89176613,
0.80805904, 0.20958407, -0.6883172 , 0.46864402, 0.14392492,
0.06814389, 0.43872485, 0.13348505, -0.6933242 , 0.83879924,
0.4133435 , 0.10168462, 0.75389224, -1.2080264 , 0.7677034 ,
0.04103406, -0.5851089 , 0.09137189, 0.0179385 , 0.4091576 ,
-0.44279754, 0.7216208 , 3.78728 , 0.24054618, -5.247924 ,
-0.09463028, -0.06590685, 0.53914046, -0.7077037 , 0.778091 ,
0.38039932, 0.29535806, 0.79152143, -0.43658957, -0.28203836,
-0.18825239, 0.11821863, 0.7924443 , -0.44980785, 0.19513777,
0.42662686, 0.42894912, 1.1275042 , -0.20928451, -0.1669173 ], dtype=np.float32)},
{'song_id': 1, 'order': 5, 'text': 'Y si el sueño finge muros en la llanura del tiempo, el tiempo le hace creer que nace en aquel momento.',
'sum': np.array([ 3.7074268e-02, 1.5430588e+00, -1.7152964e+01, 3.9774880e+00,
-5.3974833e+00, 1.0771395e+01, 6.6784630e+00, -4.5759897e+00,
2.0924686e-01, 1.2154318e+00, 8.0518789e+00, 2.6338562e-01,
3.3570745e+00, -8.2154388e+00, 1.4792612e+01, 2.2173135e+00,
-7.8243124e-01, 6.7036977e+00, -1.7671240e+01, 6.4473681e+00,
-1.7581272e+00, -9.6440786e-01, 4.5143104e+00, 1.3005688e+00,
4.9527577e-01, -5.8810024e+00, 8.0009967e-01, 4.4197514e+01,
-4.3221474e-01, -6.2706039e+01, 7.2572751e+00, -7.6013958e-01,
1.1463945e+00, -8.9373140e+00, 4.5343890e+00, 1.0558471e+01,
4.4871516e+00, 1.5362015e+00, -1.5907261e+00, 4.8438649e+00,
3.3840530e+00, 9.3605125e-01, 5.1252422e+00, -6.5651703e+00,
7.3519959e+00, 6.1627707e+00, 3.4690635e+00, 2.0037996e+01,
-5.3401504e+00, -3.5174913e+00], dtype=np.float32),
'avg': np.array([ 1.6119247e-03, 6.7089483e-02, -7.4578094e-01, 1.7293426e-01,
-2.3467319e-01, 4.6832147e-01, 2.9036790e-01, -1.9895607e-01,
9.0977112e-03, 5.2844871e-02, 3.5008174e-01, 1.1451569e-02,
1.4595973e-01, -3.5719299e-01, 6.4315689e-01, 9.6404947e-02,
-3.4018777e-02, 2.9146510e-01, -7.6831478e-01, 2.8032035e-01,
-7.6440305e-02, -4.1930784e-02, 1.9627437e-01, 5.6546476e-02,
2.1533735e-02, -2.5569573e-01, 3.4786910e-02, 1.9216309e+00,
-1.8791942e-02, -2.7263491e+00, 3.1553367e-01, -3.3049569e-02,
4.9843248e-02, -3.8857883e-01, 1.9714732e-01, 4.5906392e-01,
1.9509353e-01, 6.6791400e-02, -6.9161996e-02, 2.1060281e-01,
1.4713269e-01, 4.0697902e-02, 2.2283661e-01, -2.8544214e-01,
3.1965199e-01, 2.6794654e-01, 1.5082887e-01, 8.7121725e-01,
-2.3218048e-01, -1.5293440e-01], dtype=np.float32),
'avg_lr': np.array([ 1.74142137e-01, -4.12776172e-02, -3.14953357e-01, 5.04224300e-02,
-1.84748262e-01, 2.44230166e-01, 8.18314254e-02, 2.14998703e-02,
-4.17606175e-01, -5.95579632e-02, 1.36689946e-01, -5.35124131e-02,
-2.77189791e-01, 1.11063162e-03, 4.99425590e-01, 4.50880192e-02,
-3.17140341e-01, 2.45671511e-01, -7.37083852e-01, 1.85516179e-01,
-1.53060809e-01, -1.26070946e-01, -4.56448674e-01, -2.11672977e-01,
4.25977856e-02, 5.71176186e-02, 5.94370626e-02, 1.52598047e+00,
-2.70000905e-01, -1.56916082e+00, 8.11051652e-02, -2.23962530e-01,
6.39941320e-02, -1.84817269e-01, -1.86895102e-01, 1.82426453e-01,
-2.23148331e-01, -6.00789450e-02, -3.90294820e-01, 1.04011945e-01,
2.02530459e-01, 2.32314244e-01, 4.58155721e-01, 5.85919507e-02,
1.67503908e-01, -3.17569464e-01, -2.47506812e-01, 4.67474818e-01,
-9.90276933e-02, 1.98683694e-01], dtype=np.float32),
'avg_rl': np.array([ 5.42506874e-01, 6.14591874e-02, -3.80852997e-01, 7.41498470e-02,
1.79457024e-01, 1.77301675e-01, 2.47330397e-01, -1.14368893e-01,
2.21586619e-02, -2.51080871e-01, 1.93734720e-01, -2.13004276e-01,
2.04762891e-01, -2.47040942e-01, 5.27382374e-01, -1.64230123e-01,
-5.45285940e-02, 1.03808329e-01, -2.38486961e-01, 1.08501688e-01,
2.18019530e-01, 1.58441886e-01, 1.33741856e-01, 2.26398855e-01,
-8.10915828e-02, -3.10751855e-01, 1.31267965e-01, 1.19775498e+00,
2.01977640e-01, -1.70817411e+00, 2.45067358e-01, 4.11196798e-01,
3.68274860e-02, -3.89029570e-02, 5.14446422e-02, 5.34862399e-01,
-1.21803246e-01, -1.17766336e-02, -1.55592198e-03, 2.06468850e-01,
4.53350693e-01, -6.34831339e-02, 1.02519654e-01, -1.67760849e-01,
9.19132903e-02, 2.12078854e-01, -2.53156364e-01, 6.00426972e-01,
4.53431100e-01, -2.68470585e-01], dtype=np.float32),
'ind_lr': np.array([ -0.88831717, 0.24831587, -5.1061244 , 0.6849945 ,
-2.2774763 , 3.7854755 , 1.9598377 , -1.001539 ,
-0.7862652 , 0.5724057 , 2.4542897 , 0.5723223 ,
0.27798644, -1.3474704 , 4.575991 , 1.050538 ,
-0.6725811 , 2.0006497 , -5.8091207 , 1.418683 ,
-1.0845457 , -0.65661335, 0.86320126, -0.03474319,
0.78704625, -1.6864686 , -0.5801598 , 13.256074 ,
-1.531362 , -17.839825 , 2.3836026 , -1.0714753 ,
0.47004285, -2.490346 , 0.94096583, 2.276003 ,
1.4099106 , 0.70057124, -0.803312 , 1.695398 ,
1.7381285 , 0.41230646, 2.1620557 , -1.7189971 ,
2.410092 , 0.6165768 , 0.6375734 , 5.6160116 ,
-1.6766409 , 0.09233488], dtype=np.float32),
'ind_rl': np.array([ 1.4224304e+00, 5.3140837e-01, -4.4781504e+00, 1.6375847e+00,
-8.6502707e-01, 2.8587065e+00, 1.8467976e+00, -1.6129864e+00,
4.0780100e-01, -3.4294885e-01, 2.1422455e+00, -4.8183683e-01,
1.4453974e+00, -2.8849034e+00, 4.4971151e+00, -4.3541968e-02,
4.1546759e-01, 1.8422382e+00, -4.1576786e+00, 2.3892343e+00,
-5.9357461e-02, 1.7383495e-01, 1.4253455e+00, 7.0604903e-01,
-3.7320805e-01, -1.7422086e+00, 1.1220577e+00, 1.2491101e+01,
1.3809321e+00, -1.8531551e+01, 2.0918524e+00, 1.2480193e+00,
4.0521243e-01, -2.1114559e+00, 1.5316216e+00, 4.2219634e+00,
7.7784866e-01, -1.4361412e-02, -6.8871933e-01, 1.0588771e+00,
9.6985769e-01, -2.5332203e-02, 1.4189867e+00, -1.9597895e+00,
1.6059254e+00, 2.5315933e+00, 1.0746602e+00, 6.2048283e+00,
-4.5984453e-01, -1.8887380e+00], dtype=np.float32),
'jnt_lr': np.array([-0.11193945, -0.02510553, -1.3462944 , 0.23512045, -0.86718726,
1.2763674 , 0.4894174 , -0.14623585, -0.70204145, 0.03119629,
0.62459207, 0.24097115, -0.41607884, -0.00637874, 1.595245 ,
0.3419923 , -0.45965475, 0.7574518 , -1.9743053 , 0.533826 ,
-0.4627424 , -0.38840663, -0.2297261 , -0.30239177, 0.37294456,
-0.3116396 , -0.05621603, 4.3600464 , -0.73693925, -5.370359 ,
0.7200255 , -0.4791905 , 0.3171538 , -0.5955046 , 0.07319058,
0.71895117, 0.04575472, 0.08656178, -0.735977 , 0.4660712 ,
0.6565869 , 0.36776865, 1.0766151 , -0.23433594, 0.67487353,
-0.32533512, -0.05832986, 1.8175799 , -0.4528809 , 0.42797306], dtype=np.float32),
'jnt_rl': np.array([ 1.0576944 , 0.1340197 , -1.4327841 , 0.489822 , 0.05540311,
0.90329725, 0.64011097, -0.49385288, 0.08391705, -0.5256082 ,
0.6916369 , -0.4263655 , 0.6441632 , -0.9098489 , 1.6087046 ,
-0.3030429 , 0.20321709, 0.48924682, -1.0995408 , 0.68954486,
0.16672449, 0.31775123, 0.3770052 , 0.45145768, -0.17302163,
-0.7325166 , 0.40739253, 4.0230436 , 0.7156022 , -6.0599113 ,
0.74057186, 1.0564579 , 0.10942155, -0.33744887, 0.3318163 ,
1.5930433 , 0.00784878, -0.03644298, -0.29792687, 0.37913224,
0.8267147 , -0.15115644, 0.5142379 , -0.7156059 , 0.48489606,
0.80901426, -0.06616106, 2.0172307 , 0.7023924 , -0.7523226 ], dtype=np.float32)},
{'song_id': 1, 'order': 6, 'text': 'El sueño va sobre el tiempo flotando como un velero. Nadie puede abrir semillas en el corazón del sueño.',
'sum': np.array([-2.89330602e+00, 2.07143211e+00, -1.43969145e+01, 8.74479198e+00,
-1.05252724e+01, 5.00645685e+00, 4.15580177e+00, -9.00595951e+00,
-3.53039205e-01, -1.78322947e+00, -2.70735547e-02, -2.63288110e-01,
1.18394232e+00, -8.56595612e+00, 1.28102837e+01, 5.90152502e+00,
1.95395303e+00, 7.33172131e+00, -1.33936214e+01, 7.10451508e+00,
-1.79425943e+00, -5.03592777e+00, -7.74336755e-01, -7.95431137e-01,
3.63477874e+00, -3.01156735e+00, 7.11308193e+00, 4.46435509e+01,
-4.23541498e+00, -5.55025177e+01, 9.94962811e-01, -4.10589874e-01,
5.50712585e+00, -5.53023529e+00, 4.03232276e-01, 4.13736200e+00,
1.65698922e+00, 5.08512354e+00, -6.68517542e+00, -2.81402767e-01,
2.60318422e+00, -2.13262844e+00, 7.25675726e+00, -1.16735804e+00,
2.50720000e+00, 3.32147622e+00, 4.25464916e+00, 1.25266132e+01,
-5.32372999e+00, 9.54083264e-01], dtype=np.float32),
'avg': np.array([-1.3777646e-01, 9.8639622e-02, -6.8556732e-01, 4.1641858e-01,
-5.0120342e-01, 2.3840271e-01, 1.9789530e-01, -4.2885521e-01,
-1.6811388e-02, -8.4915675e-02, -1.2892367e-03, -1.2537515e-02,
5.6378193e-02, -4.0790266e-01, 6.1001348e-01, 2.8102499e-01,
9.3045369e-02, 3.4912962e-01, -6.3779134e-01, 3.3831024e-01,
-8.5440904e-02, -2.3980604e-01, -3.6873180e-02, -3.7877671e-02,
1.7308465e-01, -1.4340797e-01, 3.3871818e-01, 2.1258831e+00,
-2.0168641e-01, -2.6429768e+00, 4.7379181e-02, -1.9551899e-02,
2.6224408e-01, -2.6334453e-01, 1.9201543e-02, 1.9701724e-01,
7.8904226e-02, 2.4214874e-01, -3.1834161e-01, -1.3400137e-02,
1.2396113e-01, -1.0155370e-01, 3.4555984e-01, -5.5588473e-02,
1.1939045e-01, 1.5816556e-01, 2.0260239e-01, 5.9650534e-01,
-2.5351089e-01, 4.5432527e-02], dtype=np.float32),
'avg_lr': np.array([ 0.15221712, 0.01680356, -0.27159917, 0.05978177, -0.1294413 ,
0.13677162, 0.05283751, 0.02087875, -0.3912576 , -0.05795757,
0.00639257, -0.08509801, -0.22624512, -0.03958573, 0.4056398 ,
-0.03583173, -0.27597028, 0.19036712, -0.6177855 , 0.17364144,
-0.17924121, -0.1192288 , -0.5132951 , -0.21492776, -0.02338856,
0.06183354, 0.20081927, 1.5143685 , -0.23609233, -1.5326194 ,
0.07134164, -0.18566804, 0.09517938, -0.2366734 , -0.25543636,
0.21957585, -0.19186178, -0.03386737, -0.31405905, 0.02609778,
0.15898977, 0.17519134, 0.46928844, 0.08060586, 0.03494119,
-0.32736334, -0.29203704, 0.38820857, -0.10169771, 0.1885377 ], dtype=np.float32),
'avg_rl': np.array([ 0.02474649, 0.05819888, -0.573494 , 0.42665127, -0.24005497,
0.28954008, 0.09553529, -0.21929392, 0.29730704, 0.10720031,
0.08352794, 0.17329699, 0.04786861, -0.19757096, 0.23520862,
0.05047477, 0.06384695, 0.3810828 , -0.42088643, 0.31991526,
-0.00657294, -0.1602312 , 0.01934432, -0.05308695, 0.15261653,
-0.18768208, 0.2468504 , 1.1301225 , 0.18912414, -1.6818371 ,
-0.0048908 , -0.1138081 , 0.19941933, -0.33451518, 0.32685688,
0.16142951, 0.19340065, 0.29646033, -0.22019152, 0.00283915,
-0.11352554, 0.06445522, 0.16041861, -0.22228748, 0.12385077,
0.1235975 , 0.09817057, 0.49406344, -0.01718818, -0.14590521], dtype=np.float32),
'ind_lr': np.array([ -0.9560043 , 0.74864763, -3.765377 , 2.4052265 ,
-2.77335 , 1.0839007 , 1.2075448 , -2.1991012 ,
-0.6914869 , -0.76155233, 0.09039193, -0.10694574,
0.03332234, -1.7913584 , 3.5899918 , 1.1811031 ,
0.34719622, 2.8550045 , -4.058856 , 1.9895933 ,
-0.9538663 , -1.4185168 , -1.3774501 , -0.70839 ,
0.8904926 , -0.4290378 , 2.50113 , 13.62771 ,
-2.04847 , -16.60081 , 1.122193 , -0.45304167,
1.788585 , -1.783916 , -0.6683887 , 1.7882961 ,
0.7354394 , 1.23965 , -1.9961023 , 0.402608 ,
0.9159312 , -0.31116977, 2.2644942 , 0.1399888 ,
0.6378886 , 0.22950995, 0.14772351, 4.650332 ,
-2.1952698 , 0.632337 ], dtype=np.float32),
'ind_rl': np.array([ -0.581247 , 0.39402282, -4.9604454 , 2.9153156 ,
-3.266195 , 2.0353367 , 0.98867047, -2.6893725 ,
0.62404895, -0.10151213, -0.08678932, 0.48683947,
0.42926547, -2.726957 , 3.5541263 , 1.9105643 ,
0.38475624, 1.7816558 , -4.0001183 , 2.3178246 ,
-0.04037939, -1.6770916 , 0.4089873 , 0.14326122,
1.2136039 , -1.2672318 , 2.1164768 , 13.180422 ,
-0.20970072, -16.815361 , -0.3536604 , 0.05701282,
1.6455649 , -1.7376695 , 1.2953197 , 0.9829821 ,
0.36719003, 2.045711 , -1.7104778 , -0.7316018 ,
0.25689864, -0.29095447, 2.463865 , -0.9082185 ,
0.5469501 , 1.4106001 , 1.8059125 , 3.192147 ,
-1.0163623 , -0.06175064], dtype=np.float32),
'jnt_lr': np.array([-0.07275456, 0.17217042, -1.1263629 , 0.59132475, -0.6857762 ,
0.3987299 , 0.2849644 , -0.3324792 , -0.5218429 , -0.22449493,
-0.03418753, 0.03964447, -0.27699992, -0.21312396, 1.0892309 ,
0.07211531, -0.310736 , 0.927751 , -1.513601 , 0.6188457 ,
-0.4136719 , -0.44481295, -0.9485517 , -0.42960307, 0.18169609,
-0.0129453 , 0.9944089 , 4.7210374 , -0.72786057, -5.4232216 ,
0.4896481 , -0.3219961 , 0.5593054 , -0.6756606 , -0.40184957,
0.7977413 , 0.06200888, 0.29339567, -0.67070514, 0.07990008,
0.30586833, 0.26401204, 1.0705179 , 0.08937053, 0.0528974 ,
-0.41401106, -0.46073523, 1.6320033 , -0.6971418 , 0.4227604 ], dtype=np.float32),
'jnt_rl': np.array([-0.10433966, 0.11216903, -1.6568075 , 1.0434439 , -0.89176613,
0.80805904, 0.20958407, -0.6883172 , 0.46864402, 0.14392492,
0.06814389, 0.43872485, 0.13348505, -0.6933242 , 0.83879924,
0.4133435 , 0.10168462, 0.75389224, -1.2080264 , 0.7677034 ,
0.04103406, -0.5851089 , 0.09137189, 0.0179385 , 0.4091576 ,
-0.44279754, 0.7216208 , 3.78728 , 0.24054618, -5.247924 ,
-0.09463028, -0.06590685, 0.53914046, -0.7077037 , 0.778091 ,
0.38039932, 0.29535806, 0.79152143, -0.43658957, -0.28203836,
-0.18825239, 0.11821863, 0.7924443 , -0.44980785, 0.19513777,
0.42662686, 0.42894912, 1.1275042 , -0.20928451, -0.1669173 ], dtype=np.float32)},
{'song_id': 1, 'order': 7, 'text': 'El sueño va sobre el tiempo flotando como un velero. Nadie puede abrir semillas en el corazón del sueño.',
'sum': np.array([-2.89330602e+00, 2.07143211e+00, -1.43969145e+01, 8.74479198e+00,
-1.05252724e+01, 5.00645685e+00, 4.15580177e+00, -9.00595951e+00,
-3.53039205e-01, -1.78322947e+00, -2.70735547e-02, -2.63288110e-01,
1.18394232e+00, -8.56595612e+00, 1.28102837e+01, 5.90152502e+00,
1.95395303e+00, 7.33172131e+00, -1.33936214e+01, 7.10451508e+00,
-1.79425943e+00, -5.03592777e+00, -7.74336755e-01, -7.95431137e-01,
3.63477874e+00, -3.01156735e+00, 7.11308193e+00, 4.46435509e+01,
-4.23541498e+00, -5.55025177e+01, 9.94962811e-01, -4.10589874e-01,
5.50712585e+00, -5.53023529e+00, 4.03232276e-01, 4.13736200e+00,
1.65698922e+00, 5.08512354e+00, -6.68517542e+00, -2.81402767e-01,
2.60318422e+00, -2.13262844e+00, 7.25675726e+00, -1.16735804e+00,
2.50720000e+00, 3.32147622e+00, 4.25464916e+00, 1.25266132e+01,
-5.32372999e+00, 9.54083264e-01], dtype=np.float32),
'avg': np.array([-1.3777646e-01, 9.8639622e-02, -6.8556732e-01, 4.1641858e-01,
-5.0120342e-01, 2.3840271e-01, 1.9789530e-01, -4.2885521e-01,
-1.6811388e-02, -8.4915675e-02, -1.2892367e-03, -1.2537515e-02,
5.6378193e-02, -4.0790266e-01, 6.1001348e-01, 2.8102499e-01,
9.3045369e-02, 3.4912962e-01, -6.3779134e-01, 3.3831024e-01,
-8.5440904e-02, -2.3980604e-01, -3.6873180e-02, -3.7877671e-02,
1.7308465e-01, -1.4340797e-01, 3.3871818e-01, 2.1258831e+00,
-2.0168641e-01, -2.6429768e+00, 4.7379181e-02, -1.9551899e-02,
2.6224408e-01, -2.6334453e-01, 1.9201543e-02, 1.9701724e-01,
7.8904226e-02, 2.4214874e-01, -3.1834161e-01, -1.3400137e-02,
1.2396113e-01, -1.0155370e-01, 3.4555984e-01, -5.5588473e-02,
1.1939045e-01, 1.5816556e-01, 2.0260239e-01, 5.9650534e-01,
-2.5351089e-01, 4.5432527e-02], dtype=np.float32),
'avg_lr': np.array([ 0.15221712, 0.01680356, -0.27159917, 0.05978177, -0.1294413 ,
0.13677162, 0.05283751, 0.02087875, -0.3912576 , -0.05795757,
0.00639257, -0.08509801, -0.22624512, -0.03958573, 0.4056398 ,
-0.03583173, -0.27597028, 0.19036712, -0.6177855 , 0.17364144,
-0.17924121, -0.1192288 , -0.5132951 , -0.21492776, -0.02338856,
0.06183354, 0.20081927, 1.5143685 , -0.23609233, -1.5326194 ,
0.07134164, -0.18566804, 0.09517938, -0.2366734 , -0.25543636,
0.21957585, -0.19186178, -0.03386737, -0.31405905, 0.02609778,
0.15898977, 0.17519134, 0.46928844, 0.08060586, 0.03494119,
-0.32736334, -0.29203704, 0.38820857, -0.10169771, 0.1885377 ], dtype=np.float32),
'avg_rl': np.array([ 0.02474649, 0.05819888, -0.573494 , 0.42665127, -0.24005497,
0.28954008, 0.09553529, -0.21929392, 0.29730704, 0.10720031,
0.08352794, 0.17329699, 0.04786861, -0.19757096, 0.23520862,
0.05047477, 0.06384695, 0.3810828 , -0.42088643, 0.31991526,
-0.00657294, -0.1602312 , 0.01934432, -0.05308695, 0.15261653,
-0.18768208, 0.2468504 , 1.1301225 , 0.18912414, -1.6818371 ,
-0.0048908 , -0.1138081 , 0.19941933, -0.33451518, 0.32685688,
0.16142951, 0.19340065, 0.29646033, -0.22019152, 0.00283915,
-0.11352554, 0.06445522, 0.16041861, -0.22228748, 0.12385077,
0.1235975 , 0.09817057, 0.49406344, -0.01718818, -0.14590521], dtype=np.float32),
'ind_lr': np.array([ -0.9560043 , 0.74864763, -3.765377 , 2.4052265 ,
-2.77335 , 1.0839007 , 1.2075448 , -2.1991012 ,
-0.6914869 , -0.76155233, 0.09039193, -0.10694574,
0.03332234, -1.7913584 , 3.5899918 , 1.1811031 ,
0.34719622, 2.8550045 , -4.058856 , 1.9895933 ,
-0.9538663 , -1.4185168 , -1.3774501 , -0.70839 ,
0.8904926 , -0.4290378 , 2.50113 , 13.62771 ,
-2.04847 , -16.60081 , 1.122193 , -0.45304167,
1.788585 , -1.783916 , -0.6683887 , 1.7882961 ,
0.7354394 , 1.23965 , -1.9961023 , 0.402608 ,
0.9159312 , -0.31116977, 2.2644942 , 0.1399888 ,
0.6378886 , 0.22950995, 0.14772351, 4.650332 ,
-2.1952698 , 0.632337 ], dtype=np.float32),
'ind_rl': np.array([ -0.581247 , 0.39402282, -4.9604454 , 2.9153156 ,
-3.266195 , 2.0353367 , 0.98867047, -2.6893725 ,
0.62404895, -0.10151213, -0.08678932, 0.48683947,
0.42926547, -2.726957 , 3.5541263 , 1.9105643 ,
0.38475624, 1.7816558 , -4.0001183 , 2.3178246 ,
-0.04037939, -1.6770916 , 0.4089873 , 0.14326122,
1.2136039 , -1.2672318 , 2.1164768 , 13.180422 ,
-0.20970072, -16.815361 , -0.3536604 , 0.05701282,
1.6455649 , -1.7376695 , 1.2953197 , 0.9829821 ,
0.36719003, 2.045711 , -1.7104778 , -0.7316018 ,
0.25689864, -0.29095447, 2.463865 , -0.9082185 ,
0.5469501 , 1.4106001 , 1.8059125 , 3.192147 ,
-1.0163623 , -0.06175064], dtype=np.float32),
'jnt_lr': np.array([-0.07275456, 0.17217042, -1.1263629 , 0.59132475, -0.6857762 ,
0.3987299 , 0.2849644 , -0.3324792 , -0.5218429 , -0.22449493,
-0.03418753, 0.03964447, -0.27699992, -0.21312396, 1.0892309 ,
0.07211531, -0.310736 , 0.927751 , -1.513601 , 0.6188457 ,
-0.4136719 , -0.44481295, -0.9485517 , -0.42960307, 0.18169609,
-0.0129453 , 0.9944089 , 4.7210374 , -0.72786057, -5.4232216 ,
0.4896481 , -0.3219961 , 0.5593054 , -0.6756606 , -0.40184957,
0.7977413 , 0.06200888, 0.29339567, -0.67070514, 0.07990008,
0.30586833, 0.26401204, 1.0705179 , 0.08937053, 0.0528974 ,
-0.41401106, -0.46073523, 1.6320033 , -0.6971418 , 0.4227604 ], dtype=np.float32),
'jnt_rl': np.array([-0.10433966, 0.11216903, -1.6568075 , 1.0434439 , -0.89176613,
0.80805904, 0.20958407, -0.6883172 , 0.46864402, 0.14392492,
0.06814389, 0.43872485, 0.13348505, -0.6933242 , 0.83879924,
0.4133435 , 0.10168462, 0.75389224, -1.2080264 , 0.7677034 ,
0.04103406, -0.5851089 , 0.09137189, 0.0179385 , 0.4091576 ,
-0.44279754, 0.7216208 , 3.78728 , 0.24054618, -5.247924 ,
-0.09463028, -0.06590685, 0.53914046, -0.7077037 , 0.778091 ,
0.38039932, 0.29535806, 0.79152143, -0.43658957, -0.28203836,
-0.18825239, 0.11821863, 0.7924443 , -0.44980785, 0.19513777,
0.42662686, 0.42894912, 1.1275042 , -0.20928451, -0.1669173 ], dtype=np.float32)},
{'song_id': 2, 'order': 0, 'text': 'Sólo una palabra se hubiera llevado el dolor. Con el beso amargo de aquel licor hubiera bastado, mi amor. Sólo una mentira se viene conmigo a pasear. Sentirme querida en aquel abrazo en el mar.',
'sum': np.array([-7.8626029e-02, 4.7850766e+00, -3.5608944e+01, 7.6312656e+00,
-8.0689411e+00, 9.2695522e+00, 3.8933334e+00, -4.9970889e+00,
-3.4967387e+00, -3.7263284e+00, -1.1994762e+00, 5.3971148e+00,
2.5005245e+00, -8.3190193e+00, 2.6459009e+01, 4.1727428e+00,
-1.6095663e+00, 3.2889857e+00, -2.8293814e+01, 1.5457506e+00,
-3.3779242e+00, -7.0554538e+00, 1.6924628e+00, -4.3592138e+00,
-5.4211318e-01, -5.4550109e+00, 2.7400827e+00, 6.7412201e+01,
-1.4239861e+01, -1.0226576e+02, 1.5503265e+01, 4.5056067e+00,
6.0363808e+00, -9.9129076e+00, -3.1817148e+00, 1.4654828e+01,
7.7338636e-01, -1.1908458e+00, 1.4551756e+00, 3.6819949e+00,
9.3122473e+00, -2.5088515e+00, 1.2683097e+01, -2.5457325e+00,
1.0792832e+01, 6.6580181e+00, 2.3454020e+00, 3.1937572e+01,
-8.2048559e+00, 1.9298853e+00], dtype=np.float32),
'avg': np.array([-1.96564803e-03, 1.19626917e-01, -8.90223861e-01, 1.90781683e-01,
-2.01723531e-01, 2.31738850e-01, 9.73333418e-02, -1.24927238e-01,
-8.74185115e-02, -9.31582823e-02, -2.99869236e-02, 1.34927914e-01,
6.25131354e-02, -2.07975537e-01, 6.61475420e-01, 1.04318574e-01,
-4.02391665e-02, 8.22246447e-02, -7.07345486e-01, 3.86437476e-02,
-8.44481438e-02, -1.76386386e-01, 4.23115753e-02, -1.08980373e-01,
-1.35528203e-02, -1.36375308e-01, 6.85020685e-02, 1.68530524e+00,
-3.55996579e-01, -2.55664492e+00, 3.87581676e-01, 1.12640224e-01,
1.50909528e-01, -2.47822732e-01, -7.95429200e-02, 3.66370738e-01,
1.93346646e-02, -2.97711492e-02, 3.63794044e-02, 9.20498818e-02,
2.32806206e-01, -6.27213046e-02, 3.17077488e-01, -6.36433139e-02,
2.69820869e-01, 1.66450426e-01, 5.86350337e-02, 7.98439443e-01,
-2.05121428e-01, 4.82471511e-02], dtype=np.float32),
'avg_lr': np.array([ 0.14199916, 0.02196441, -0.26233214, 0.06813581, -0.17161568,
0.05095826, 0.13687135, -0.03430529, -0.33590755, -0.01575495,
-0.00190266, -0.19369236, -0.20261627, -0.13447344, 0.41365144,
0.02979007, -0.30115262, 0.15655047, -0.70694804, 0.1908064 ,
-0.16084269, -0.12840278, -0.51689017, -0.16014259, -0.10868623,
0.04306707, 0.18777655, 1.6248049 , -0.18348761, -1.5613152 ,
0.14901404, -0.18556105, 0.12604755, -0.24883829, -0.13825409,
0.23313111, -0.2608478 , 0.00622602, -0.3019497 , 0.05540373,
0.22253372, 0.14453398, 0.40559757, 0.05512966, -0.01495368,
-0.2308537 , -0.14112705, 0.33975953, -0.14590496, 0.13584396], dtype=np.float32),
'avg_rl': np.array([ 0.07474235, -0.05095566, -0.22617638, -0.06554157, -0.09453937,
0.08451876, 0.00985148, -0.10619742, -0.12240447, -0.41663325,
0.08398677, 0.01123814, -0.05156585, -0.1877184 , 0.11936022,
0.27408338, -0.05684368, -0.09605438, -0.4296282 , -0.21282086,
0.09416147, 0.07842438, 0.22381422, -0.11188604, 0.0743604 ,
0.1922786 , -0.03412326, 0.6343792 , -0.15017277, -1.1033711 ,
0.21249102, 0.13308194, -0.03777281, 0.03099772, -0.23990418,
0.45328417, 0.19085275, 0.10002052, -0.03960982, -0.00324762,
0.28355742, -0.24446656, 0.2168394 , -0.11711513, 0.08560605,
0.146702 , -0.03206912, 0.23096864, -0.15287626, 0.0636689 ], dtype=np.float32),
'ind_lr': np.array([-7.9159403e-01, 1.7927700e+00, -6.4445920e+00, 1.9980592e+00,
-1.9932464e+00, 2.5297921e+00, 1.5233722e+00, -9.7321683e-01,
-1.3529708e+00, 1.0601221e+00, -8.4024690e-02, -5.3200346e-01,
-8.5539252e-01, -2.0367906e+00, 6.5219326e+00, 1.4957850e+00,
-3.7355861e-01, 1.8527262e+00, -6.9581208e+00, 1.6691885e+00,
-2.1661887e-01, -3.1153944e+00, -8.1595135e-01, -4.5478377e-01,
-9.3022205e-02, -1.4289653e+00, 1.4525416e+00, 1.7665562e+01,
-1.6914312e+00, -2.3457352e+01, 4.4446325e+00, -1.3680130e-02,
1.8207887e+00, -2.5583873e+00, -3.1378046e-01, 3.7624724e+00,
-9.2990196e-01, 7.1911767e-02, -5.7913095e-01, 1.1852190e+00,
3.6369994e+00, -9.5091954e-02, 3.4518147e+00, -3.6591211e-01,
1.9859008e+00, 2.1097794e+00, 1.6763264e+00, 7.6142521e+00,
-2.1574819e+00, 8.0468988e-01], dtype=np.float32),
'ind_rl': np.array([ 0.36036682, 0.2659102 , -9.383336 , 1.6540643 ,
-1.8544552 , 2.040525 , 0.38623747, -2.0674589 ,
-0.17811717, -2.735481 , 0.06497915, 2.6247444 ,
2.2524524 , -2.3304799 , 5.090027 , 1.520588 ,
-0.6566833 , -0.18054059, -7.5285506 , -0.92486453,
-1.2472353 , -0.12844966, 1.6787264 , -2.0070782 ,
-0.03883704, -0.50122124, 0.32023174, 14.670791 ,
-4.442268 , -24.936234 , 2.6862175 , 1.4963087 ,
1.5288913 , -2.6639318 , -0.87262005, 3.829262 ,
1.822982 , -0.3031184 , 0.1810417 , 1.0019404 ,
1.5819707 , -1.3070658 , 3.0441573 , -1.0136857 ,
2.2960339 , 1.4062893 , 0.93077624, 7.0936513 ,
-2.3005993 , -0.44394115], dtype=np.float32),
'jnt_lr': np.array([-0.0839625 , 0.24048713, -1.1895107 , 0.56045467, -0.67589134,
0.43034172, 0.6242535 , -0.32966405, -0.34523883, 0.3096628 ,
-0.07211331, -0.3715488 , -0.3946914 , -0.5581036 , 1.3895862 ,
0.34743702, -0.48660532, 0.63605994, -2.0166023 , 0.67152756,
-0.29650053, -0.6388187 , -0.74196684, -0.2563295 , -0.17456228,
-0.15808956, 0.7161918 , 5.0437737 , -0.21780226, -5.663267 ,
0.94590265, -0.36517897, 0.6920469 , -0.82853764, 0.1341338 ,
1.0255041 , -0.4359265 , 0.24434747, -0.6831524 , 0.33617684,
0.8282248 , 0.12579542, 0.988088 , -0.08863087, 0.03329732,
0.11094715, 0.38468227, 1.584192 , -0.64219654, 0.19912057], dtype=np.float32),
'jnt_rl': np.array([ 0.20647103, 0.01236998, -1.8000268 , -0.10444962, -0.4015947 ,
0.40114352, -0.14987926, -0.7043168 , -0.37886372, -1.3385748 ,
0.4660927 , 0.494431 , 0.55821884, -0.8207798 , 0.8894527 ,
0.9987336 , -0.15299855, -0.55325717, -2.18829 , -0.7889688 ,
0.07815305, 0.22223225, 0.7664324 , -0.6053459 , 0.13772383,
0.33092687, -0.03297511, 3.220633 , -1.1705496 , -5.9296546 ,
0.7614249 , 0.5233072 , 0.04951929, -0.49019626, -0.7151106 ,
1.4522064 , 0.79439867, 0.02097619, -0.03480822, 0.19162492,
0.93948597, -0.72598076, 1.0116435 , -0.44962582, 0.44274494,
0.6279283 , 0.45635882, 1.3751881 , -0.8750648 , -0.10692106], dtype=np.float32)},
{'song_id': 2, 'order': 1, 'text': 'Con el vestido azul que un día conociste, me marcho sin saber si me besaste antes de irte. Te di mi corazón y tú lo regalaste. Te di todo el amor que pude darte y me robaste. He rasgado mi vestido con una copa de vino. Hoy tu amor corta como el cristal.',
'sum': np.array([ 14.745233 , 2.4776866 , -58.947613 , 5.5801597 ,
-10.987661 , 8.967023 , 9.315319 , -6.8846097 ,
-9.562434 , -7.9481583 , 11.196522 , 0.32987642,
3.11568 , -5.8561454 , 43.733368 , -8.561116 ,
-11.447201 , 8.155723 , -42.931442 , -0.5282845 ,
-8.326199 , 1.5662978 , 0.2388561 , -10.620669 ,
0.6015979 , -15.834497 , 15.293006 , 119.48269 ,
-16.978409 , -151.56482 , 21.228725 , 25.447939 ,
12.103647 , -7.324197 , -7.0838985 , 7.623265 ,
6.3472733 , -5.896529 , -1.0419356 , 6.533429 ,
13.783035 , -5.132701 , 21.637701 , -9.172232 ,
29.049795 , 8.360209 , -15.099624 , 49.258717 ,
4.36083 , -10.000129 ], dtype=np.float32),
'avg': np.array([ 0.24991912, 0.04199468, -0.9991118 , 0.09457894, -0.18623151,
0.15198332, 0.15788667, -0.11668825, -0.16207507, -0.1347145 ,
0.18977149, 0.00559114, 0.05280811, -0.09925666, 0.74124324,
-0.1451036 , -0.19402027, 0.1382325 , -0.72765124, -0.00895397,
-0.14112192, 0.02654741, 0.00404841, -0.18001133, 0.01019657,
-0.26838112, 0.25920343, 2.025129 , -0.28776947, -2.5688941 ,
0.3598088 , 0.4313209 , 0.20514649, -0.1241389 , -0.12006603,
0.12920783, 0.10758084, -0.09994116, -0.01765993, 0.11073606,
0.2336107 , -0.08699486, 0.3667405 , -0.15546152, 0.49236912,
0.14169838, -0.25592571, 0.8348932 , 0.07391232, -0.16949365], dtype=np.float32),
'avg_lr': np.array([ 0.2293695 , 0.01606235, -0.25669116, 0.12473683, -0.18548532,
0.07767809, 0.09663443, -0.08684273, -0.34259745, -0.11695636,
-0.01505356, -0.08726294, -0.24512479, -0.05167608, 0.4240248 ,
0.06292959, -0.24676494, 0.09346008, -0.7494915 , 0.14345607,
-0.20507942, -0.03453419, -0.4974485 , -0.15910338, -0.07312362,
0.06021085, 0.16822624, 1.6543134 , -0.23254782, -1.5696731 ,
0.01885407, -0.18441987, 0.12901253, -0.25603428, -0.26052237,
0.14862064, -0.33424416, -0.06939841, -0.3850662 , 0.0232424 ,
0.26678443, 0.18589659, 0.434484 , 0.13778731, 0.0281527 ,
-0.27937078, -0.20493741, 0.43678027, -0.07315538, 0.06134717], dtype=np.float32),
'avg_rl': np.array([ 0.18822959, 0.16638993, -0.6524491 , 0.33651572, 0.01363976,
0.1310853 , 0.07625078, 0.08783372, 0.22199959, -0.09427652,
-0.12431835, 0.20578286, 0.05649907, -0.25877434, 0.21763892,
-0.09904815, 0.21865465, -0.08132949, -0.12218109, -0.15815625,
0.13789791, -0.196628 , 0.17853943, 0.05135369, -0.186539 ,
-0.03793556, 0.18801981, 1.1082488 , 0.16381325, -1.6158206 ,
0.07541126, 0.1514188 , 0.01183014, -0.05929884, 0.18259417,
0.30054057, 0.253322 , -0.00813799, 0.06171909, -0.03571669,
-0.39027303, -0.0796852 , 0.23531798, 0.25901696, 0.08330509,
0.04036889, -0.14436221, 0.4061203 , 0.333737 , -0.22642551], dtype=np.float32),
'ind_lr': np.array([ 3.54113865e+00, 8.79221976e-01, -1.06295395e+01, 2.86547804e+00,
-3.28372002e+00, 8.44653070e-01, 1.49221408e+00, -2.52652216e+00,
-3.53658229e-01, -2.04600763e+00, 8.38721395e-01, 1.38008606e+00,
6.84743822e-01, -1.13161027e+00, 8.54407501e+00, -1.43587136e+00,
-1.10377932e+00, 3.51375252e-01, -8.15022469e+00, 3.45924556e-01,
-1.12189066e+00, -4.70861226e-01, -2.62766659e-01, -1.66183496e+00,
8.09233859e-02, -1.86436534e+00, 2.45545387e+00, 2.33173409e+01,
-2.29831409e+00, -2.89886932e+01, 3.13060832e+00, 1.59623051e+00,
2.93754458e+00, -2.04311323e+00, -1.82091987e+00, 2.96150136e+00,
-1.09927547e+00, -6.21324241e-01, -1.25215089e+00, 2.07289923e-02,
3.18397474e+00, -1.24266349e-01, 4.14356422e+00, -6.79862916e-01,
3.19391227e+00, 9.56689477e-01, -1.69992101e+00, 9.92202187e+00,
-9.68006492e-01, -2.44480038e+00], dtype=np.float32),
'ind_rl': np.array([ 2.3526323 , 2.0479527 , -11.63405 , 1.960652 ,
-1.415091 , 2.243291 , 3.0877366 , -1.2414299 ,
-1.8316973 , -0.6952233 , 0.91645205, 0.38252982,
0.4613213 , -2.0295236 , 7.410713 , -0.85818976,
-1.1620741 , 1.1989684 , -7.59602 , -0.33371493,
-2.526754 , 0.04954249, 1.4437711 , -0.8996646 ,
-0.40482092, -4.601296 , 3.961836 , 22.6955 ,
-2.1875575 , -28.605888 , 4.25427 , 5.96667 ,
2.3753386 , -2.1044002 , 0.48174155, 2.2514112 ,
2.511031 , -0.43457428, -0.3764132 , -0.10076782,
0.6286479 , -1.5774777 , 4.6247687 , -1.9795196 ,
5.233599 , 1.1174657 , -2.197491 , 8.868578 ,
1.9916602 , -0.12297532], dtype=np.float32),
'jnt_lr': np.array([ 0.52279216, 0.08147442, -1.2409239 , 0.8506231 , -0.7918593 ,
0.2510835 , 0.45180154, -0.62306106, -0.17077513, -0.4067668 ,
-0.12977946, 0.22690302, -0.17799991, -0.3288946 , 1.2538271 ,
0.21720532, -0.24624085, 0.06378559, -1.9260312 , 0.3093948 ,
-0.46989337, -0.0166666 , -0.56915796, -0.13857685, 0.04210274,
-0.11200007, 0.7281482 , 4.879744 , -0.27921158, -5.2826242 ,
0.19806322, -0.21648954, 0.65323377, -0.76841027, -0.43372858,
0.4870596 , -0.75200814, 0.03033265, -0.85288614, -0.13915287,
0.81036353, 0.3124976 , 0.948528 , -0.03891234, 0.18536118,
-0.15484503, -0.09455603, 1.6437193 , -0.2675766 , -0.27775067], dtype=np.float32),
'jnt_rl': np.array([ 0.58522433, 0.7825498 , -2.274107 , 1.0032051 , -0.27903965,
0.48551327, 0.58701086, -0.20755239, 0.4449389 , -0.0241698 ,
-0.47204927, 0.6372088 , 0.05652557, -0.68243486, 1.1387159 ,
-0.28376958, 0.6590365 , -0.09336448, -0.9803842 , -0.47475842,
-0.10330945, -0.5978906 , 0.6806751 , 0.04138042, -0.37411544,
-0.72693235, 0.98576236, 4.3623953 , 0.15061633, -5.8548403 ,
0.4428501 , 0.7019351 , 0.5021822 , -0.51518 , 0.64115334,
0.88208675, 0.73627514, 0.30294424, -0.05724922, -0.28375804,
-0.84227926, -0.33873045, 0.92601454, 0.42972735, 0.3497457 ,
0.04293469, -0.31612197, 1.7970682 , 0.7558606 , -0.56768864], dtype=np.float32)}]
# --- shared fixture state for the evaluator tests below ---
# Name of the throwaway Elasticsearch index used by this test class.
index_name = 'test_index'
# All vector-composition strategies that were indexed for each partial.
composition_functions = ["sum", "avg", "avg_lr", "avg_rl", "ind_lr", "ind_rl", "jnt_lr", "jnt_rl"]
# Dimensionality of every embedding vector in data2index.
vector_dim = 50
# Rebuild the index from scratch so previous runs cannot leak state.
elasticsearch_manager.delete_index(index_name)
elasticsearch_manager.create_index(index_name, data2index, composition_functions, vector_dim)
# Give Elasticsearch time to finish indexing before the tests query it.
# NOTE(review): a fixed 30s sleep is fragile — polling the index would be safer.
time.sleep(30)
# Test points as (song_id, order) pairs; presumably one known partial each — confirm.
testset = [(1, 2), (2, 1)]
# Evaluator under test: top-3 cutoff, last flag presumably enables verbose/extra mode — confirm.
evaluator = Evaluator(testset, index_name, elasticsearch_manager, 3, True)
def test_0_evaluator(self):
    """One TestPoint should have been built per entry of the test set."""
    expected_count = len(self.testset)
    self.assertEqual(len(self.evaluator.test_points), expected_count)
    self.assertIsInstance(self.evaluator.test_points[0], TestPoint)
def test_1_get_composition_functions(self):
    """The index must expose exactly the composition functions we indexed."""
    retrieved = self.evaluator.get_composition_functions()
    self.assertEqual(set(retrieved), set(self.composition_functions))
def test_2_get_all_partials(self):
    """Partial retrieval should find 5 lines for song 1 and 2 for song 2."""
    expected_partials = {1: 5, 2: 2}
    for point in self.evaluator.test_points:
        point.get_all_partials(self.index_name, self.elasticsearch_manager)
        if point.song_id in expected_partials:
            self.assertEqual(len(point.partials), expected_partials[point.song_id])
def test_3_search_similar_partials(self):
    """Similarity search must complete without raising for every point."""
    for point in self.evaluator.test_points:
        point.search_similar_partials(self.index_name,
                                      self.elasticsearch_manager,
                                      self.composition_functions)
def test_4_merge_results(self):
    """Merging the per-function result lists must succeed for every point."""
    for point in self.evaluator.test_points:
        point.merge_results()
def test_5_is_hit(self):
    """Hit computation at the evaluator's cutoff n must run for every point."""
    for point in self.evaluator.test_points:
        point.is_hit(self.evaluator.n)
def test_6_accuracies(self):
    """Accuracy aggregation must produce a printable result."""
    scores = self.evaluator.compute_accuracies()
    print(scores)
# Run this module's unittest suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
"""
Contains a class as a datastore for model parameters
"""
import random
import typing
import mesh_tensorflow as mtf
import tensorflow as tf
from tensorflow.python.tpu.device_assignment import DeviceAssignment
class BlockConfig:
    """Configuration of a single model block.

    Seeds defaults (empty ``layer`` list, ``skip=False`` and the given
    memory-reduction strategy) and then overlays every key/value pair of
    ``config`` on top of them. ``config`` may be a plain mapping or another
    ``BlockConfig``, which is treated as the mapping of its attributes.
    """

    def __init__(self, config, memory_reduction_strategy: str):
        # Unwrap a BlockConfig into its attribute dict so both forms merge alike.
        source = config.__dict__ if isinstance(config, BlockConfig) else config
        self.layer = []
        self.skip = False
        self.memory_reduction_strategy = memory_reduction_strategy
        # Keys explicitly present in the incoming config win over the defaults.
        self.__dict__.update(source)
class LearningRateConfig:
    """Plain container for one learning-rate schedule entry.

    Holds a step interval (``start_step`` .. ``final_step``) together with a
    multiplicative ``factor``; the schedule logic lives elsewhere.
    """

    def __init__(self, start_step: int = 0, final_step: int = 0, factor: float = 1.):
        self.start_step, self.final_step, self.factor = start_step, final_step, factor
class TensorStorage:
    """Mutable holder for tensors that are shared across build steps."""

    def __init__(self):
        # Populated later during graph construction; unset until then.
        self.text_input_embedding: typing.Optional[mtf.Tensor] = None
class ModelParameter(typing.Dict[str, typing.Any]):
def __init__(self, config: typing.Dict[str, typing.Any]):
super().__init__()
self.position_embedding = "absolute" # "absolute" or "relative"(-learned) or "axial" | orthogonal for variables
self.token_embedding = "absolute"
self.empty_frame_embedding = "absolute"
self.output_embedding = "absolute-orthogonal" # embedding options above
self.use_video = True
self.save_graph = False
self.use_language = True
self.contrastive_across_samples = False
self.contrastive_across_token_embeddings = False
self.input_dropout = 0.
self.output_offset = 1
self.weight_standardisation = True
self.use_checkpointing = False
self.max_checkpoints_keep = 1
self.steps_per_checkpoint = 100_000
self.time_patch = 1
self.patch_size = 16
self.frame_width = 320
self.frame_height = 176
self.opt_beta1 = 0.9
self.opt_beta2 = 0.999
self.vocab_size = 256
self.color_channels = 3
self.three_axes = True
self.dataset_configs = []
self.data_seed = 456772
self.parallel_batch = None
self.parallel_interleave = None
self.use_random_dataloader = False
self.train = True
self.debug_sample = False
self.padding_token = 0
self.concat_token = 4
self.sequence_length = 32
self.heads = 8
self.features: typing.Optional[int] = None
self.features_per_head: typing.Optional[int] = None
self.depth = 16
self.buffer_size = 4
self.combine_assignments = False # Needs more memory but it's faster
self.shuffle_buffer = 256
self.interleaved_datasets = 256
self.token_patch_size = 1
self.learning_rate = 5e-5
self.storage_dtype = "float32"
self.slice_dtype = "float32"
self.calculation_dtype = "float32"
self.optimizer_slice_dtype = "float32"
self.optimizer_calculation_dtype = "float32"
self.learning_rate_config = {}
self.train_batch_size = 1
self.grad_accumulation = 1
self.macro_batching = 1
self.macro_batch_loss_smoothing = False
self.reduce_lr_on_plateau_timespan = 0
self.reduce_lr_on_plateau_reduction = 2
self.momentumnet_alpha = 0.99
self.current_step = 0
self.tpu_size = 32
self.default_sleep_duration = 0.1
self.lookahead_steps = 0
self.lookahead_alpha = 0
self.momentum = 0.95
self.prefix = "datasets/full_hd_video"
self.model_path = "gs://text-datasets/video-transformer/ctx=32-layer=64-heads=8-feat=256"
self.tensorflow_optimization_settings = {"layout_optimizer": True,
"constant_folding": True,
"shape_optimization": True,
"remapping": True,
"arithmetic_optimization": True,
"dependency_optimization": True,
"loop_optimization": True,
"function_optimization": True,
"debug_stripper": True,
"scoped_allocator_optimization": True,
"pin_to_host_optimization": True,
"implementation_selector": True,
"auto_mixed_precision": True,
"disable_meta_optimizer": False,
"disable_model_pruning": False,
"min_graph_nodes": 0
}
self.language_token_per_frame = 0
self.weight_decay = 0.001
self.vocab_weight_factorization = 0.125
self.train_steps = 2 ** 30
self.warmup_steps = 3000
self.rezero_lr_multiplier = 0.1
self.learning_rate_decay_multi = 1
self.convolution_size = 16
self.learning_rate_decay_start_step = 100_000
self.learning_rate_decay_min = 5e-10
self.iterations = 2500
self.initial_autoregressive_position = 128
self.use_autoregressive_sampling = False
self.sampling_temperature = 0
self.weight_centralisation = True
self.shuffle_input_filenames = True
self.calc_accuracy = False
self.num_of_sample = 10
self.web_workers = 1
self.equal_debugging_items_per_check = 16
self.group_linear_factor = 2
self.embedding_stddev = 0.04
self.color_quantization_value = 256
self.experts = 64
self.pkm_axes = 2 # 2 axis = features^2 keys, 3 axis = features^3 keys...
self.use_bit_fold_input_pipeline = False
self.bit_fold_value = 4
self.debug_train_step = False
self.model_mode = 'jannet'
self.optimizer = 'learning_rate'
self.multi_loss_strategy = "linear"
self.memory_reduction_strategy = "revnet"
self.debug_gradients = False
self.use_initial_position_embedding = False
self.intermediate_feed_forward_multiplier = None
self.intermediate_feed_forward_multiplier_multiplier = None
self.own_color = "\x1b[32;1m"
self.other_color = "\x1b[0m"
self.scale_by_depth = True
self.z_loss = 1e-4
self.block_config = [{'layer': ["norm-group-shift-scale",
"feed_forward-in_relu-group-in_glu_add-in_norm"]
},
{'layer': ["norm-group-std-shift-scale",
"attention-in_relu-embedded-relative"]
}]
self.input_block_config = []
self.output_block_config = []
self.mesh: typing.Optional[mtf.Mesh] = None
self.d_assignment: typing.Optional[DeviceAssignment] = None
self.mesh_impl: typing.Optional[mtf.simd_mesh_impl.SimdMeshImpl] = None
self.num_cores = 0
self.num_hosts = 0
self.num_cores_per_host = 0
self.masked_attention_dimensions = [0]
self.split_grad_accumulation = True
self.log_dict_keys = []
if hasattr(config, 'dict'):
config = config.dict()
for k, v in config.items():
if k not in self.__dict__:
print(f"WARNING: Unknown ModelParameter {k}={v!r}")
self.__dict__[k] = v
if self.grad_accumulation > 1:
raise ValueError("Gradient accumulation is not supported right now. The optimizer was split into two "
"different 'sections' where the 'accumulation' section still has to be integrated")
assert self.macro_batching > 0, "MacroBatching has to be >=1, where 1 means it's disabled"
if isinstance(self.position_embedding, str):
self.position_embedding = self.position_embedding.split('-')
self.token_embedding = self.token_embedding.split('-')
self.output_embedding = self.output_embedding.split('-')
self.empty_frame_embedding = self.empty_frame_embedding.split('-')
self.slice_dtype = getattr(tf, self.slice_dtype)
self.storage_dtype = getattr(tf, self.storage_dtype)
self.calculation_dtype = getattr(tf, self.calculation_dtype)
self.optimizer_slice_dtype = getattr(tf, self.optimizer_slice_dtype)
self.optimizer_calculation_dtype = getattr(tf, self.optimizer_calculation_dtype)
self.learning_rate_config = {key: LearningRateConfig(**config) for key, config in
self.learning_rate_config.items()}
self.tensor_storage = TensorStorage()
self.multi_loss_strategy = self.multi_loss_strategy.lower()
if self.multi_loss_strategy not in ["linear", "pcgrad", "mgda"]:
print(f'{self.multi_loss_strategy} is not in the support option list for multi loss strategies: '
f'["linear", "pcgrad", "mgda"]. default to "linear".')
self.multi_loss_strategy = "linear"
if not self.use_language and not self.use_video:
raise ValueError("Language and video mode are disabled. No model can be built.")
if self.weight_standardisation and not self.weight_centralisation:
print("Can't standardise weights without centralizing them first. Enabling it.")
self.weight_centralisation = True
if self.features is None and self.features_per_head is None:
raise ValueError("Either features or features_per_head has to be specified")
if self.features is None:
self.features = self.features_per_head * self.heads
if self.features_per_head is None:
self.features_per_head = self.features // self.heads
if self.use_video and (self.frame_width * self.frame_height // self.patch_size) % self.experts:
raise ValueError("Frame size has to be divisible by number of experts. Set \"experts\" to 1 if you're not "
"using MoE")
if self.intermediate_feed_forward_multiplier_multiplier is not None:
self.intermediate_feed_forward_multiplier = \
self.group_linear_factor * self.intermediate_feed_forward_multiplier_multiplier / self.heads
if self.intermediate_feed_forward_multiplier is None:
self.intermediate_feed_forward_multiplier = self.group_linear_factor / self.heads
if not self.use_video and self.language_token_per_frame != self.sequence_length:
print(
f"language_token_per_frame is unused in language-only mode. Overwriting with sequence_length={self.sequence_length}")
self.language_token_per_frame = self.sequence_length
if self.macro_batching > 1 and self.grad_accumulation > 1 and self.macro_batching % self.grad_accumulation != 0:
raise ValueError(f'"macro_batching" needs do be divisible by "grad_accumulation", '
f'{self.macro_batching} is not divisible by {self.grad_accumulation}')
if self.use_random_dataloader:
print('WARNING: Use random dataset seed')
for _ in range(random.randint(0, 1000)):
self.data_seed = random.randint(0, 1000000)
split_batch = self.heads < self.tpu_size
split_heads = self.heads > 1
self.mesh_shape = ','.join([f"b:{self.tpu_size // self.heads:.0f}"] * split_batch +
[f"h:{self.heads:.0f}"] * split_heads)
self.layout = ','.join([f"batch:b"] * split_batch +
[f"heads:h"] * split_heads)
self.variable_dtype = mtf.VariableDType(self.storage_dtype, self.slice_dtype, self.calculation_dtype)
self.optimizer_dtype = mtf.VariableDType(self.storage_dtype, self.optimizer_slice_dtype,
self.optimizer_calculation_dtype)
self.block_config = [BlockConfig(conf, memory_reduction_strategy=self.memory_reduction_strategy) for conf in
self.block_config]
self.input_block_config = [BlockConfig(conf, memory_reduction_strategy="checkpoint") for conf in
self.input_block_config]
self.output_block_config = [BlockConfig(conf, memory_reduction_strategy="checkpoint") for conf in
self.output_block_config]
self.time_patch_size = self.sequence_length // self.time_patch
self.frame_height_patch = self.frame_height // self.patch_size
self.frame_width_patch = self.frame_width // self.patch_size
self.channel_color_size = self.color_channels * self.time_patch * self.patch_size ** 2
self.fold_count = 32 // self.bit_fold_value
if 2 ** self.bit_fold_value < self.color_quantization_value and self.use_bit_fold_input_pipeline:
raise ValueError("when folding the input, the fold value must be qual or lager then the color bit value")
self.language_token_patch = self.language_token_per_frame // self.token_patch_size
if self.use_bit_fold_input_pipeline:
self.channel_color_size = self.channel_color_size // self.fold_count
self.product_key_value_vectors = self.features_per_head ** 2
self.product_key_value_dim = mtf.Dimension("product_key_value_dim", self.product_key_value_vectors)
self.head_dim = mtf.Dimension("heads", self.heads)
self.head_dimensions = [self.head_dim]
self.key_dim = mtf.Dimension("features_per_head", self.features // self.heads)
self.sequence_per_head_dim = mtf.Dimension("sequence_per_head", self.time_patch_size // self.heads)
self.pkm_dim = mtf.Dimension("pkm_axes", self.pkm_axes)
self.feature_dims = self.head_dimensions + [self.key_dim]
self.intermediate = [mtf.Dimension("intermediate",
int(self.heads * self.key_dim.size *
self.intermediate_feed_forward_multiplier))]
self.expert_dim = mtf.Dimension("experts", self.experts)
self.macro_batch_dim = mtf.Dimension("batch", self.train_batch_size * self.macro_batching)
self.vocab_dim = mtf.Dimension('vocab', self.vocab_size)
self.batch_dim = mtf.Dimension("batch", self.train_batch_size)
self.frame_input_sequence = mtf.Dimension("_sequence", self.time_patch_size + 1)
frame_input_shape = [self.batch_dim, self.frame_input_sequence]
if self.three_axes:
frame_input_shape += [
mtf.Dimension("height", self.frame_height_patch),
mtf.Dimension("width", self.frame_width_patch),
]
else:
frame_input_shape += [
mtf.Dimension(
"height", self.frame_height_patch * self.frame_width_patch
)
]
self.color_channel_dim = mtf.Dimension("color_channels", self.channel_color_size)
frame_input_shape += [self.color_channel_dim]
self.frame_input_shape = mtf.Shape(frame_input_shape)
self.input_pipeline_shape = {}
self.sequence_dim = mtf.Dimension("sequence", self.time_patch_size)
self.token_patch_dim = mtf.Dimension("language_token_patch", self.token_patch_size)
self.token_dim_shape = mtf.Shape([self.batch_dim,
self.sequence_dim,
self.token_patch_dim])
self.frame_mask_shape = mtf.Shape([self.batch_dim, self.sequence_dim])
if self.use_video:
self.input_pipeline_shape['frame'] = self.frame_input_shape
self.input_pipeline_shape['cat_mask_x'] = self.frame_mask_shape
self.input_pipeline_shape['cat_mask_y'] = self.frame_mask_shape
self.input_pipeline_shape['vid_msk_src'] = self.frame_mask_shape
self.input_pipeline_shape['vid_msk_tgt'] = self.frame_mask_shape
self.discrete_dim = [mtf.Dimension("discrete", self.channel_color_size * self.color_quantization_value)]
self.discrete_color_dim = mtf.Dimension("color_quantization", self.color_quantization_value)
if self.use_language:
self.input_pipeline_shape['token_x'] = self.token_dim_shape
self.input_pipeline_shape['token_y'] = self.token_dim_shape
if self.use_language and self.use_video:
self.token_dim_shape._dims.insert(2, mtf.Dimension("height", self.language_token_patch))
self.input_pipeline_shape['txt_msk'] = self.token_dim_shape
self.input_pipeline_shape = align_tensor_op(self.input_pipeline_shape)
self.attention_idx = 0
self.variable_cache = {}
self.cached_parameters = {}
self.debug_outfeed = {}
def __getitem__(self, key: str) -> typing.Any:
    """Deprecated dict-style read access; returns the attribute named `key`."""
    print(f"Getting {key} via deprecated interface")
    # Bug fix: previously returned `self.key` — i.e. the attribute literally
    # named "key" — regardless of which key was requested.
    return self.__dict__[key]
def __setitem__(self, key: str, value: typing.Any) -> None:
    """Deprecated dict-style assignment; sets the attribute named `key` to `value`."""
    print(f"Setting {key} via deprecated interface")
    # Bug fix: previously assigned to `self.key` — the attribute literally
    # named "key" — instead of the attribute named by the argument.
    self.__dict__[key] = value
def get(self, key: str, default: typing.Any = None) -> typing.Any:
    """
    Default python get from list
    :param key: key to check for in dictionary
    :param default: default value if key doesn't exist (defaults to None,
        matching dict.get semantics; previous callers passing it explicitly
        are unaffected)
    :return: whatever value belongs to the key or the default
    """
    print(f"Getting {key} via deprecated interface with default value {default}")
    return self.__dict__.get(key, default)
def __str__(self) -> str:
    """Render every parameter attribute as a dict-style string."""
    return f"{self.__dict__}"
def __repr__(self) -> str:
    """Reuse the __str__ rendering for repr."""
    return self.__str__()
def dict(self) -> typing.Dict[str, typing.Any]:
    """
    :return: dictionary containing parameters

    Note: this returns the live instance ``__dict__`` (not a copy), so
    mutating the result mutates the instance.
    """
    return self.__dict__
def align_tensor_op(x):
    """Flatten the input-pipeline dict into a fixed-order tensor list.

    Order: video frame plus its four masks (if present), then the language
    token pair (if present), then the optional text mask.
    """
    ordered = []
    if 'frame' in x:
        ordered += [x['frame'], x['cat_mask_x'], x['cat_mask_y'],
                    x['vid_msk_src'], x['vid_msk_tgt']]
    if 'token_x' in x:
        ordered += [x['token_x'], x['token_y']]
        if 'txt_msk' in x:
            ordered += [x['txt_msk']]
    return ordered
class BlockArgs:
    """Bundle of model parameters, the current tensor, and the list of
    name extras that configure how a block is constructed."""

    def __init__(self, params: ModelParameter, tensor: typing.Optional[mtf.Tensor], name_extras: typing.List[str],
                 is_last: bool = False):
        self.params = params
        self.tensor = tensor
        self.name_extras = name_extras
        self.is_last = is_last

    def __call__(self, *args: typing.Union[ModelParameter, mtf.Tensor, typing.List[str], str]):
        """Return a copy of self with each argument merged in by its type.

        A ModelParameter replaces params, a Tensor replaces tensor, a
        list/tuple replaces name_extras wholesale, and a str is appended
        to name_extras. Note: `is_last` is not carried over, matching the
        original behavior.
        """
        new = BlockArgs(self.params, self.tensor, self.name_extras[:])
        for a in args:
            if isinstance(a, ModelParameter):
                new.params = a
            elif isinstance(a, mtf.Tensor):
                new.tensor = a
            elif isinstance(a, (list, tuple)):
                new.name_extras = list(a)
            elif isinstance(a, str):
                # Bug fix: previously appended the builtin `str` type object
                # instead of the string argument itself.
                new.name_extras.append(a)
            else:
                raise ValueError(f"Argument {a} is of unsupported type {type(a)}. "
                                 f"Only ModelParameter, mtf.Tensor, typing.List[str] and str are supported")
        return new

    def __iter__(self):
        for itm in self.name_extras:
            yield itm

    def __len__(self):
        return len(self.name_extras)

    def __getitem__(self, idx: int):
        return self.name_extras[idx]
|
import numpy as np
from datetime import datetime
import pandas as pd
import pyodbc
class Config:
    """Connection settings for the MS SQL Server instance used by DB."""
    # NOTE(review): credentials are hard-coded in source — move them to
    # environment variables or a secrets store before sharing this file.
    # The '\S' in the server string happens not to be an escape sequence,
    # but a raw string (r'...') would be safer against future edits.
    def __init__(self):
        self.server='iZ1wll9swpj1hbZ\SQLEXPRESS'
        self.database = '39F'
        self.username ='harryyan'
        self.password ='Aa12345678'
class EXC:
    """Base class shared by the Excel/CSV download wrappers."""

    def __init__(self):
        # Placeholder attribute inherited by all download wrappers.
        self.aaa = 0
class EXC_WF_SALE_DOWNLOAD(EXC):
    """Loads a Wayfair sales CSV export (US or CA) into a DataFrame.

    The US/CA branches were duplicated line-for-line; the shared loading
    and cleaning steps now live in the `_load` helper.
    """

    def __init__(self, country):
        """
        :param country: 'US' loads WF_US_Sales.csv into ``self.USdata``;
            any other value loads WF_CA_Sales.csv into ``self.CAdata``.
        """
        EXC.__init__(self)
        source_path = 'E://OneDrive//广新//四象限//uploaded_files//'
        if country == 'US':
            print("reading US excel file")
            self.USdata = self._load(source_path + 'WF_US_Sales.csv')
        else:
            print("reading CA excel file")
            self.CAdata = self._load(source_path + 'WF_CA_Sales.csv')

    @staticmethod
    def _load(path):
        """Read one sales CSV and apply the shared cleaning steps."""
        data = pd.read_csv(path)
        # Older exports lack the Sales_Channel column (47 columns); add it
        # so downstream code always sees a uniform schema.
        if len(data.columns) == 47:
            data['Sales_Channel'] = None
        # 'Item Number' arrives Excel-quoted as ="..."; strip the wrapper.
        data['Item Number'] = data.apply(lambda x: str(x['Item Number']).strip('="'), axis = 1)
        data.replace("No Feed", "", inplace=True)
        return data
class DB:
    """Thin wrapper around a pyodbc connection to the configured database."""

    def __init__(self):
        config = Config()
        server = config.server
        database = config.database
        username = config.username
        password = config.password
        # NOTE(review): connection string is built from hard-coded
        # credentials in Config — consider env vars / a secrets store.
        self.cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER='+server+';DATABASE='+database+';UID='+username+';PWD='+ password)
        self.cursor = self.cnxn.cursor()

    def disconnect(self):
        """Close the cursor and the underlying connection."""
        # Bug fix: previously only the cursor was closed, leaking the
        # connection, and success was printed before anything was closed.
        self.cursor.close()
        self.cnxn.close()
        print('\nSuccessfully disconnected!\n')

    def get_df(self, query):
        """Run `query` and return the result set as a pandas DataFrame."""
        return pd.read_sql(query, self.cnxn)

    def run(self, query):
        """Execute `query` and commit immediately."""
        self.cursor.execute(query)
        self.cursor.commit()
        print('Successfully run!\n')

    def commit(self):
        """Commit the current transaction."""
        self.cursor.commit()
class WF_SALE(DB):
    """Inserts Wayfair sales rows into the SQL Server sales tables.

    Column names are read once from the live table so the INSERT statement
    always matches the current schema.
    """
    def __init__(self):
        DB.__init__(self)
        # Fetch one row purely to learn the table's column names.
        self.header = self.get_df('SELECT TOP(1) * FROM [Wayfair_US_Sales];').columns
        # '],[' joined names become a bracketed column list in the INSERT.
        self.names = '],['.join(self.header)
        # One '?' placeholder per column; trailing comma stripped at use site.
        self.values = '?,'*len(self.header)
    def insert(self,df,table):
        """Insert every row of `df` into `table`.

        The positional arguments below must stay in exact column order of
        the target table — do not reorder. The `None` entries fill columns
        the CSV export does not provide.

        NOTE(review): `table` is interpolated into the SQL via f-string;
        callers pass literals here, but never pass untrusted input.
        """
        for index, row in df.iterrows():
            self.cursor.execute(f"INSERT INTO {table} ([{self.names}]) values({self.values[:-1]});",
                row['Warehouse_Name'],
                row['Store_Name'],
                row['PO_Number'],
                row['PO_Date'],
                row['Must_Ship_By'],
                row['Backorder_Date'],
                row['Order_Status'],
                row['Item_Number'],
                row['Item_Name'],
                row['Quantity'],
                row['Wholesale_Price'],
                row['Ship_Method'],
                row['Carrier_Name'],
                row['Shipping_Account_Number'],
                row['Ship_To_Name'],
                row['Ship_To_Address'],
                row['Ship_To_Address_2'],
                row['Ship_To_City'],
                row['Ship_To_State'],
                row['Ship_To_Zip'],
                # NaN phone numbers must become SQL NULL, not the string 'nan'.
                row['Ship_To_Phone'] if row['Ship_To_Phone'] is not np.nan else None,
                # Non-integer inventory values (e.g. NaN) become NULL.
                row['Inventory_at_PO_Time'] if type(row['Inventory_at_PO_Time']) is int else None,
                row['Inventory_Send_Date'],
                row['Ship_Speed'],
                row['PO_Date_&_Time'],
                row['Registered_Timestamp'],
                None,
                None,
                None,
                None,
                None,
                None,
                None,
                None,
                None,
                None,
                row['Packing_Slip_URL'],
                row['Tracking_Number'],
                row['Ready_for_Pickup_Date'],
                row['SKU'],
                row['Destination_Country'],
                None,
                None,
                None,
                None,
                row['B2BOrder'],
                row['Composite_Wood_Product'],
                None
                )
def run_app():
    """Replace the Wayfair US sales rows for the CSV's date range.

    Steps: load the US CSV, delete the overlapping date range from the DB,
    then re-insert all CSV rows and commit.
    """
    # initialization
    exc = EXC_WF_SALE_DOWNLOAD('US')
    db = WF_SALE()
    # find timeframe
    date_us_max = max(exc.USdata['PO Date'].astype('datetime64')).strftime('%Y-%m-%d')
    date_us_min = min(exc.USdata['PO Date'].astype('datetime64')).strftime('%Y-%m-%d')
    print('To replace time frame: ')
    print(f'US: from {date_us_min} to {date_us_max}\n')
    # delete from tables in db
    # Dates are formatted YYYY-MM-DD above, so the f-string interpolation is
    # safe here; still, parameterized queries would be more robust.
    print('US to delete related timeframe:\n')
    db.run(f"DELETE FROM [Wayfair_US_Sales] WHERE [PO_Date] between '{date_us_min}' and '{date_us_max}';")
    # Syn headers
    # Rename CSV columns to the DB column names so insert() can index rows.
    exc.USdata.columns = db.header
    # clean data
    # Normalize all NaN variants to None so they land as SQL NULL.
    exc.USdata = exc.USdata.fillna(np.nan).replace([np.nan], [None])
    # insert to Database
    t0 = datetime.now()
    db.insert(exc.USdata, '[Wayfair_US_Sales]')
    print('insert')
    db.commit()
    print('commit')
    t1 = datetime.now()
    print('Time used: ', t1-t0)
if __name__ == '__main__':
    run_app()
|
"""
Contains useful constants.
"""
KMD_AUTH_HEADER = "X-KMD-API-Token"
"""str: header key for kmd requests"""
ALGOD_AUTH_HEADER = "X-Algo-API-Token"
"""str: header key for algod requests"""
INDEXER_AUTH_HEADER = "X-Indexer-API-Token"
"""str: header key for indexer requests"""
UNVERSIONED_PATHS = ["/health", "/versions", "/metrics", "/genesis"]
"""str[]: paths that don't use the version path prefix"""
NO_AUTH = []
"""str[]: requests that don't require authentication"""
# transaction types
PAYMENT_TXN = "pay"
"""str: indicates a payment transaction"""
KEYREG_TXN = "keyreg"
"""str: indicates a key registration transaction"""
ASSETCONFIG_TXN = "acfg"
"""str: indicates an asset configuration transaction"""
ASSETFREEZE_TXN = "afrz"
"""str: indicates an asset freeze transaction"""
ASSETTRANSFER_TXN = "axfer"
"""str: indicates an asset transfer transaction"""
APPCALL_TXN = "appl"
"""str: indicates an app call transaction, allows creating, deleting, and interacting with an application"""
# note field types
NOTE_FIELD_TYPE_DEPOSIT = "d"
"""str: indicates a signed deposit in NoteField"""
NOTE_FIELD_TYPE_BID = "b"
"""str: indicates a signed bid in NoteField"""
NOTE_FIELD_TYPE_SETTLEMENT = "s"
"""str: indicates a signed settlement in NoteField"""
NOTE_FIELD_TYPE_PARAMS = "p"
"""str: indicates signed params in NoteField"""
# prefixes
TXID_PREFIX = b"TX"
"""bytes: transaction prefix when signing"""
TGID_PREFIX = b"TG"
"""bytes: transaction group prefix when computing the group ID"""
BID_PREFIX = b"aB"
"""bytes: bid prefix when signing"""
BYTES_PREFIX = b"MX"
"""bytes: bytes prefix when signing"""
MSIG_ADDR_PREFIX = "MultisigAddr"
"""str: prefix for multisig addresses"""
LOGIC_PREFIX = b"Program"
"""bytes: program (logic) prefix when signing"""
LOGIC_DATA_PREFIX = b"ProgData"
"""bytes: program (logic) data prefix when signing"""
APPID_PREFIX = b"appID"
"""bytes: application ID prefix when signing"""
HASH_LEN = 32
"""int: how long various hash-like fields should be"""
CHECK_SUM_LEN_BYTES = 4
"""int: how long checksums should be"""
KEN_LEN_BYTES = 32
"""int: how long addresses are in bytes"""
ADDRESS_LEN = 58
"""int: how long addresses are in base32, including the checksum"""
MNEMONIC_LEN = 25
"""int: how long mnemonic phrases are"""
MIN_TXN_FEE = 1000
"""int: minimum transaction fee"""
MICROALGOS_TO_ALGOS_RATIO = 1000000
"""int: how many microalgos per algo"""
METADATA_LENGTH = 32
"""int: length of asset metadata"""
NOTE_MAX_LENGTH = 1024
"""int: maximum length of note field"""
LEASE_LENGTH = 32
"""int: byte length of leases"""
MULTISIG_ACCOUNT_LIMIT = 255
"""int: maximum number of addresses in a multisig account"""
TX_GROUP_LIMIT = 16
"""int: maximum number of transaction in a transaction group"""
MAX_ASSET_DECIMALS = 19
"""int: maximum value for decimals in assets"""
# logic sig related
LOGIC_SIG_MAX_COST = 20000
"""int: max execution cost of a teal program"""
LOGIC_SIG_MAX_SIZE = 1000
"""int: max size of a teal program and its arguments in bytes"""
# for backward compatibility:
kmd_auth_header = KMD_AUTH_HEADER
algod_auth_header = ALGOD_AUTH_HEADER
indexer_auth_header = INDEXER_AUTH_HEADER
unversioned_paths = UNVERSIONED_PATHS
no_auth = NO_AUTH
payment_txn = PAYMENT_TXN
keyreg_txn = KEYREG_TXN
assetconfig_txn = ASSETCONFIG_TXN
assetfreeze_txn = ASSETFREEZE_TXN
assettransfer_txn = ASSETTRANSFER_TXN
appcall_txn = APPCALL_TXN
note_field_type_deposit = NOTE_FIELD_TYPE_DEPOSIT
note_field_type_bid = NOTE_FIELD_TYPE_BID
note_field_type_settlement = NOTE_FIELD_TYPE_SETTLEMENT
note_field_type_params = NOTE_FIELD_TYPE_PARAMS
txid_prefix = TXID_PREFIX
tgid_prefix = TGID_PREFIX
bid_prefix = BID_PREFIX
bytes_prefix = BYTES_PREFIX
msig_addr_prefix = MSIG_ADDR_PREFIX
logic_prefix = LOGIC_PREFIX
logic_data_prefix = LOGIC_DATA_PREFIX
hash_len = HASH_LEN
check_sum_len_bytes = CHECK_SUM_LEN_BYTES
key_len_bytes = KEN_LEN_BYTES
address_len = ADDRESS_LEN
mnemonic_len = MNEMONIC_LEN
min_txn_fee = MIN_TXN_FEE
microalgos_to_algos_ratio = MICROALGOS_TO_ALGOS_RATIO
metadata_length = METADATA_LENGTH
note_max_length = NOTE_MAX_LENGTH
lease_length = LEASE_LENGTH
multisig_account_limit = MULTISIG_ACCOUNT_LIMIT
tx_group_limit = TX_GROUP_LIMIT
max_asset_decimals = MAX_ASSET_DECIMALS
logic_sig_max_cost = LOGIC_SIG_MAX_COST
logic_sig_max_size = LOGIC_SIG_MAX_SIZE
|
from sqlalchemy import select
from src.repositories.irepository import IRepository
from src.models import Base
class SqlRepository(IRepository):
    """SQLAlchemy-backed repository; subclasses override ``model``."""

    model = Base

    async def get_all(self):
        """Return every row of ``model`` as ORM objects."""
        async with self.session_factory() as session:
            query_result = await session.execute(select(self.model))
            rows = query_result.scalars()
            return rows.all()
"""
Data Loader for Pathology image data files.
##################
License: Apache License 2.0
author: Caner Mercan, 2018
"""
import pdb
import numpy as np
from .directory import DataDir as DD
from . import dataNames as DN
import scipy.io as sio
class Cases():
    """
    Loading all case IDs and names into memory.

    All attributes are class-level caches, None until loadCasesMat() runs.
    """
    dir = DD.DIR_DATA_SUPLEMENTARY
    IDS = None
    NAMES = None
    CV = None
    PAIRS = None
    @staticmethod
    def loadCasesMat():
        """Populate CV, IDS, NAMES and PAIRS from the cases .mat file."""
        cases_struct = sio.loadmat(DD.DIR_DATA_SUPLEMENTARY + DN.CASES_MFILE)
        Cases.CV = cases_struct[DN.CASECV_KEY].tolist()
        Cases.IDS = cases_struct[DN.CASEIDS_KEY].flatten().tolist()
        # Each name's last 4 chars are stripped — presumably a file
        # extension like '.svs'; confirm against the .mat contents.
        Cases.NAMES = [c[0][:-4] for c in cases_struct[DN.CASENAMES_KEY].flatten()]
        # Map case ID -> case name for quick lookup.
        Cases.PAIRS = {cID:cName for cID,cName in zip(Cases.IDS, Cases.NAMES)}
class Polygons():
    """
    Loading all polygons (and soft_rects) into memory.

    All attributes are class-level caches populated by loadPolygonsMat().
    """
    dir = DD.DIR_DATA
    polygons = None
    filters = None
    soft_rects = None
    consensus_coords = None
    Lab_histograms = None
    LBP_histograms = None
    arch_features = None
    #unique_expertIDs = []
    #unique_actionIDs = []

    @staticmethod
    def loadPolygonsMat():
        """Populate all polygon caches from the polygons/labels .mat files."""
        Polygons.polygons = Polygons.__loadFromPolygonMat(DN.POLYGONS_KEY, flatten=True)
        Polygons.soft_rects = Polygons.__loadFromPolygonMat(DN.SOFTRECT_KEY)
        Polygons.consensus_coords = Polygons.__loadFromDataMat(DN.CONSENSUS_COORDS_KEY)
        # extracted polygon features
        Polygons.Lab_histograms = Polygons.__loadFromPolygonMat(DN.LAB_HISTOGRAMS_KEY)
        Polygons.LBP_histograms = Polygons.__loadFromPolygonMat(DN.LBP_HISTOGRAMS_KEY)
        Polygons.arch_features = Polygons.__loadFromPolygonMat(DN.ARCH_FEATURES_KEY)
        #Polygons.unique_expertIDs = np.unique(Polygons.soft_rects[:,1]).tolist()
        #Polygons.unique_actionIDs = np.unique(Polygons.soft_rects[:,2]).tolist()
        Polygons.filters = Polygons.__loadPolygonFilters__()
        # typically polygons/coords load into memory as uint16; may need to convert to signed not to have computational problems.
        Polygons.polygons = np.array(list(map(lambda x: x.astype(np.int32), Polygons.polygons)))
        Polygons.consensus_coords = np.array(list(map(lambda x: x.astype(np.int32), Polygons.consensus_coords)))

    @staticmethod
    def __loadFromPolygonMat(KEY, flatten=False, is_dict=False):
        """
        loads EXPERTs ROI polygons/soft_rects and etc. into memory
        """
        struct = sio.loadmat(DD.DIR_DATA + DN.POLYGONS_MFILE, variable_names=KEY)
        struct = struct[KEY].flatten() if flatten else struct[KEY]
        struct = {key:value.squeeze() for key, value in zip(struct[0].dtype.names, struct[0])} if is_dict else struct
        return struct

    @staticmethod
    def __loadFromDataMat(KEY):
        """
        loads Consensus ROI polygons into memory
        """
        struct = sio.loadmat(DD.DIR_DATA + DN.LABELS_MFILE, variable_names=KEY)
        struct = struct[KEY].flatten()
        return struct

    @staticmethod
    def __loadPolygonFilters__():
        """Load the polygon filter masks keyed by their top-k value.

        Consistency fix: added the @staticmethod decorator that every other
        loader in this class already carries (behavior is unchanged since
        the function is always called through the class).
        """
        struct = Polygons.__loadFromPolygonMat(DN.POLYGON_FILTERS_KEY, flatten=True, is_dict=True)
        struct[DN.FILTERS_ESSENTIALS] = struct[DN.FILTERS_ESSENTIALS].astype(bool)
        struct[DN.FILTERS_TOPK_POLY] = {k:topk.astype(bool) for k, topk in zip(struct[DN.FILTERS_K], struct[DN.FILTERS_TOPK_POLY])}
        del struct[DN.FILTERS_K]
        return struct
    #@staticmethod
    #def __write2file(variable_names):
    #    sio.savemat(DD.DIR_DATA + DN.POLYGONS_MFILE
class Labels():
    """
    Loads all class labels into memory.

    Class-level caches are populated by loadLabelsMat().
    """
    dir = DD.DIR_DATA
    NUM_CLASSES = DN.NUM_CLASSES
    classes = {}
    expert_diags = {}
    consensus_diags = {}

    @staticmethod
    def loadLabelsMat():
        """Populate classes, expert_diags and consensus_diags per class.

        Consistency fix: added @staticmethod, matching the loader style in
        Cases and Polygons; call sites via the class are unaffected.
        NOTE(review): NUM_CLASSES is iterated here, so DN.NUM_CLASSES is
        presumably a sequence of class names despite its name — confirm.
        """
        for i,cls in enumerate(Labels.NUM_CLASSES):
            Labels.classes[cls] = Labels.__loadFromDataMat(DN.CLASSES_KEYS[i] , cell=1)
            Labels.expert_diags[cls] = Labels.__loadFromDataMat(DN.EXPERT_DIAGS_KEYS[i], cell=2)
            Labels.consensus_diags[cls] = Labels.__loadFromDataMat(DN.CONSENSUS_DIAGS_KEYS[i], cell=1)

    @staticmethod
    def __loadFromDataMat(KEY, cell=0):
        """
        cell denotes the number of levels inside the struct.
        """
        struct = sio.loadmat(DD.DIR_DATA + DN.LABELS_MFILE, variable_names=KEY)
        struct = struct[KEY]
        if cell==2:
            # EXPERT structs contain 3 expert opinions; require different type of 'flatten'
            struct = [[val[i].flatten() for i in range(len(val)) ] for val in struct]
        elif cell==1:
            struct = [val[0] for val in struct.flatten()]
        else:
            struct = struct.flatten().tolist()
        return struct
|
#
# author: Jungtaek Kim (jtkim@postech.ac.kr)
# last updated: September 24, 2020
#
"""It defines wrappers for Bayesian optimization."""
import time
import numpy as np
from bayeso import bo
from bayeso import constants
from bayeso.utils import utils_bo
from bayeso.utils import utils_common
from bayeso.utils import utils_logger
logger = utils_logger.get_logger('wrappers_bo')
@utils_common.validate_types
def run_single_round_with_all_initial_information(model_bo: bo.BO,
    fun_target: constants.TYPING_CALLABLE,
    X_train: np.ndarray, Y_train: np.ndarray,
    num_iter: int,
    str_sampling_method_ao: str=constants.STR_SAMPLING_METHOD_AO,
    num_samples_ao: int=constants.NUM_SAMPLES_AO,
    str_mlm_method: str=constants.STR_MLM_METHOD
) -> constants.TYPING_TUPLE_FIVE_ARRAYS:
    """
    It optimizes `fun_target` for `num_iter` iterations with given `model_bo`.
    It returns the optimization results and execution times.

    :param model_bo: Bayesian optimization model.
    :type model_bo: bayeso.bo.BO
    :param fun_target: a target function.
    :type fun_target: callable
    :param X_train: initial inputs. Shape: (n, d) or (n, m, d).
    :type X_train: numpy.ndarray
    :param Y_train: initial outputs. Shape: (n, 1).
    :type Y_train: numpy.ndarray
    :param num_iter: the number of iterations for Bayesian optimization.
    :type num_iter: int.
    :param str_sampling_method_ao: the name of initialization method for
        acquisition function optimization.
    :type str_sampling_method_ao: str., optional
    :param num_samples_ao: the number of samples for acquisition function
        optimization. If L-BFGS-B is used as an acquisition function
        optimization method, it is employed.
    :type num_samples_ao: int., optional
    :param str_mlm_method: the name of marginal likelihood maximization
        method for Gaussian process regression.
    :type str_mlm_method: str., optional

    :returns: tuple of acquired examples, their function values, overall
        execution times per iteration, execution time consumed in Gaussian
        process regression, and execution time consumed in acquisition
        function optimization. Shape: ((n + `num_iter`, d), (n + `num_iter`, 1),
        (`num_iter`, ), (`num_iter`, ), (`num_iter`, )), or ((n + `num_iter`, m, d),
        (n + `num_iter`, m, 1), (`num_iter`, ), (`num_iter`, ), (`num_iter`, )).
    :rtype: (numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray)

    :raises: AssertionError
    """
    assert isinstance(model_bo, bo.BO)
    assert callable(fun_target)
    assert isinstance(X_train, np.ndarray)
    assert isinstance(Y_train, np.ndarray)
    assert isinstance(num_iter, int)
    assert isinstance(str_sampling_method_ao, str)
    assert isinstance(num_samples_ao, int)
    assert isinstance(str_mlm_method, str)
    assert len(X_train.shape) == 2
    assert len(Y_train.shape) == 2
    assert X_train.shape[0] == Y_train.shape[0]
    assert Y_train.shape[1] == 1
    assert str_mlm_method in constants.ALLOWED_MLM_METHOD
    time_start = time.time()
    # Acquired data grows by one row per iteration via np.vstack below.
    X_final = X_train
    Y_final = Y_train
    time_all_final = []
    time_surrogate_final = []
    time_acq_final = []
    for ind_iter in range(0, num_iter):
        logger.info('Iteration %d', ind_iter + 1)
        time_iter_start = time.time()
        # One BO step: fit the surrogate on all data so far and optimize
        # the acquisition function to propose the next evaluation point.
        next_point, dict_info = model_bo.optimize(X_final, Y_final,
            str_sampling_method=str_sampling_method_ao,
            num_samples=num_samples_ao, str_mlm_method=str_mlm_method)
        next_points = dict_info['next_points']
        acquisitions = dict_info['acquisitions']
        time_surrogate = dict_info['time_surrogate']
        time_acq = dict_info['time_acq']
        if model_bo.debug:
            logger.debug('next_point: %s', utils_logger.get_str_array(next_point))
        # If the proposed point numerically duplicates an already-acquired
        # one (within TOLERANCE_DUPLICATED_ACQ), fall back to the next best
        # acquisition candidate instead of re-evaluating the same input.
        if np.where(np.linalg.norm(next_point - X_final, axis=1)\
            < constants.TOLERANCE_DUPLICATED_ACQ)[0].shape[0] > 0: # pragma: no cover
            next_point = utils_bo.get_next_best_acquisition(next_points, acquisitions, X_final)
            if model_bo.debug:
                logger.debug('next_point is repeated, so next best is selected.\
                    next_point: %s', utils_logger.get_str_array(next_point))
        X_final = np.vstack((X_final, next_point))
        # Evaluate the (possibly expensive) target and time it separately.
        time_to_evaluate_start = time.time()
        Y_final = np.vstack((Y_final, fun_target(next_point)))
        time_to_evaluate_end = time.time()
        if model_bo.debug:
            logger.debug('time consumed to evaluate: %.4f sec.',
                time_to_evaluate_end - time_to_evaluate_start)
        time_iter_end = time.time()
        time_all_final.append(time_iter_end - time_iter_start)
        time_surrogate_final.append(time_surrogate)
        time_acq_final.append(time_acq)
    time_end = time.time()
    if model_bo.debug:
        logger.debug('overall time consumed in single BO round: %.4f sec.', time_end - time_start)
    time_all_final = np.array(time_all_final)
    time_surrogate_final = np.array(time_surrogate_final)
    time_acq_final = np.array(time_acq_final)
    return X_final, Y_final, time_all_final, time_surrogate_final, time_acq_final
@utils_common.validate_types
def run_single_round_with_initial_inputs(model_bo: bo.BO,
    fun_target: constants.TYPING_CALLABLE,
    X_train: np.ndarray, num_iter: int,
    str_sampling_method_ao: str=constants.STR_SAMPLING_METHOD_AO,
    num_samples_ao: int=constants.NUM_SAMPLES_AO,
    str_mlm_method: str=constants.STR_MLM_METHOD,
) -> constants.TYPING_TUPLE_FIVE_ARRAYS:
    """
    Evaluates `fun_target` at the given initial inputs `X_train`, then runs
    `num_iter` iterations of Bayesian optimization with `model_bo`, and
    returns the optimization results together with execution times.

    :param model_bo: Bayesian optimization model.
    :type model_bo: bayeso.bo.BO
    :param fun_target: a target function.
    :type fun_target: callable
    :param X_train: initial inputs. Shape: (n, d) or (n, m, d).
    :type X_train: numpy.ndarray
    :param num_iter: the number of iterations for Bayesian optimization.
    :type num_iter: int.
    :param str_sampling_method_ao: the name of initialization method for
        acquisition function optimization.
    :type str_sampling_method_ao: str., optional
    :param num_samples_ao: the number of samples for acquisition function
        optimization. If L-BFGS-B is used as an acquisition function
        optimization method, it is employed.
    :type num_samples_ao: int., optional
    :param str_mlm_method: the name of marginal likelihood maximization
        method for Gaussian process regression.
    :type str_mlm_method: str., optional

    :returns: tuple of acquired examples, their function values, overall
        execution times per iteration (initial evaluations included),
        execution time consumed in Gaussian process regression, and
        execution time consumed in acquisition function optimization.
        Shape: ((n + `num_iter`, d), (n + `num_iter`, 1), (n + `num_iter`, ),
        (`num_iter`, ), (`num_iter`, )), or ((n + `num_iter`, m, d),
        (n + `num_iter`, m, 1), (n + `num_iter`, ), (`num_iter`, ), (`num_iter`, )).
    :rtype: (numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray)

    :raises: AssertionError
    """
    assert isinstance(model_bo, bo.BO)
    assert callable(fun_target)
    assert isinstance(X_train, np.ndarray)
    assert isinstance(num_iter, int)
    assert isinstance(str_sampling_method_ao, str)
    assert isinstance(num_samples_ao, int)
    assert isinstance(str_mlm_method, str)
    assert len(X_train.shape) == 2
    assert str_mlm_method in constants.ALLOWED_MLM_METHOD
    # Evaluate the target at every initial input, timing each evaluation.
    initial_outputs = []
    eval_times = []
    for initial_point in X_train:
        t_begin = time.time()
        initial_outputs.append(fun_target(initial_point))
        eval_times.append(time.time() - t_begin)
    eval_times = np.array(eval_times)
    Y_train = np.reshape(np.array(initial_outputs), (len(initial_outputs), 1))
    # Delegate the actual optimization loop to the all-information variant.
    X_final, Y_final, time_all_final, time_surrogate_final, time_acq_final \
        = run_single_round_with_all_initial_information(
            model_bo,
            fun_target,
            X_train,
            Y_train,
            num_iter,
            str_sampling_method_ao=str_sampling_method_ao,
            num_samples_ao=num_samples_ao,
            str_mlm_method=str_mlm_method
        )
    # Prepend the initial-evaluation times so the time axis covers all points.
    return (X_final, Y_final,
        np.concatenate((eval_times, time_all_final)),
        time_surrogate_final, time_acq_final)
@utils_common.validate_types
def run_single_round(model_bo: bo.BO, fun_target: constants.TYPING_CALLABLE,
    num_init: int, num_iter: int,
    str_initial_method_bo: str=constants.STR_INITIALIZING_METHOD_BO,
    str_sampling_method_ao: str=constants.STR_SAMPLING_METHOD_AO,
    num_samples_ao: int=constants.NUM_SAMPLES_AO,
    str_mlm_method: str=constants.STR_MLM_METHOD,
    seed: constants.TYPING_UNION_INT_NONE=None
) -> constants.TYPING_TUPLE_FIVE_ARRAYS:
    """
    It optimizes `fun_target` for `num_iter` iterations with given
    `model_bo` and `num_init` initial examples.
    Initial examples are sampled by `get_initials` method in `model_bo`.
    It returns the optimization results and execution times.

    :param model_bo: Bayesian optimization model.
    :type model_bo: bayeso.bo.BO
    :param fun_target: a target function.
    :type fun_target: callable
    :param num_init: the number of initial examples for Bayesian optimization.
    :type num_init: int.
    :param num_iter: the number of iterations for Bayesian optimization.
    :type num_iter: int.
    :param str_initial_method_bo: the name of initialization method for
        sampling initial examples in Bayesian optimization.
    :type str_initial_method_bo: str., optional
    :param str_sampling_method_ao: the name of initialization method for
        acquisition function optimization.
    :type str_sampling_method_ao: str., optional
    :param num_samples_ao: the number of samples for acquisition function
        optimization. If L-BFGS-B is used as an acquisition function
        optimization method, it is employed.
    :type num_samples_ao: int., optional
    :param str_mlm_method: the name of marginal likelihood maximization
        method for Gaussian process regression.
    :type str_mlm_method: str., optional
    :param seed: None, or random seed.
    :type seed: NoneType or int., optional

    :returns: tuple of acquired examples, their function values, overall
        execution times per iteration, execution time consumed in Gaussian
        process regression, and execution time consumed in acquisition
        function optimization. Shape: ((`num_init` + `num_iter`, d),
        (`num_init` + `num_iter`, 1), (`num_init` + `num_iter`, ), (`num_iter`, ),
        (`num_iter`, )), or ((`num_init` + `num_iter`, m, d), (`num_init` + `num_iter`, m, 1),
        (`num_init` + `num_iter`, ), (`num_iter`, ), (`num_iter`, )),
        where d is a dimensionality of the problem we are solving and m is
        a cardinality of sets.
    :rtype: (numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray)

    :raises: AssertionError
    """
    assert isinstance(model_bo, bo.BO)
    assert callable(fun_target)
    assert isinstance(num_init, int)
    assert isinstance(num_iter, int)
    assert isinstance(str_initial_method_bo, str)
    assert isinstance(str_sampling_method_ao, str)
    assert isinstance(num_samples_ao, int)
    assert isinstance(str_mlm_method, str)
    assert isinstance(seed, (int, type(None)))
    assert str_initial_method_bo in constants.ALLOWED_INITIALIZING_METHOD_BO
    assert str_mlm_method in constants.ALLOWED_MLM_METHOD
    # Echo the full configuration up front so a run is reproducible from logs.
    logger.info('range_X:\n%s', utils_logger.get_str_array(model_bo.range_X))
    logger.info('str_cov: %s', model_bo.str_cov)
    logger.info('str_acq: %s', model_bo.str_acq)
    logger.info('str_optimizer_method_gp: %s', model_bo.str_optimizer_method_gp)
    logger.info('str_optimizer_method_bo: %s', model_bo.str_optimizer_method_bo)
    logger.info('str_modelselection_method: %s', model_bo.str_modelselection_method)
    logger.info('num_init: %d', num_init)
    logger.info('num_iter: %d', num_iter)
    logger.info('str_initial_method_bo: %s', str_initial_method_bo)
    logger.info('str_sampling_method_ao: %s', str_sampling_method_ao)
    logger.info('num_samples_ao: %d', num_samples_ao)
    logger.info('str_mlm_method: %s', str_mlm_method)
    logger.info('seed: %s', seed)
    time_start = time.time()
    # Sample the initial design, then delegate the rest of the round.
    X_init = model_bo.get_initials(str_initial_method_bo, num_init, seed=seed)
    if model_bo.debug:
        logger.debug('X_init:\n%s', utils_logger.get_str_array(X_init))
    X_final, Y_final, time_all_final, time_surrogate_final, time_acq_final \
        = run_single_round_with_initial_inputs(
            model_bo, fun_target, X_init, num_iter,
            str_sampling_method_ao=str_sampling_method_ao,
            num_samples_ao=num_samples_ao,
            str_mlm_method=str_mlm_method
        )
    time_end = time.time()
    if model_bo.debug:
        logger.debug('overall time consumed including initializations: %.4f sec.',
            time_end - time_start)
    return X_final, Y_final, time_all_final, time_surrogate_final, time_acq_final
|
'''
A sensor is a callable taking no arguments,
returning a humidity and a temperature.
It is constructed with a `source' parameter.
'''
from functools import partial
def sensor(func):
    """Turn a function taking a ``source`` parameter into a sensor factory.

    Calling the decorated function with a source returns a zero-argument
    callable that invokes ``func(source)`` on each call.

    >>> @sensor
    ... def test(source):
    ...     return source * 2
    >>> reader = test('foo')
    >>> reader()
    'foofoo'
    """
    def bind_source(source):
        # Freeze the source; the partial is the zero-argument sensor.
        return partial(func, source)
    return bind_source
@sensor
def dht11(source):
    """Read humidity/temperature from a DHT11 on GPIO pin `source`.

    :raises ValueError: if the read fails entirely or humidity is implausible.
    """
    import Adafruit_DHT
    hum, temp = Adafruit_DHT.read_retry(Adafruit_DHT.DHT11, int(source))
    # read_retry returns (None, None) when every retry failed; comparing
    # None with ints below would raise TypeError, so fail explicitly.
    if hum is None or temp is None:
        raise ValueError('sensor read failed')
    if hum < 0 or hum > 100:
        raise ValueError('humidity out of bounds')
    return dict(temperature=temp, humidity=hum)
@sensor
def dht22(source):
    """Read humidity/temperature from a DHT22 on GPIO pin `source`.

    :raises ValueError: if the read fails entirely or humidity is implausible.
    """
    import Adafruit_DHT
    hum, temp = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, int(source))
    # read_retry returns (None, None) when every retry failed; comparing
    # None with ints below would raise TypeError, so fail explicitly.
    if hum is None or temp is None:
        raise ValueError('sensor read failed')
    if hum < 0 or hum > 100:
        raise ValueError('humidity out of bounds')
    return dict(temperature=temp, humidity=hum)
@sensor
def climon(source):
    """Fetch humidity and temperature from a climon HTTP endpoint at `source`."""
    from urllib import request
    values = request.urlopen(source).read().split()
    # Exactly two whitespace-separated numbers are expected; unpacking a
    # generator of any other length raises ValueError, as before.
    hum, temp = (float(v) for v in values)
    return dict(temperature=temp, humidity=hum)
@sensor
def rand(source):
    """Return uniformly random readings; `source` is ignored."""
    import random
    # Temperature in [-15, 35), humidity in [0, 100).
    temperature = random.random() * 50 - 15
    humidity = random.random() * 100
    return dict(temperature=temperature, humidity=humidity)
def sine(source):
    """
    A sensor returning metrics with variation visible both at day and
    year scale, if called once a minute.

    The counter starts at hash(source), so distinct sources produce
    phase-shifted curves.

    >>> s = sine('')
    >>> s()['humidity'], s()['temperature']
    (12.17, 50.83)
    """
    counter = hash(source)
    def sine_sensor():
        nonlocal counter
        counter += 1
        from math import sin
        # Make y vary from -.5 to .5
        y = .25*(sin(counter/60) + sin(counter/60/24/100))
        temperature = round(100*(y+.5), 2)
        humidity = round(40*(y+.3), 2)
        return dict(temperature=temperature, humidity=humidity)
    return sine_sensor
@sensor
def openweathermap(source):
    """Query the OpenWeatherMap API; `source` is the URL query string."""
    import json
    from urllib import request
    url = 'http://api.openweathermap.org/data/2.5/weather?' + source
    payload = json.loads(request.urlopen(url).read().decode('utf8'))
    main = payload['main']
    return dict(
        # API reports Kelvin; convert to Celsius.
        temperature=main['temp'] - 273.15,
        humidity=main['humidity'],
        pressure=main['pressure'],
        wind=payload['wind']['speed'],
    )
def empty(source):
    """Return an inert placeholder object; `source` is ignored.

    Used as the 'WEB' entry in SENSORS, where no local reading exists.
    """
    class EmptySensor(object):
        pass
    return EmptySensor()
# Registry mapping configuration names to sensor factories.
# NOTE(review): 'WEB' maps to `empty`, whose result is not callable like
# the other sensors — presumably handled specially elsewhere; confirm.
SENSORS = {
    'DHT11': dht11,
    'DHT22': dht22,
    'CLIMON': climon,
    'RANDOM': rand,
    'SINE': sine,
    'WEB': empty,
    'OPENWEATHERMAP': openweathermap,
}
|
__author__ = 'Scott Gramig'
__program__ = 'Paper-Rock-Scissors-Lizard-Spock'
import random
import os
# Global win/loss/tie counters for the session; gameLogic() mutates them
# via `global` after each round.
win = 0
loss = 0
tie = 0
def greeting():  # greets player and tells rule
    """Print the welcome banner and the per-weapon win rules."""
    banner = (
        "Let's play Rock-Paper-Scissors-Lizards-Spock!!",
        "Rules:",
        "0: Rock-------->beats Scissors and Lizard",
        "1: Spock------->beats Rock and Scissors",
        "2: Paper------->beats Rock and Spock",
        "3: Lizard------>beats Paper and Spock",
        "4: Scissors---->beat Paper and Lizard",
        "\nLet's RO SHAM BEAUX!!!\n",
    )
    for line in banner:
        print(line)
def gameLogic(playerChoice, compChoice): # determines winner of round
    """Decide the round's outcome, update the global counters, print totals.

    :param playerChoice: player's weapon, 0-4 (see greeting() for the map)
    :param compChoice: computer's weapon, 0-4
    """
    #dict for weapons for future use
    #weapons = {0: "Rock", 1: "Spock", 2: "Paper", 3: "Lizard", 4: "Scissors"}
    #access the global variables
    global tie, win, loss
    #game logic
    if playerChoice == compChoice:
        print("TIE!")
        tie += 1
    # Each pair below is (player weapon, a weapon it beats).
    elif (playerChoice == 0 and compChoice == 3) or (playerChoice == 0 and compChoice == 4) or \
        (playerChoice == 1 and compChoice == 0) or (playerChoice == 1 and compChoice == 4) or \
        (playerChoice == 2 and compChoice == 0) or (playerChoice == 2 and compChoice == 1) or \
        (playerChoice == 3 and compChoice == 2) or (playerChoice == 3 and compChoice == 1) or \
        (playerChoice == 4 and compChoice == 2) or (playerChoice == 4 and compChoice == 3):
        print("YOU WIN!")
        win += 1
    else:
        print("YOU LOSE!")
        loss += 1
    # Bug fix: this was a Python-2 print statement (a SyntaxError in
    # Python 3, which the rest of the file uses).
    print("Win: %d ----- Loss: %d ----- Tie: %d" % (win, loss, tie))
def gamePlay(): # all in a nice function :)))))
    """Run one round: show the rules, read the player's choice, play the computer.

    Entering 9 exits the program.
    """
    greeting()
    # Bug fix: raw_input is Python 2; input() is the Python 3 equivalent.
    userInput = input("Make a choice 0-4 (input 9 to exit):")
    userInput = int(userInput)
    os.system('clear')
    #exit program
    if userInput == 9:
        exit()
    compChoice = random.randrange(0, 5)
    compChoice = int(compChoice)
    print("Computer choose %d" % compChoice)
    gameLogic(userInput, compChoice)
# main block
if __name__ == '__main__':
    # Guard so importing this module doesn't start the interactive loop.
    while 1:
        gamePlay()
|
from germanium.annotations import login
from germanium.test_cases.client import ClientTestCase
from germanium.tools import assert_contains
from .test_case import HelperTestCase, AsSuperuserTestCase
class UIOrderingTestCase(AsSuperuserTestCase, HelperTestCase, ClientTestCase):
    """UI smoke test: the issue grid must render for a superuser."""
    # Base URL of the issue-list UI under test.
    ISSUE_UI_URL = '/issue/'

    @login(is_superuser=True)
    def test_superuser_may_read_users_grid(self):
        # The grid column marker for `watched_by_string` must appear in the
        # rendered page, proving the users grid is visible to superusers.
        resp = self.get(self.ISSUE_UI_URL)
        assert_contains(resp, 'data-col="watched_by_string"')
|
import numpy as np
from numpy.linalg import cholesky, inv
class SigmaSets(object):
    """
    Generates sigma points and weights using one of several available
    methods.

    Parameters
    ----------
    sqrt_method : function(ndarray), optional
        The matrix square root used to compute sigma points
        (default: ``numpy.linalg.cholesky``).
    add : callable (x, y), optional
        Function that computes the sum of x and y (default: ``numpy.add``).

    Attributes
    ----------
    sigma_functions : dict
        Dictionary mapping sigma point set names to functions for computing
        those sets.
    sigma_order : dict
        Dictionary mapping sigma point set names to their order of accuracy.
    """

    def __init__(self, sqrt_method=None, add=None):
        # Default to Cholesky factorization / plain addition when not customized.
        self.sqrt = cholesky if sqrt_method is None else sqrt_method
        self.add = np.add if add is None else add

        # Available sigma point sets.
        self.sigma_functions = {
            'merwe': self.get_set_merwe,
            'menegaz': self.get_set_menegaz,
            'li': self.get_set_li,
            'mysovskikh': self.get_set_mysovskikh,
            'julier': self.get_set_julier,
            'simplex': self.get_set_simplex,
            'hermite': self.get_set_hermite,
        }
        # Order of accuracy of each method.
        self.sigma_order = {
            'merwe': 3,
            'menegaz': 2,
            'li': 5,
            'mysovskikh': 3,
            'julier': 3,
            'simplex': 2,
            'hermite': 3,
        }

    def get_set(self, x, Px, **sigma_args):
        """
        Compute the sigma point and weight sets for N(x, Px).

        Parameters
        ----------
        x : scalar, or np.array
            Mean vector of length n.
        Px : scalar, or np.array
            Covariance matrix. If scalar, is treated as eye(n)*Px.
        **sigma_args : keyword arguments
            ``set_name`` (default 'merwe') selects the sigma point set;
            remaining arguments are forwarded to that method as scaling
            parameters.

        Returns
        -------
        X : np.array, of size (n, N)
            Two dimensional array of sigma points. Each column is a
            single sigma point.
        wm : np.array
            Mean weights.
        wc : np.array
            Covariance weights.
        """
        if np.isscalar(x):
            x = np.asarray([x])
            n = 1
        else:
            # State dimension
            n = len(x)
        if np.isscalar(Px):
            Px = np.eye(n)*Px
        else:
            Px = np.atleast_2d(Px)
        # Which sigma point set to use (removed before forwarding kwargs).
        set_name = sigma_args.pop('set_name', 'merwe')
        # Get sigma points for the standard normal N(0, I).
        X, wm, wc = self.sigma_functions[set_name](n, **sigma_args)
        # Affine change of variables to get sigma points for N(x, Px).
        X = self.add(x[:, None].repeat(X.shape[1], axis=1), self.sqrt(Px) @ X)
        return X, wm, wc

    def get_set_merwe(self, n, **scale_args):
        """
        Generates sigma points and weights according to the third order
        method in [1]_.

        Parameters
        ----------
        n : int
            Dimensionality of the state. 2n+1 points will be generated.
        alpha : float, default=0.5
            Scaling parameter.
        beta : float, default=2.
            Scaling parameter.
        kappa : float, default=3-n
            Scaling parameter.

        Returns
        -------
        X : np.array, of size (n, 2n+1)
            Sigma points, one per column.
        wm : np.array
            Mean weights.
        wc : np.array
            Covariance weights.

        References
        ----------
        .. [1] R. Van der Merwe "Sigma-Point Kalman Filters for Probabilitic
           Inference in Dynamic State-Space Models" (Doctoral dissertation)
        """
        alpha = scale_args.get('alpha', 0.5)
        beta = scale_args.get('beta', 2.)
        kappa = scale_args.get('kappa', 3. - n)
        lambda_ = alpha**2*(n + kappa) - n
        # Sigma points: origin plus +/- scaled unit vectors.
        X = np.sqrt(n + lambda_)*np.block([np.zeros(n)[:, None], np.eye(n), -np.eye(n)])
        # Weights: all points share c, the center point is special-cased.
        c = 1. / (2.*(n + lambda_))
        wc = np.full(2*n + 1, c)
        wm = np.full(2*n + 1, c)
        wm[0] = lambda_ / (n + lambda_)
        wc[0] = lambda_ / (n + lambda_) + (1. - alpha**2 + beta)
        return X, wm, wc

    def get_set_julier(self, n, **scale_args):
        """
        Generates sigma points and weights according to the third order
        method in [2]_.

        Parameters
        ----------
        n : int
            Dimensionality of the state. 2n+1 points will be generated.
        kappa : float, default=3-n
            Scaling factor.

        Returns
        -------
        X : np.array, of size (n, 2n+1)
            Sigma points, one per column.
        wm : np.array
            Mean weights.
        wc : np.array
            Covariance weights (identical to the mean weights for this set).

        References
        ----------
        .. [2] S. Julier and J. Uhlmann "New extension of the Kalman filter
           to nonlinear systems"
        """
        kappa = scale_args.get('kappa', 3. - n)
        # Sigma points: origin plus +/- scaled unit vectors.
        X = np.sqrt(n + kappa)*np.block([np.zeros(n)[:, None], np.eye(n), -np.eye(n)])
        # Weights.
        c = 1. / (2.*(n + kappa))
        wm = np.full(2*n + 1, c)
        wm[0] = kappa / (n + kappa)
        return X, wm, wm

    def get_set_menegaz(self, n, **scale_args):
        """
        Computes the sigma points and weights using the second order
        method in [3]_.

        Parameters
        ----------
        n : int
            Dimensionality of the state. n+1 points will be generated.
        w0 : scalar, default=0.5
            A scaling parameter with 0 < w0 < 1.

        Returns
        -------
        X : np.array, of size (n, n+1)
            Sigma points, one per column.
        wm : np.array
            Mean weights.
        wc : np.array
            Covariance weights (identical to the mean weights for this set).

        Raises
        ------
        ValueError
            If w0 is not strictly between 0 and 1.

        References
        ----------
        .. [3] H.M. Menegaz et al. "A new smallest sigma set for the Unscented
           Transform and its applications on SLAM"
        """
        w0 = scale_args.get('w0', 0.5)
        if w0 >= 1.0 or w0 <= 0.0:
            raise ValueError("w0 must be between 0 and 1")
        ### Sigma point set
        alpha = np.sqrt((1. - w0) / n)
        C = self.sqrt(np.diag(np.ones(n), 0) - (alpha**2)*np.ones((n, n)))
        C_inv = inv(C)
        W = np.diag(np.diag(w0*(alpha**2)*C_inv @ np.ones((n, n)) @ C_inv.T), 0)
        W_sqrt = self.sqrt(W)
        # Columns are points: first the center-like point, then the rest.
        # (The original built X, transposed it, and returned X.T — a no-op
        # round trip; X is returned directly here.)
        X = np.zeros((n, n + 1))
        X[:, 0] = -(alpha / np.sqrt(w0))*np.ones(n)
        X[:, 1:] = C @ inv(W_sqrt)
        ### Weights
        w = np.zeros(n + 1)
        w[0] = w0
        w[1:] = np.diag(W, 0)
        return X, w, w

    def get_set_simplex(self, n, **scale_args):
        """
        Generates sigma points and weights according to the second order
        simplex method presented in [4]_.

        Parameters
        ----------
        n : int
            Dimensionality of the state. n+1 points will be generated.

        Returns
        -------
        X : np.array, of size (n, n+1)
            Sigma points, one per column.
        wm : np.array
            Mean weights.
        wc : np.array
            Covariance weights (identical to the mean weights for this set).

        References
        ----------
        .. [4] Phillippe Moireau and Dominique Chapelle "Reduced-Order
           Unscented Kalman Filtering with Application to Parameter
           Identification in Large-Dimensional Systems"
        """
        # Build the simplex vertices row by row.
        lambda_ = n / (n + 1)
        Istar = np.array([[-1/np.sqrt(2*lambda_), 1/np.sqrt(2*lambda_)]])
        for d in range(2, n+1):
            row = np.ones((1, Istar.shape[1] + 1)) * 1. / np.sqrt(lambda_*d*(d + 1))
            row[0, -1] = -d / np.sqrt(lambda_ * d * (d + 1))
            Istar = np.r_[np.c_[Istar, np.zeros((Istar.shape[0]))], row]
        X = np.sqrt(n)*Istar
        # Equal weights for all n+1 points.
        wm = np.full(n + 1, 1. / (n+1.))
        return X, wm, wm

    def get_set_li(self, n, **scale_args):
        r"""
        Computes the sigma points and weights for a modified version of
        the fifth order method in [5]_. Setting the scaling parameter
        :math:`r = \sqrt{3}` recovers the original method. This method
        also requires :math:`n - r^2 - 1 \neq 0`.

        Parameters
        ----------
        n : int
            Dimensionality of the state. 2n^2 + 1 points will be generated.
        r : scalar, default=sqrt(3/2)
            A scaling parameter with n - r^2 - 1 != 0.

        Returns
        -------
        X : np.array, of size (n, 2n^2 + 1)
            Sigma points, one per column.
        wm : np.array
            Mean weights.
        wc : np.array
            Covariance weights (identical to the mean weights for this set).

        Raises
        ------
        ValueError
            If n < 5 or n - r^2 - 1 == 0.

        References
        ----------
        .. [5] Li, Z. et al. "A Novel Fifth-Degree Cubature Kalman Filter
           for Real-Time Orbit Determination by Radar"
        """
        # BUG FIX: the scaling parameter was read from the misspelled name
        # `slace_args`, raising NameError whenever 'r' was supplied.
        r = scale_args.get('r', np.sqrt(3./2.))
        if n < 5 or abs(n - r**2 - 1.) < 1e-16:
            raise ValueError("This method requires n>=4 and n - r^2 - 1 != 0")
        # Weights
        # Coordinate for the first symmetric set
        r1 = (r*np.sqrt(n-4.))/np.sqrt(n - r**2 - 1.)
        # First symmetric set weight
        w2 = (4. - n) / (2. * r1**4)
        # Second symmetric set weight
        w3 = 1. / (4. * r**4)
        # Center point weight
        w1 = 1. - 2.*n*w2 - 2.*n*(n-1)*w3
        # Vector of weights
        w = np.block([w1, np.repeat(w2, 2*n), np.repeat(w3, 2*n*(n-1))])
        # Points
        # First fully symmetric set
        X0 = r1*np.eye(n)
        X0_s = np.block([X0, -X0])
        # Second fully symmetric set: all pairwise sums/differences of axes.
        X1 = r*np.eye(n)
        indexes_i = []
        indexes_j = []
        for i in range(1, n):
            indexes_i.append(np.repeat([i], i))
            indexes_j.append(np.arange(0, i))
        indexes_i = np.concatenate(indexes_i).ravel()
        indexes_j = np.concatenate(indexes_j).ravel()
        P1 = X1[indexes_i, :].T + X1[indexes_j, :].T
        P2 = X1[indexes_i, :].T - X1[indexes_j, :].T
        X1_s = np.block([P1, P2, -P1, -P2])
        # Full set of points (columns are points)
        X = np.block([np.zeros(n)[:, None], X0_s, X1_s])
        return X, w, w

    def get_set_mysovskikh(self, n, **scale_args):
        """
        Computes the sigma points and weights for a fifth order cubature
        rule due Mysovskikh, and outlined in [6]_.

        Parameters
        ----------
        n : int
            Dimensionality of the state. n^2 + 3n + 3 points will be generated.

        Returns
        -------
        X : np.array, of size (n, n^2 + 3n + 3)
            Sigma points, one per column.
        wm : np.array
            Mean weights.
        wc : np.array
            Covariance weights (identical to the mean weights for this set).

        References
        ----------
        .. [6] J. Lu and D.L. Darmofal "Higher-dimensional integration
           with gaussian weight for applications in probabilistic design"
        """
        # First set of points: the n+1 simplex vertices A.
        I = (np.arange(n)[:, None] + 1).repeat(n + 1, axis=1).T
        R = (np.arange(n + 1) + 1)[:, None].repeat(n, axis=1)
        A = -np.sqrt((n+1.) / (n*(n-I+2.)*(n-I+1.)))
        indexes = (I == R)
        A[indexes] = np.sqrt(((n+1.)*(n-R[indexes]+1.)) / (n*(n-R[indexes]+2.)))
        indexes = I > R
        A[indexes] = 0.
        # Second set of points: midpoint-like combinations B of vertex pairs.
        ls = np.arange(n+1)[:, None].repeat(n+1)
        ks = (np.arange(n+1)[:, None].repeat(n+1, axis=1).T).flatten()
        indexes = ks < ls
        B = np.sqrt(n / (2.*(n-1.)))*(A[ks[indexes]] + A[ls[indexes]])
        # Full set (one point per row until the final transpose).
        # X = np.sqrt(n + 2.)*np.block([[np.zeros(n)], [A], [-A], [B], [-B]])
        X = np.block([[np.zeros(n)], [A], [-A], [B], [-B]])
        # Weights
        w0 = 2./(n+2.)
        w1 = (n**2 * (7. - n)) / (2.*(n + 1.)**2 * (n+2.)**2)
        w2 = (2.*(n-1.)**2) / ((n+1.)**2 * (n+2.)**2)
        w = np.block([w0, np.repeat(w1, 2*len(A)), np.repeat(w2, 2*len(B))])
        # BUG FIX: leftover debug statements (two prints followed by quit())
        # terminated the interpreter before this method could return.
        return X.T, w, w

    def get_set_hermite(self, n, **scale_args):
        """
        Computes the sigma points and weights for the third order Gauss
        Hermite method [7]_.

        Parameters
        ----------
        n : int
            Dimensionality of the state. 3^n points will be generated.

        Returns
        -------
        X : np.array, of size (n, 3^n)
            Sigma points, one per column.
        wm : np.array
            Mean weights.
        wc : np.array
            Covariance weights (identical to the mean weights for this set).

        References
        ----------
        .. [7] Peng, Lijun et al. "A New Sparse Gauss-Hermite Cubature
           Rule Based on Relative-Weight-Ratios for Bearing-Ranging Target
           Tracking"
        """
        # Sigma points: the full {0, +1, -1}^n tensor grid.
        X = np.array(np.meshgrid(*[[0., 1., -1.]]*n)).T.reshape(-1, n).T
        # Weight of each point is a product of 1D weights: 2/3 per zero
        # coordinate and 1/6 per nonzero coordinate (js counts nonzeros).
        js = (X**2).sum(axis=0)
        wm = (2./3.)**(n-js) * (1./6.)**(js)
        X *= np.sqrt(3.)
        return X, wm, wm
|
"""
This file updates the data format as of 03/24 to add the filters to
schematics change
proposed in : https://github.com/facebookresearch/fairo/pull/276
issue: https://github.com/facebookresearch/fairo/issues/219
changeset: https://www.internalfb.com/phabricator/paste/view/P335720205
"""
import argparse
import json
import copy
from pprint import pprint
from os import walk
def update_data(folder):
    """Walk ``folder`` and rewrite each annotation file to the new format.

    Every file (except "templated_modify.txt") holds lines of the form
    ``<chat>|<action dict json>``. A dict whose single DANCE/MOVE action
    carries ``repeat.repeat_count`` is converted to the new
    ``remove_condition`` representation; all other lines pass through.
    Output is written next to the input as ``<name>_new.txt``
    (templated.txt -> templated_new.txt), then the output file is
    re-scanned to report how many updates it would still need
    (expected: 0).

    Parameters
    ----------
    folder : str
        Path of the dataset folder; expected to end with a path
        separator, since file paths are built by simple concatenation.
    """

    def _needs_update(action_dict):
        # True when the dict still uses the old "single DANCE/MOVE action
        # with repeat.repeat_count" representation.
        if action_dict["dialogue_type"] != "HUMAN_GIVE_COMMAND":
            return False
        actions = action_dict["action_sequence"]
        # Length check first: the original indexed actions[0] before
        # checking the length, which crashed on empty sequences.
        if len(actions) != 1 or actions[0]["action_type"] not in ["DANCE", "MOVE"]:
            return False
        action = actions[0]
        return "repeat" in action and "repeat_count" in action["repeat"]

    for (dirpath, dirnames, filenames) in walk(folder):
        for f_name in filenames:
            if f_name == "templated_modify.txt":
                continue
            print("processing input file : %r" % (f_name))
            in_path = folder + f_name
            new_data = []
            count = 0
            with open(in_path) as in_f:
                for line in in_f.readlines():
                    chat, action_dict = line.strip().split("|")
                    action_dict = json.loads(action_dict)
                    if _needs_update(action_dict):
                        all_keys = list(action_dict.keys())
                        all_keys.remove("action_sequence")
                        actions = action_dict["action_sequence"]
                        action = actions[0]
                        # Lift the FOR-style repeat_count out of the action
                        # and into a remove_condition comparator.
                        repeat_count = action["repeat"]["repeat_count"]
                        action.pop("repeat")
                        count += 1
                        new_ad = {}
                        for key in all_keys:
                            new_ad[key] = action_dict[key]
                        new_ad["action_sequence"] = actions
                        new_ad["remove_condition"] = {
                            "condition_type": "COMPARATOR",
                            "condition": {
                                "comparison_type": "EQUAL",
                                "input_left": {
                                    "filters": {
                                        "output": {"attribute": "RUN_COUNT"},
                                        "special": {"fixed_value": "THIS"},
                                    }
                                },
                                "input_right": {"value": repeat_count},
                            },
                        }
                        new_data.append([chat, new_ad])
                    else:
                        new_data.append([chat, action_dict])
            print("Total updates made in this file : %r" % (count))
            out_file = folder + f_name.split(".txt")[0] + "_new.txt"
            print("Writing to output file: %r" % (out_file))
            with open(out_file, "w") as out_f:
                for chat, action_dict in new_data:
                    out_f.write(chat + "|" + json.dumps(action_dict) + "\n")
            # Sanity check: the rewritten file should need no further
            # updates. (BUG FIX: the original re-scan tested a stale
            # `action` variable leaked from the loop above instead of the
            # current line's action, so its count was meaningless.)
            print("Now computing num updates on output file...")
            count = 0
            with open(out_file) as out_f:
                for line in out_f.readlines():
                    chat, action_dict = line.strip().split("|")
                    if _needs_update(json.loads(action_dict)):
                        count += 1
            print("Total number of updates needed in out file: %r" % (count))
            print("*" * 20)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_folder",
type=str,
help="The folder containing all files that need to be updated",
# Assuming run from ~/droidlet
default="craftassist/agent/datasets/full_data/",
)
args = parser.parse_args()
update_data(args.input_folder)
|
from django.apps import AppConfig
class BadgifyConfig(AppConfig):
    """Django application configuration for the badgify app."""

    name = 'badgify'
    verbose_name = 'Badgify'

    def ready(self):
        """Run badge autodiscovery once the Django app registry is ready."""
        super(BadgifyConfig, self).ready()
        # self.module is the imported badgify package; assumes it exposes
        # an autodiscover() entry point — defined elsewhere in the package.
        self.module.autodiscover()
|
from tempfile import TemporaryDirectory
import pytest
from nebullvm.base import DeepLearningFramework
from nebullvm.inference_learners.onnx import ONNX_INFERENCE_LEARNERS
from nebullvm.optimizers.onnx import ONNXOptimizer
from nebullvm.optimizers.tests.utils import get_onnx_model
@pytest.mark.parametrize(
    ("output_library", "dynamic"),
    [
        (DeepLearningFramework.PYTORCH, True),
        (DeepLearningFramework.PYTORCH, False),
    ],
)
def test_onnxruntime(output_library: DeepLearningFramework, dynamic: bool):
    """Optimize a test ONNX model and verify the resulting learner predicts.

    Runs once with dynamic input shapes and once with static shapes.
    """
    with TemporaryDirectory() as tmp_dir:
        model_path, model_params = get_onnx_model(tmp_dir, dynamic)
        optimizer = ONNXOptimizer()
        model = optimizer.optimize(model_path, output_library, model_params)
        # The optimizer must return the learner class registered for the
        # requested output framework.
        assert isinstance(model, ONNX_INFERENCE_LEARNERS[output_library])
        inputs_example = list(model.get_inputs_example())
        res = model.predict(*inputs_example)
        assert res is not None
        if dynamic:  # Check also with a smaller batch_size
            inputs_example = [
                input_[: len(input_) // 2] for input_ in inputs_example
            ]
            res = model.predict(*inputs_example)
            assert res is not None
|
import golly as g

# Interactive parameters: stack size and the RAM addresses where the
# stdin/stdout buffers start (defaults from the QFT computer layout).
s1 = g.getstring("Enter stack size:", "233")
s2 = g.getstring("Enter stdin buffer starting address:", "290")
s3 = g.getstring("Enter stdout buffer starting address:", "790")
RAM_NEGATIVE_BUFFER_SIZE = int(s1)
# Buffer start addresses are shifted past the negative/stack buffer region.
QFTASM_RAMSTDIN_BUF_STARTPOSITION = int(s2) + RAM_NEGATIVE_BUFFER_SIZE
QFTASM_RAMSTDOUT_BUF_STARTPOSITION = int(s3) + RAM_NEGATIVE_BUFFER_SIZE
RAM_SIZE = 1024
QFTASM_REGAREA_MAX_ADDRESS = 10
# Grid coordinates of bit (0, 0) of the RAM pattern in the Golly universe,
# and the cell spacing between adjacent bits (x) and words (y).
p_init = (337, 239)
delta_x = 16
delta_y = 16
# Golly cell states 6/7 encode bit values 0/1.
d_state2bit = {
    6: 0,
    7: 1,
}
d_bit2state = {
    0: 6,
    1: 7,
}
# Printable form of the two bit states.
d_state2chr = {6:"_", 7:"*"}
def getcell_by_index(i_x, i_y):
    """Return the Golly cell state at logical grid index (i_x, i_y) of the RAM pattern."""
    return g.getcell(p_init[0] + i_x * delta_x, p_init[1] + i_y * delta_y)
def write_byte_at(addr, write_byte):
    """Write a 16-bit value into the RAM pattern at logical address ``addr``.

    The logical address is first remapped to a physical row index.
    NOTE(review): the remapping presumably mirrors the QFT RAM layout
    (register area kept as-is, high addresses folded back above the
    register area, everything else shifted past the negative buffer) —
    confirm against src/qftramheader_common.py.
    """
    if addr <= QFTASM_REGAREA_MAX_ADDRESS:
        # Register area: used as-is (intentional no-op branch).
        addr = addr
    elif addr >= RAM_SIZE - RAM_NEGATIVE_BUFFER_SIZE:
        # High addresses fold back to rows just above the register area.
        addr = RAM_SIZE - addr + QFTASM_REGAREA_MAX_ADDRESS
    elif addr > QFTASM_REGAREA_MAX_ADDRESS:
        # Ordinary addresses are shifted past the negative buffer region.
        addr = addr + RAM_NEGATIVE_BUFFER_SIZE
    # Render the value as 16 bits, most significant bit first; each bit
    # sets two horizontally adjacent cells to the bit's cell state.
    b_binary = "{:016b}".format(write_byte)
    for i_bit, bit in enumerate(b_binary):
        for x_offset in range(2):
            g.setcell(p_init[0] + i_bit * delta_x + x_offset, p_init[1] + addr * delta_y, d_bit2state[int(bit)])
with open("src/qftramheader_common.py", "rt") as f:
exec(f.read(), locals(), globals())
|
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import yaml
from datetime import datetime
# Repo-relative output locations for the generated source listings.
CURRENT_DIR = os.path.dirname(__file__)
ROOT_DIR = os.path.abspath(os.path.join(CURRENT_DIR, '../../'))
SOURCES_PATH_CC_BY = os.path.join(ROOT_DIR, 'docs/sources_cc_by.md')
SOURCES_PATH_CC_BY_SA = os.path.join(ROOT_DIR, 'docs/sources_cc_by_sa.md')
PIPELINE_DIR = os.path.join(ROOT_DIR, 'src/pipeline')
# Make the pipeline package importable before importing config/path_utils.
sys.path.append(PIPELINE_DIR)
import config
import path_utils
# Load both license groups and sort each by attribution country.
sources_cc_by = config.read_config(filter_no_load_func=False, cc_by_sa=False)
alphabetized_sources_cc_by = sorted(sources_cc_by.items(), key = lambda x: x[1]['attribution']['country'])
sources_cc_by_sa = config.read_config(filter_no_load_func=False, cc_by_sa=True)
alphabetized_sources_cc_by_sa = sorted(sources_cc_by_sa.items(), key = lambda x: x[1]['attribution']['country'])
def source_and_link_str(source_name, link):
    """Return ``"<source_name> ([link](<link>))"`` markdown text.

    Parameters
    ----------
    source_name : str
        Display name of the source.
    link : str
        URL to render as a markdown "[link](...)".
    """
    # The original accumulated into a variable named `str`, shadowing the
    # builtin; build the result in one expression instead.
    return '%s ([link](%s))' % (source_name, link)
def add_last_accessed_date(source):
    """Best-effort: stamp ``source['last_accessed']`` from the newest data.

    Looks up the most recent data date via ``path_utils``; if that fails
    for any reason (missing data, bad source config), the source is
    returned unchanged.

    Parameters
    ----------
    source : dict
        A source config dict; mutated in place on success.
    """
    try:
        most_recent_date = path_utils.most_recent_data(source)['date']
        source['last_accessed'] = str(most_recent_date)
        return source
    # Deliberate best-effort, but catch Exception rather than a bare
    # except so KeyboardInterrupt/SystemExit still propagate.
    except Exception:
        return source
def write_source(item, out):
    """Write one markdown section describing a data source.

    ``item`` is a ``(key, source_dict)`` pair; the section heading is the
    attribution country, followed by whichever optional fields the source
    defines, and finishes with a blank separator line.
    """
    source = add_last_accessed_date(item[1])
    attribution = source['attribution']

    def field(label, text):
        # One bolded field followed by a markdown line break.
        out.write('**' + label + ':** ')
        out.write(text)
        out.write('<br>')

    out.write('#### ' + attribution['country'] + '\n')
    if 'source_name' in attribution:
        field('Source name',
              source_and_link_str(attribution['source_name'], attribution['main_link']))
    if 'data_link' in attribution:
        field('Link to data', attribution['data_link'])
    if 'copyright_notice' in attribution:
        field('Copyright notice', attribution['copyright_notice'])
    if 'original' in attribution:
        original_source = attribution['original']
        field('Original data source',
              source_and_link_str(original_source['source_name'], original_source['main_link']))
        if 'data_link' in original_source:
            field('Link to original data', original_source['data_link'])
        if 'license' in original_source:
            field('License for original data',
                  source_and_link_str(original_source['license']['name'],
                                      original_source['license']['link']))
    if 'aggregated_by' in attribution:
        agg_source = attribution['aggregated_by']
        field('Data aggregated by',
              source_and_link_str(agg_source['source_name'], agg_source['main_link']))
        if 'license' in agg_source:
            field('License for aggregated data',
                  source_and_link_str(agg_source['license']['name'],
                                      agg_source['license']['link']))
    if 'citation' in attribution:
        field('Citation', attribution['citation'])
    if 'description' in attribution:
        field('Description', attribution['description'])
    if 'license' in source:
        field('License',
              source_and_link_str(source['license']['name'], source['license']['link']))
    if 'last_accessed' in source:
        out.write('**Last accessed:** ' + source['last_accessed'])
    out.write('\n\n')
# Regenerate both markdown listings from scratch (one section per source).
with open(SOURCES_PATH_CC_BY, 'w') as out:
    for item in alphabetized_sources_cc_by:
        write_source(item, out)
with open(SOURCES_PATH_CC_BY_SA, 'w') as out:
    for item in alphabetized_sources_cc_by_sa:
        write_source(item, out)
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

# Constants shared by the KServe Python client/SDK.

# KFServing K8S constants: custom-resource group/kind/plural and versions.
KSERVE_GROUP = 'serving.kserve.io'
KSERVE_KIND = 'InferenceService'
KSERVE_PLURAL = 'inferenceservices'
KSERVE_KIND_TRAINEDMODEL = 'TrainedModel'
KSERVE_PLURAL_TRAINEDMODEL = 'trainedmodels'
KSERVE_V1BETA1_VERSION = 'v1beta1'
KSERVE_V1ALPHA1_VERSION = "v1alpha1"
# Full apiVersion strings ("<group>/<version>").
KSERVE_V1BETA1 = KSERVE_GROUP + '/' + KSERVE_V1BETA1_VERSION
KSERVE_V1ALPHA1 = KSERVE_GROUP + '/' + KSERVE_V1ALPHA1_VERSION
# Log level, overridable via the KSERVE_LOGLEVEL environment variable.
KSERVE_LOGLEVEL = os.environ.get('KSERVE_LOGLEVEL', 'INFO').upper()
# INFERENCESERVICE credentials common constants
INFERENCESERVICE_CONFIG_MAP_NAME = 'inferenceservice-config'
INFERENCESERVICE_SYSTEM_NAMESPACE = 'kserve'
DEFAULT_SECRET_NAME = "kserve-secret-"
DEFAULT_SA_NAME = "kserve-service-credentials"
# S3 credentials constants
S3_ACCESS_KEY_ID_DEFAULT_NAME = "AWS_ACCESS_KEY_ID"
S3_SECRET_ACCESS_KEY_DEFAULT_NAME = "AWS_SECRET_ACCESS_KEY"
S3_DEFAULT_CREDS_FILE = '~/.aws/credentials'
# GCS credentials constants
GCS_CREDS_FILE_DEFAULT_NAME = 'gcloud-application-credentials.json'
GCS_DEFAULT_CREDS_FILE = '~/.config/gcloud/application_default_credentials.json'
# Azure credentials constants
AZ_DEFAULT_CREDS_FILE = '~/.azure/azure_credentials.json'
|
import RtAudio
import numpy
import time
# ID of the output device. You can find this from printIODevices.py
deviceId = 2
sampleRate = 44100  # Hz
bufferSize = 1024  # frames per write
rate = 0.005  # per-sample increment of the saw ramp
def sawGen(bufferSize, rate=0.005):
    """Infinite generator of saw-wave buffers.

    Each ``next()`` yields a numpy array of ``bufferSize`` samples that
    continue the ramp where the previous buffer left off; the ramp rises
    by ``rate`` per sample and wraps from +1 back to -1.

    Parameters
    ----------
    bufferSize : int
        Number of samples per yielded buffer.
    rate : float, default=0.005
        Per-sample increment. (Generalized from the module-level ``rate``
        global, whose value is the same 0.005; callers that never set the
        global see identical output.)
    """
    lastValue = 0
    while 1:
        sawValues = []
        for i in range(bufferSize):
            sawValues.append(lastValue)
            lastValue += rate
            if lastValue > 1:
                # Wrap the ramp back down (span of 2: from +1 to -1).
                lastValue -= 2
        yield numpy.array(sawValues)
# Shared generator instance that keeps the ramp phase between calls.
makeSaw = sawGen(1024)

def saw():
    """Return the next saw-wave buffer from the shared generator."""
    # next(makeSaw) works on both Python 2.6+ and 3; the original used the
    # Python-2-only makeSaw.next() method.
    sawWave = next(makeSaw)
    return sawWave
if __name__ == "__main__":
io = RtAudio.RtAudio()
io.openStream(deviceId, sampleRate, bufferSize)
io.startStream()
while 1:
try:
if io.needWrite() >= bufferSize:
io.write(saw())
else:
time.sleep(float(bufferSize)/sampleRate)
except KeyboardInterrupt:
break
io.stopStream()
io.closeStream()
|
import unittest
from fizzbuzz import *
class FizzBuzzTest(unittest.TestCase):
def setUp(self):
self.maxDiff = None
def test_fizzbuzz(self):
model = FizzBuzzModel()
self.assertEquals(self.__solution(), model.solve())
@staticmethod
def __solution():
solution = []
for num in xrange(1, 201):
msg = ''
if num % 3 == 0:
msg += 'fizz'
if num % 5 == 0:
msg += 'buzz'
if num % 7 == 0:
msg += 'pop'
solution.append(msg or str(num))
return solution
|
# Teaching script: count nucleotides in a DNA string, printing the state
# of the counting dictionary at every step.
dna_seq1 = 'ACC'
# One counter per nucleotide, all starting at zero.
nucl_count = {'A': 0,
              'C': 0,
              'G': 0,
              'T': 0}
# Iterate through each nucleotide in turn:
for nucl in dna_seq1:
    # Print current state of nucl_count dictionary:
    print('Dictionary at start of loop: ' + str(nucl_count))
    # Print current value of nucl:
    print('Current nucl: ' + nucl)
    # Print current count of nucl from dictionary:
    current_count_of_nucl = nucl_count[nucl]
    print('Current count of ' + nucl + ': ' + str(current_count_of_nucl))
    # Update count of nucl:
    new_count_of_nucl = current_count_of_nucl + 1
    print('Updated count of ' + nucl + ': ' + str(new_count_of_nucl))
    # Update dictionary with new count for nucl:
    nucl_count[nucl] = new_count_of_nucl
    # Print updated state of nucl_count dictionary:
    print('Dictionary at end of loop: ' + str(nucl_count))
    # Print new line:
    print()
# Print final nucleotide counts using the 'items' method to iterate
# through dictionaries:
for key, value in nucl_count.items():
    print(str(key) + ' ' + str(value))
|
"""The Logitech Squeezebox integration."""
import logging
from openpeerpower.components.media_player import DOMAIN as MP_DOMAIN
from openpeerpower.config_entries import ConfigEntry
from openpeerpower.core import OpenPeerPower
from .const import DISCOVERY_TASK, DOMAIN, PLAYER_DISCOVERY_UNSUB
_LOGGER = logging.getLogger(__name__)

# Platforms this integration forwards config entries to.
PLATFORMS = [MP_DOMAIN]


async def async_setup_entry(opp: OpenPeerPower, entry: ConfigEntry):
    """Set up Logitech Squeezebox from a config entry."""
    # Forward the entry to the media_player platform.
    opp.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_unload_entry(opp: OpenPeerPower, entry: ConfigEntry):
    """Unload a config entry and tear down its discovery tasks."""
    # Stop player discovery task for this config entry.
    opp.data[DOMAIN][entry.entry_id][PLAYER_DISCOVERY_UNSUB]()
    # Remove stored data for this config entry.
    opp.data[DOMAIN].pop(entry.entry_id)
    # Stop server discovery task if this is the last config entry.
    current_entries = opp.config_entries.async_entries(DOMAIN)
    if len(current_entries) == 1 and current_entries[0] == entry:
        _LOGGER.debug("Stopping server discovery task")
        opp.data[DOMAIN][DISCOVERY_TASK].cancel()
        opp.data[DOMAIN].pop(DISCOVERY_TASK)
    return await opp.config_entries.async_unload_platforms(entry, PLATFORMS)
|
import os
import sys
import json, csv
from bs4 import BeautifulSoup
import xml.etree.ElementTree as ET
from copy import copy
def dictify(r, root=True):
    """Recursively convert an ElementTree element into a nested dict.

    At the top level the result is ``{tag: <converted element>}``. Each
    converted element is its attribute dict, plus ``"_text"`` when the
    element has text, plus one list per child tag holding the converted
    children in document order.
    """
    if root:
        return {r.tag: dictify(r, False)}
    result = copy(r.attrib)
    if r.text:
        result["_text"] = r.text
    for child in r.findall("./*"):
        result.setdefault(child.tag, []).append(dictify(child, False))
    return result
def parse(file):
    """Parse a pipe-delimited citation annotation file into a nested dict.

    Each non-empty line holds ``Key: Value`` fields separated by ``" | "``.
    Lines are grouped as
    ``parse_data[ref_article][citing_article][marker_offset]``; the
    "Reference Text" XML is split into per-sentence text, and offsets and
    discourse facets are cleaned of surrounding brackets/quotes.
    """
    print("parsing: " + str(file))
    parse_data = {}
    with open(file, "r") as f:
        data = f.read().strip().split("\n")
        for line in data:
            line = line.strip()
            if len(line) == 0:
                continue
            # Drop a trailing field separator.
            if line[-1] == "|":
                line = line[0:-1]
            # print("Old line: " + line)
            # Protect a literal "a | s" token from the field split below.
            line = line.replace("a | s, ", "a PIPE s, ")
            # print("New line: " + line)
            items = line.split(" | ")
            line_data = {}
            for kvpair in items:
                if len(kvpair) == 0:
                    continue
                # print kvpair
                # Split on the first ":" only; values may contain colons.
                key = kvpair.strip().split(":", 1)[0].strip()
                value = kvpair.strip().split(":", 1)[1].strip()
                # print key + ":" + value
                line_data[key] = value
            if "Discourse Facet" not in line_data:
                line_data["Discourse Facet"] = "None"
            line_data["Reference Article"] = line_data["Reference Article"].replace(".xml", "")
            line_data["Citing Article"] = line_data["Citing Article"].replace(".xml", "")
            print("original cit marker offset is " + line_data["Citation Marker Offset"])
            # Strip surrounding brackets, then quotes, from the offsets.
            if line_data["Citation Marker Offset"].startswith("["):
                line_data["Citation Marker Offset"] = line_data["Citation Marker Offset"][1:]
            if line_data["Citation Marker Offset"].endswith("]"):
                line_data["Citation Marker Offset"] = line_data["Citation Marker Offset"][:-1]
            if line_data["Citation Marker Offset"].startswith("\'"):
                line_data["Citation Marker Offset"] = line_data["Citation Marker Offset"][1:]
            if line_data["Citation Marker Offset"].endswith("\'"):
                line_data["Citation Marker Offset"] = line_data["Citation Marker Offset"][:-1]
            if line_data["Citation Offset"].startswith("["):
                line_data["Citation Offset"] = line_data["Citation Offset"][1:]
            if line_data["Citation Offset"].endswith("]"):
                line_data["Citation Offset"] = line_data["Citation Offset"][:-1]
            print("new cit marker offset is " + line_data["Citation Marker Offset"])
            # Create the nested grouping levels on first sight.
            if line_data["Reference Article"] not in parse_data:
                parse_data[line_data["Reference Article"]] = {}
            if line_data["Citing Article"] not in parse_data[line_data["Reference Article"]]:
                parse_data[line_data["Reference Article"]][line_data["Citing Article"]] = {}
            if line_data["Citation Marker Offset"] not in parse_data[line_data["Reference Article"]][line_data["Citing Article"]]:
                parse_data[line_data["Reference Article"]][line_data["Citing Article"]][line_data["Citation Marker Offset"]] = {"original": line_data, "comparable": False}
            # Reference Offset: "[a, b, ...]" -> list of cleaned strings.
            ref_offset = line_data["Reference Offset"]
            if ref_offset.startswith("["):
                ref_offset = ref_offset[1:]
            if ref_offset.endswith("]"):
                ref_offset = ref_offset[:-1]
            parsed_ref_offset_tmp = [x.strip() for x in ref_offset.split(",")]
            print("\n\n")
            print(parsed_ref_offset_tmp)
            parsed_ref_offset = []
            for ref in parsed_ref_offset_tmp:
                print(ref)
                if ref.startswith("\'") or ref.startswith("\""):
                    ref = ref[1:]
                if ref.endswith("\'") or ref.endswith("\""):
                    ref = ref[:-1]
                parsed_ref_offset.append(ref)
            print(parsed_ref_offset)
            # Reference Text: wrap in a root tag, escape stray ampersands,
            # normalize through BeautifulSoup, then parse the <S> sentence
            # elements into {sid: text}.
            # print("<root>" + line_data["Reference Text"] + "</root>")
            line = "<root>" + line_data["Reference Text"] + "</root>"
            # print("Line is:")
            # print(line)
            line = line.replace("&", "&amp;")
            line = str(BeautifulSoup(line, "xml"))
            # line = line.replace("<\s>", "</s>")
            # print("Line is:")
            # print(line)
            root = ET.fromstring(line)
            ref_text_dict = dictify(root)
            # print(ref_text_dict)
            ref_text_dict_clean = {}
            cnt = 0
            for item in ref_text_dict["root"]["S"]:
                cnt += 1
                # Fall back to the 1-based position when a sentence has no sid.
                ref_text_dict_clean[item.get("sid", cnt)] = item["_text"]
            parse_data[line_data["Reference Article"]][line_data["Citing Article"]][line_data["Citation Marker Offset"]]["Reference Text"] = ref_text_dict_clean
            parse_data[line_data["Reference Article"]][line_data["Citing Article"]][line_data["Citation Marker Offset"]]["Reference Offset"] = parsed_ref_offset
            # Discourse Facet: list literal or bare value -> normalized
            # lower_snake_case list.
            ref_discourse_facet = line_data["Discourse Facet"]
            parsed_discourse_facet = []
            if len(ref_discourse_facet) > 0:
                if ref_discourse_facet[0] == "[":
                    parsed_discourse_facet_tmp = [x.strip().lower().replace(" ", "_") for x in ref_discourse_facet[1:-1].split(",")]
                    parsed_discourse_facet = []
                    for ref in parsed_discourse_facet_tmp:
                        if ref.startswith("\'") or ref.startswith("\""):
                            ref = ref[1:]
                        if ref.endswith("\'") or ref.endswith("\""):
                            ref = ref[:-1]
                        parsed_discourse_facet.append(ref)
                else:
                    ref = ref_discourse_facet.lower().replace(" ", "_")
                    if ref.startswith("\'") or ref.startswith("\""):
                        ref = ref[1:]
                    if ref.endswith("\'") or ref.endswith("\""):
                        ref = ref[:-1]
                    parsed_discourse_facet.append(ref)
            parse_data[line_data["Reference Article"]][line_data["Citing Article"]][line_data["Citation Marker Offset"]]["Discourse Facet"] = parsed_discourse_facet
    # print(json.dumps(parse_data, sort_keys=True, indent=4))
    # print("###################################################################################################################")
    return parse_data
def _strip_quotes(token):
    """Drop one leading and one trailing single/double quote, if present."""
    if token.startswith("'") or token.startswith('"'):
        token = token[1:]
    if token.endswith("'") or token.endswith('"'):
        token = token[:-1]
    return token


def parse_csv(file):
    """Parse a Task 1 annotation CSV into a nested dict.

    Result shape::

        parse_data[reference_article][citing_article][marker_offset] -> {
            "original": raw row dict, "comparable": False,
            "Reference Text": {sid: sentence}, "Reference Offset": [sids],
            "Discourse Facet": [facet, ...]}

    Rows whose Reference Text is "NA" are skipped; rows whose XML or facet
    field cannot be parsed are reported and skipped.
    """
    print("parsing: " + str(file))
    parse_data = {}
    # Context manager closes the handle (original leaked the open() result).
    with open(file, "r") as csv_fh:
        csv_obj = csv.reader(csv_fh)
        items_list = None
        for i, row in enumerate(csv_obj):
            if i == 0:  # header: Citance Number, Reference Article, ...
                items_list = row
                continue
            if len(row) != len(items_list):
                print("Error: # of items mismatch")
                print(items_list)
                print(row)
                continue
            line_data = dict(zip(items_list, row))
            if line_data["Reference Text"] == "NA":
                continue
            print(line_data["Reference Text"])
            line_data["Reference Article"] = line_data["Reference Article"].replace(".xml", "")
            line_data["Citing Article"] = line_data["Citing Article"].replace(".xml", "")
            print("original cit marker offset is " + line_data["Citation Marker Offset"])
            # Offsets are deliberately collapsed to '0' so every citance of a
            # citing paper lands in one bucket per (reference, citing) pair.
            line_data["Citation Marker Offset"] = '0'
            line_data["Citation Offset"] = '0'
            print("new cit marker offset is " + line_data["Citation Marker Offset"])
            ref_article = line_data["Reference Article"]
            cit_article = line_data["Citing Article"]
            marker = line_data["Citation Marker Offset"]
            parse_data.setdefault(ref_article, {}).setdefault(cit_article, {})
            if marker not in parse_data[ref_article][cit_article]:
                parse_data[ref_article][cit_article][marker] = {"original": line_data, "comparable": False}
            # --- Reference Offset: strip list syntax and per-item quotes ---
            ref_offset = line_data["Reference Offset"]
            if ref_offset.startswith("["):
                ref_offset = ref_offset[1:]
            if ref_offset.endswith("]"):
                ref_offset = ref_offset[:-1]
            parsed_ref_offset_tmp = [x.strip() for x in ref_offset.split(",")]
            print("\n\n")
            print(parsed_ref_offset_tmp)
            parsed_ref_offset = [_strip_quotes(x) for x in parsed_ref_offset_tmp]
            print(parsed_ref_offset)
            # --- Reference Text: wrap in a root element, parse <S> sentences ---
            line = "<root>" + line_data["Reference Text"] + "</root>"
            # BUGFIX: escape bare ampersands so the XML parser does not choke.
            # The original line was the no-op line.replace("&", "&"), twice.
            # (Known limitation: a pre-escaped "&amp;" becomes "&amp;amp;".)
            line = line.replace("&", "&amp;")
            line = str(BeautifulSoup(line, "xml"))
            root = ET.fromstring(line)
            ref_text_dict = dictify(root)
            ref_text_dict_clean = {}
            cnt = 0
            try:
                for item in ref_text_dict["root"]["S"]:
                    cnt += 1
                    ref_text_dict_clean[item.get("sid", cnt)] = item["_text"]
                parse_data[ref_article][cit_article][marker]["Reference Text"] = ref_text_dict_clean
                parse_data[ref_article][cit_article][marker]["Reference Offset"] = parsed_ref_offset
            except Exception:
                print("Error in Reference Offset")
                continue
            # --- Discourse Facet: quoted scalar or bracketed list ---
            try:
                ref_discourse_facet = line_data["Discourse Facet"]
                parsed_discourse_facet = []
                if len(ref_discourse_facet) > 0:
                    if ref_discourse_facet[0] == "[":
                        raw = ref_discourse_facet[1:-1]
                    else:
                        raw = ref_discourse_facet
                    parsed_discourse_facet = [
                        _strip_quotes(x.strip().lower().replace(" ", "_"))
                        for x in raw.split(",")]
                print("parsed_discourse_facet " + str(parsed_discourse_facet))
                parse_data[ref_article][cit_article][marker]["Discourse Facet"] = parsed_discourse_facet
            except Exception:
                print("Error in Discourse Facet")
                continue
    return parse_data
def calculate(gold_data, submit_data):
    """Compare a submission against gold annotations and score Tasks 1a/1b.

    Passes 1-2 score Task 1a (reference-span identification) by comparing
    sentence offsets; passes 3-4 score Task 1b (discourse facet), counting a
    facet only for citances whose spans matched at least once ("comparable").

    Side effect: mutates gold_data, setting the per-citance "comparable"
    flag, which the facet passes then read.

    :return: 12-tuple (precision_ref, recall_ref, f_ref, precision_facet,
             recall_facet, f_facet, TP_ref, FP_ref, FN_ref, TP_facet,
             FP_facet, FN_facet)
    """
    [TP_ref, FN_ref, FP_ref, TP_facet, FN_facet, FP_facet] = [0, 0, 0, 0, 0, 0]
    # Pass 1 (gold -> submission): matched offsets are TPs, gold offsets the
    # submission lacks are FNs.
    for ref_article in gold_data:
        for cit_article in gold_data[ref_article]:
            for cit_marker_offset in gold_data[ref_article][cit_article]:
                old_TP_ref = TP_ref  # NOTE(review): written but never read
                try:
                    for ref_offset in gold_data[ref_article][cit_article][cit_marker_offset]["Reference Offset"]:
                        try:
                            ref_offset_list = submit_data[ref_article][cit_article][cit_marker_offset]["Reference Offset"]
                            if ref_offset in ref_offset_list:
                                TP_ref += 1
                                # Mark this citance so facet passes score it.
                                gold_data[ref_article][cit_article][cit_marker_offset]["comparable"] = True
                            else:
                                FN_ref += 1
                        except KeyError as e:
                            # Submission has no entry for this citance at all.
                            print("IGNORE THIS: key error 1")
                            FN_ref += 1
                except: continue  # gold entry without "Reference Offset"
    # Pass 2 (submission -> gold): submitted offsets absent from gold are FPs.
    for ref_article in submit_data:
        for cit_article in submit_data[ref_article]:
            for cit_marker_offset in submit_data[ref_article][cit_article]:
                try:
                    for ref_offset in submit_data[ref_article][cit_article][cit_marker_offset]["Reference Offset"]:
                        try:
                            ref_offset_list = gold_data[ref_article][cit_article][cit_marker_offset]["Reference Offset"]
                            if ref_offset not in ref_offset_list:
                                FP_ref += 1
                        except KeyError as e:
                            # Gold has no entry for this citance.
                            print("IGNORE THIS: key error 2")
                            FP_ref += 1
                except: continue  # submission entry without "Reference Offset"
    # Task 1a precision/recall/F1, each guarded against a zero denominator.
    [precision_ref, recall_ref, f_ref] = [0.0, 0.0, 0.0]
    try:
        precision_ref = TP_ref / float(TP_ref + FP_ref)
    except ZeroDivisionError as e:
        precision_ref = 0
    try:
        recall_ref = TP_ref / float(TP_ref + FN_ref)
    except ZeroDivisionError as e:
        recall_ref = 0
    try:
        f_ref = 2.0 * precision_ref * recall_ref / float(precision_ref + recall_ref)
    except ZeroDivisionError as e:
        f_ref = 0
    # Pass 3 (gold facets -> submission): only "comparable" citances can
    # produce TPs; everything else counts as missed (FN).
    for ref_article in gold_data:
        for cit_article in gold_data[ref_article]:
            for cit_marker_offset in gold_data[ref_article][cit_article]:
                try:
                    for facet in gold_data[ref_article][cit_article][cit_marker_offset]["Discourse Facet"]:
                        if gold_data[ref_article][cit_article][cit_marker_offset]["comparable"]:
                            print("\n\n")
                            print(ref_article)
                            print(cit_article)
                            print(cit_marker_offset)
                            print(facet)
                            print(submit_data[ref_article][cit_article][cit_marker_offset]["Discourse Facet"])
                            try:
                                if facet in submit_data[ref_article][cit_article][cit_marker_offset]["Discourse Facet"]:
                                    TP_facet += 1
                                else:
                                    FN_facet += 1
                            except KeyError as e:
                                print("IGNORE THIS: Key error 4")
                                FN_facet += 1
                        else:
                            FN_facet += 1
                except: continue  # gold entry without "Discourse Facet"
    # Pass 4 (submission facets -> gold): extra facets on comparable
    # citances are FPs; missing gold entries also count as FPs.
    for ref_article in submit_data:
        for cit_article in submit_data[ref_article]:
            for cit_marker_offset in submit_data[ref_article][cit_article]:
                try:
                    for facet in submit_data[ref_article][cit_article][cit_marker_offset]["Discourse Facet"]:
                        try:
                            if gold_data[ref_article][cit_article][cit_marker_offset]["comparable"]:
                                if facet not in gold_data[ref_article][cit_article][cit_marker_offset]["Discourse Facet"]:
                                    FP_facet += 1
                        except KeyError as e:
                            print("IGNORE THIS: Key error 5")
                            FP_facet += 1
                except: continue  # submission entry without "Discourse Facet"
    # Task 1b precision/recall/F1, same zero-denominator guards.
    [precision_facet, recall_facet, f_facet] = [0.0, 0.0, 0.0]
    try:
        precision_facet = TP_facet / float(TP_facet + FP_facet)
    except ZeroDivisionError as e:
        precision_facet = 0
    try:
        recall_facet = TP_facet / float(TP_facet + FN_facet)
    except ZeroDivisionError as e:
        recall_facet = 0
    try:
        f_facet = 2.0 * precision_facet * recall_facet / float(precision_facet + recall_facet)
    except ZeroDivisionError as e:
        f_facet = 0
    return (precision_ref, recall_ref, f_ref, precision_facet, recall_facet, f_facet, TP_ref, FP_ref, FN_ref, TP_facet, FP_facet, FN_facet)
def evaluate(gold_file, submit_file, score_file):
    """Score one submission against its gold file, append per-file lines
    to *score_file*, and return the 12-tuple from calculate()."""
    scores = calculate(parse_csv(gold_file), parse_csv(submit_file))
    (p_ref, r_ref, f_ref, p_facet, r_facet, f_facet,
     TP_ref, FP_ref, FN_ref, TP_facet, FP_facet, FN_facet) = scores
    base = os.path.basename(gold_file)
    labeled = [
        ("task1a_precision", p_ref),
        ("task1a_recall", r_ref),
        ("task1a_f1", f_ref),
        ("task1b_precision", p_facet),
        ("task1b_recall", r_facet),
        ("task1b_f1", f_facet),
    ]
    with open(score_file, "a") as out:
        for label, value in labeled:
            out.write(base + "_" + label + ": " + str(value) + "\n")
    return scores
def _mean(values):
    """Arithmetic mean of *values*, or 0 for an empty list."""
    try:
        return sum(values) / len(values)
    except ZeroDivisionError:
        return 0


def _f1(precision, recall):
    """Harmonic mean of precision and recall (0 when both are 0)."""
    try:
        return 2.0 * precision * recall / float(precision + recall)
    except ZeroDivisionError:
        return 0


def _micro_prf(tp_list, fp_list, fn_list):
    """Micro-averaged (precision, recall, f1) from per-file counts."""
    tp, fp, fn = sum(tp_list), sum(fp_list), sum(fn_list)
    try:
        precision = tp / float(tp + fp)
    except ZeroDivisionError:
        precision = 0
    try:
        recall = tp / float(tp + fn)
    except ZeroDivisionError:
        recall = 0
    return precision, recall, _f1(precision, recall)


def main(input_dir, output_dir):
    """Score every Task 1 submission and write micro/macro averages.

    Expects gold CSVs under <input_dir>/ref/Task1 and submissions under
    <input_dir>/res/Task1; appends all scores to <output_dir>/scores.txt.
    """
    # BUGFIX: the original printed "not a valid director" (typo) and then
    # kept running, crashing later in os.listdir; bail out early instead.
    if not os.path.exists(input_dir):
        print("%s not a valid directory" % input_dir)
        return
    if not os.path.exists(output_dir):
        print("%s not a valid directory" % output_dir)
        return
    truth_dir = os.path.join(input_dir, "ref", "Task1")
    if not os.path.exists(truth_dir):
        print("%s not a valid directory" % truth_dir)
        return
    submit_dir = os.path.join(input_dir, "res", "Task1")
    if not os.path.exists(submit_dir):
        print("%s not a valid directory" % submit_dir)
        return
    score_file = os.path.join(output_dir, "scores.txt")
    if os.path.exists(score_file):
        os.remove(score_file)  # start fresh; evaluate() appends

    per_file = []  # one 12-tuple of metrics/counts per evaluated gold file
    for gold_file in os.listdir(truth_dir):
        if gold_file.startswith('.'):
            continue
        paper_id = gold_file.split('_')[0]
        submit_file = os.path.join(submit_dir, paper_id + ".csv")
        if not os.path.exists(submit_file):
            continue  # no submission for this paper
        per_file.append(evaluate(os.path.join(truth_dir, gold_file),
                                 submit_file, score_file))

    # Transpose the per-file tuples into one list per metric/count.
    if per_file:
        cols = list(zip(*per_file))
    else:
        cols = [[] for _ in range(12)]
    (P_ref_list, R_ref_list, F_ref_list,
     P_facet_list, R_facet_list, F_facet_list,
     TP_ref_list, FP_ref_list, FN_ref_list,
     TP_facet_list, FP_facet_list, FN_facet_list) = cols

    # Micro averages pool raw counts; macro averages average per-file scores
    # (macro F1 is recomputed from the macro precision/recall, as before).
    precision_ref_micro, recall_ref_micro, f_ref_micro = _micro_prf(
        TP_ref_list, FP_ref_list, FN_ref_list)
    precision_facet_micro, recall_facet_micro, f_facet_micro = _micro_prf(
        TP_facet_list, FP_facet_list, FN_facet_list)
    precision_ref_macro = _mean(P_ref_list)
    recall_ref_macro = _mean(R_ref_list)
    f_ref_macro = _f1(precision_ref_macro, recall_ref_macro)
    precision_facet_macro = _mean(P_facet_list)
    recall_facet_macro = _mean(R_facet_list)
    f_facet_macro = _f1(precision_facet_macro, recall_facet_macro)

    with open(score_file, "a") as f:
        f.write("task1a_precision_micro_avg: " + str(precision_ref_micro) + "\n")
        f.write("task1a_precision_macro_avg: " + str(precision_ref_macro) + "\n")
        f.write("task1a_recall_micro_avg: " + str(recall_ref_micro) + "\n")
        f.write("task1a_recall_macro_avg: " + str(recall_ref_macro) + "\n")
        f.write("task1a_f1_micro_avg: " + str(f_ref_micro) + "\n")
        f.write("task1a_f1_macro_avg: " + str(f_ref_macro) + "\n")
        f.write("task1b_precision_micro_avg: " + str(precision_facet_micro) + "\n")
        f.write("task1b_precision_macro_avg: " + str(precision_facet_macro) + "\n")
        f.write("task1b_recall_micro_avg: " + str(recall_facet_micro) + "\n")
        f.write("task1b_recall_macro_avg: " + str(recall_facet_macro) + "\n")
        f.write("task1b_f1_micro_avg: " + str(f_facet_micro) + "\n")
        f.write("task1b_f1_macro_avg: " + str(f_facet_macro) + "\n")
# Script entry point: usage is  python <script> <input_dir> <output_dir>
# NOTE(review): relies on `sys` (and os, csv, BeautifulSoup, ET) being
# imported earlier in the file — confirm the import block.
if __name__ == "__main__":
    input_dir = sys.argv[1]
    output_dir = sys.argv[2]
    main(input_dir, output_dir)
|
# Copyright (c) 2015 Riverbed Technology, Inc.
#
# This software is licensed under the terms and conditions of the MIT License
# accompanying the software ("License"). This software is distributed "AS IS"
# as set forth in the License.
import unittest
import steelscript.netshark.core.filters as filters
class FilterTests(unittest.TestCase):
    """Sanity checks for steelscript.netshark filter helpers."""

    def test_timefilter(self):
        """A human-readable relative range must parse without raising."""
        filters.TimeFilter.parse_range('last 5 minutes')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright(c) 2021 The MITRE Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
import sys
import struct
import binascii
import logging
import argparse
import progressbar
from datetime import datetime
from Registry import Registry
__version__ = "1.0.0"
__author__ = "Jason Batchelor"
log = logging.getLogger(__name__)
def iid_text_to_bin(iid):
    """
    Process an IID and convert to a YARA compliant search string.

    The textual GUID is reordered into its in-memory layout: Data1-Data3 are
    stored little-endian, Data4 is kept as raw bytes. That byte sequence is
    what actually appears inside binaries, so it is what YARA must match.

    Below describes the GUID structure used to describe an identifier
    for a MAPI interface:
    https://msdn.microsoft.com/en-us/library/office/cc815892.aspx

    :param str iid: Name of the IID to convert, e.g. '{xxxxxxxx-xxxx-...}'
    :return: bin_yara
    :rtype: str
    """
    # remove begin and end brackets (and the dashes)
    guid = re.sub('[{}-]', '', iid)
    # BUGFIX: force little-endian with '<' — the bare native 'IHH8B' format
    # would produce different (wrong) signatures on a big-endian host.
    bin_struc = struct.unpack("<IHH8B", binascii.a2b_hex(guid))
    bin_str = '%.8X%.4X%.4X%s' % \
              (bin_struc[0], bin_struc[1], bin_struc[2],
               (''.join('{:02X}'.format(x) for x in bin_struc[3:])))
    # create YARA compliant search string: hex pairs separated by spaces
    bin_yara = '{ ' + ' '.join(a + b for a, b in
                               zip(bin_str[::2], bin_str[1::2])) + ' }'
    return bin_yara
def enumerate_com_interfaces(reg_keys, show_bar=False):
    """
    Walk the given registry interface keys and collect, per interface name,
    the unique YARA hex strings of its interface identifiers.

    :param list reg_keys: List of registry key objects from python-registry
        module.
    :param bool show_bar: Show progressbar while subkeys are processed.
    :return: com
    :rtype: dict
    """
    subkey_total = sum(len(key.subkeys()) for key in reg_keys)
    progress = None
    if show_bar:
        print('Processing %s results...' % subkey_total)
        progress = progressbar.ProgressBar(redirect_stdout=True,
                                           max_value=subkey_total)
    processed = 0
    com = {}
    for key in reg_keys:
        for subkey in key.subkeys():
            for reg_value in list(subkey.values()):
                # Per MS documentation, interface names must start with the
                # 'I' prefix; anything else tends to produce crazy names and
                # conflicting results, so those entries are skipped.
                # https://docs.microsoft.com/en-us/dotnet/standard/design-guidelines/names-of-classes-structs-and-interfaces
                is_interface = (reg_value.value_type() == Registry.RegSZ
                                and reg_value.name() == '(default)'
                                and reg_value.value().startswith('I'))
                if is_interface:
                    bin_guid = iid_text_to_bin(subkey.name())
                    raw_name = reg_value.value()
                    # Truncate at the first special character or space.
                    cut = min(raw_name.find(c) if c in raw_name
                              else len(raw_name)
                              for c in ['_', '<', '[', ' '])
                    name = raw_name[:cut]
                    guids = com.setdefault(name, [])
                    if bin_guid not in guids:
                        guids.append(bin_guid)
            if show_bar:
                progress.update(processed)
            processed += 1
    if show_bar:
        progress.finish()
    return com
def initialize_parser():
    """Build the command-line parser for the IID-to-YARA converter."""
    description = (
        "Crawls windows registry to hunt for and convert IIDs for "
        "COM interfaces to binary YARA signatures. The submitted "
        "hives must be from HKLM\\SOFTWARE. Make copies of "
        "these files off an active Windows OS using the command "
        "'reg save HKLM\\SOFTWARE hklm_sft.hiv' when running as "
        "administrator.")
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('hive', metavar='FILE', nargs='*',
                        help='Full path to the registry hive to be processed.')
    parser.add_argument('-o', '--output-filename', type=str,
                        default='com_interface_ids.yara',
                        help='Filename to write YARA signatures '
                             'to (default: com_interface_ids.yara)')
    parser.add_argument('-v', '--verbose', action='store_true', default=False,
                        help='Output additional information when processing '
                             '(mostly for debugging purposes).')
    return parser
def main():
    """CLI entry point: parse hives, generate YARA rules, write them out."""
    p = initialize_parser()
    args = p.parse_args()

    root = logging.getLogger()
    logging.basicConfig()
    root.setLevel(logging.DEBUG if args.verbose else logging.WARNING)

    if len(args.hive) == 0:
        p.print_help()
        sys.exit(2)

    keys = []
    for hive in args.hive:
        print('Collecting IIDs from %s...' % hive)
        if not os.path.isfile(hive):
            log.warning('Failed to find file %s. Skipping...' % hive)
            continue
        try:
            reg = Registry.Registry(hive)
        except Registry.RegistryParse.ParseException:
            log.warning('Error parsing %s. Skipping...' % hive)
            continue
        # Native and WOW64 interface registrations live under separate keys;
        # collect whichever of the two exists in this hive.
        try:
            keys.append(reg.open("Classes\\Interface"))
        except Registry.RegistryKeyNotFoundException:
            log.warning("Couldn't find 'Classes\\Interface' key in %s." % hive)
        try:
            keys.append(reg.open("Classes\\Wow6432Node\\Interface"))
        except Registry.RegistryKeyNotFoundException:
            log.warning("Couldn't find 'Classes\\Wow6432Node\\Interface\\ "
                        "key in %s." % hive)

    com_signatures = enumerate_com_interfaces(keys, True)

    total_rules = len(com_signatures)
    print('Generating %s YARA signatures...' % total_rules)
    bar = progressbar.ProgressBar(redirect_stdout=True, max_value=total_rules)
    # Collect rule fragments in a list and join once — repeated str += in a
    # loop over thousands of rules was accidentally quadratic.
    parts = ['// %s\n// COM IID YARA sig collection.\n// '
             'Autogenerated on %s\n\n' % (__author__, datetime.now())]
    for counter, (name, rules) in enumerate(com_signatures.items()):
        parts.append('rule %s\n{\n\t'
                     'strings:' % name)
        if len(rules) > 1:
            for i in range(0, len(rules)):
                parts.append('\n\t\t$%s_%s = %s' % (name, i, rules[i]))
        else:
            parts.append('\n\t\t$%s = %s' % (name, rules[0]))
        parts.append('\n\tcondition:\n\t\tany of them\n}\n')
        bar.update(counter)
    bar.finish()

    print('Writing YARA rules to %s' % args.output_filename)
    # The with-block closes the file; the original also called f.close()
    # redundantly inside the with.
    with open(args.output_filename, 'w') as f:
        f.write(''.join(parts))


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python2
# Wi-Fi channel number -> center frequency in MHz.
# Covers the 2.4 GHz band (channels 1-13) and a subset of 5 GHz channels.
CHANNEL_FREQUENCIES = {
    1: 2412,
    2: 2417,
    3: 2422,
    4: 2427,
    5: 2432,
    6: 2437,
    7: 2442,
    8: 2447,
    9: 2452,
    10: 2457,
    11: 2462,
    12: 2467,
    13: 2472,
    36: 5180,
    40: 5200,
    44: 5220,
    48: 5240,
    52: 5260,
    56: 5280,
    60: 5300,
    64: 5320,
    100: 5500,
    104: 5520,
    108: 5540,
    112: 5560
}
# MAC OUI prefix (first three octets, lowercase) -> drone vendor name.
DRONE_VENDOR_MACS = {
    'a0:14:3d': 'Parrot',
    '90:3a:e6': 'Parrot',
    '90:03:b7': 'Parrot',
    '00:26:7e': 'Parrot',
    '00:12:1c': 'Parrot',
    '60:60:1f': 'DJI'
    #'04:d6:aa': 'Samsung'
}
# PHY type code (string) -> 802.11 standard label.
# NOTE(review): code values presumably match the capture tool's PHY field —
# confirm against the consumer of this table.
PHY_TYPES = {
    "4": "802.11b",
    "5": "802.11a",
    "6": "802.11g",
    "7": "802.11n",
    "8": "802.11ac"
}
|
# Runtime: 56 ms, faster than 52.34% of Python3 online submissions for Richest Customer Wealth.
# Memory Usage: 14.2 MB, less than 61.78% of Python3 online submissions for Richest Customer Wealth.
# https://leetcode.com/submissions/detail/591520867/
class Solution:
    def maximumWealth(self, accounts: List[List[int]]) -> int:
        """Return the largest per-customer sum of account balances.

        Returns 0 for an empty accounts list (same as the original loop,
        whose accumulator started at 0). Also avoids the original's double
        sum(account) evaluation per qualifying row.
        """
        return max(map(sum, accounts), default=0)
|
# Copyright (c) 2013, Craft and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
    """Report entry point: return (columns, rows) for the chosen invoice type.

    :param filters: frappe filter dict with "from_date", "to_date" and
        "entry_type" ("Sales Invoice" or "Purchase Invoice").
    """
    columns = get_columns()
    # SECURITY FIX: the dates were str.format()-ed straight into the SQL,
    # allowing injection through filter values. Bind them as parameters.
    params = {
        "from_date": filters.get("from_date"),
        "to_date": filters.get("to_date"),
    }
    sales_data = frappe.db.sql('''select
        'Customer' as party_type,
        customer_name,
        po_no,
        tax_id,
        name,
        posting_date,
        total,
        discount_amount,
        net_total,
        total_taxes_and_charges,
        grand_total
        from `tabSales Invoice` si
        where date(posting_date) BETWEEN %(from_date)s and %(to_date)s and docstatus = 1 Order by date(posting_date) ASC''', params)
    purchase_data = frappe.db.sql('''select supplier_name,
        'Supplier' as party_type,
        bill_no,
        tax_id,
        name,
        posting_date,
        total,
        discount_amount,
        net_total,
        total_taxes_and_charges,
        grand_total
        from `tabPurchase Invoice`
        where date(posting_date) BETWEEN %(from_date)s and %(to_date)s and docstatus = 1 Order by date(posting_date) ASC''', params)
    if filters.get("entry_type") == "Sales Invoice":
        data = sales_data
    elif filters.get("entry_type") == "Purchase Invoice":
        data = purchase_data
    else:
        # BUGFIX: any other entry_type previously raised NameError on the
        # return below; show an empty report instead.
        data = []
    return columns, data
def get_columns():
    """Column definitions in frappe's "Label:Fieldtype:Width" string form."""
    specs = [
        ("Party Type", "Data", 120),
        ("Party", "Data", 120),
        ("Bill No", "Data", 120),
        ("TRN", "Data", 120),
        ("INV No", "Link/Sales Invoice", 120),
        ("INV Date", "Date", 160),
        ("Gross", "Currency", 120),
        ("Discount", "Currency", 120),
        ("Net Amount", "Currency", 120),
        ("Vat", "Currency", 120),
        ("Total", "Currency", 100),
    ]
    # Labels go through _() so they are translatable, exactly as before.
    return [_(label) + ":" + fieldtype + ":" + str(width)
            for label, fieldtype, width in specs]
|
from pythonforandroid.toolchain import PythonRecipe, shprint, current_directory, ArchAndroid
from os.path import exists, join
import sh
import glob
class PySDL2Recipe(PythonRecipe):
    """python-for-android recipe that builds the PySDL2 bindings."""
    # Release to fetch; {version} is substituted into `url` by the toolchain.
    version = '0.9.3'
    url = 'https://bitbucket.org/marcusva/py-sdl2/downloads/PySDL2-{version}.tar.gz'
    # PySDL2 is a ctypes wrapper: it needs the sdl2 library recipe built first.
    depends = ['sdl2']


# Module-level instance exported for the build system (p4a recipe convention:
# the toolchain imports `recipe` from each recipe module).
recipe = PySDL2Recipe()
|
######################################################################################
# Author: Srijan Verma, BITS Pilani, India #
# Code developed in Sirimulla Research Group (http://sirimullaresearchgroup.com/) #
# University of Texas at El Paso, Tx, USA #
# Last modified: 25/08/2020 #
######################################################################################
from rdkit.Chem import MACCSkeys, AllChem
from rdkit.Avalon import pyAvalonTools as fpAvalon
from rdkit.Chem import rdMolDescriptors
import tempfile, os
import shutil
from rdkit.ML.Descriptors import MoleculeDescriptors
import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem, Descriptors
# RDKit descriptor calculator covering every descriptor in
# Descriptors._descList; consumed by the 'rdkDes' entry of fpFunc_dict below.
calc = MoleculeDescriptors.MolecularDescriptorCalculator([x[0] for x in Descriptors._descList])
# Function for generating TPATF features, using Mayachem tools
def get_tpatf(m):
    """Generate TPATF features for mol *m* via the MayaChemTools perl script.

    :param m: RDKit Mol
    :return: float32 array of shape (1, n_features), or None when the
        external script or its output parsing fails.
    """
    # Creates a temp folder for the SDF round-trip with the perl tool.
    temp_dir = tempfile.mkdtemp()
    # BUGFIX: initialise up front — previously a CSV without a "Cmpd" line
    # left `features` unbound (NameError), and the failure path fell through
    # to np.array(None) and crashed on .shape[0].
    features = None
    try:
        # The perl tool reads an SDF, so write the molecule with 2D coords.
        AllChem.Compute2DCoords(m)
        w = Chem.SDWriter(os.path.join(temp_dir, "temp.sdf"))
        w.write(m)
        w.flush()

        # Path to perl script
        script_path = 'mayachemtools/bin/TopologicalPharmacophoreAtomTripletsFingerprints.pl'
        command = "perl " + script_path + " -r " + os.path.join(temp_dir, "temp") + " --AtomTripletsSetSizeToUse FixedSize -v ValuesString -o " + os.path.join(temp_dir, "temp.sdf")
        os.system(command)

        with open(os.path.join(temp_dir, "temp.csv"), 'r') as f:
            for line in f.readlines():
                if "Cmpd" in line:
                    line = line.split(';')[5].replace('"', '')
                    features = [int(i) for i in line.split(" ")]
    except Exception:
        features = None
    finally:
        # Always delete the temporary directory, even on failure.
        shutil.rmtree(temp_dir)

    if features is None:
        return None
    tpatf_arr = np.array(features, dtype=np.float32)
    return tpatf_arr.reshape(1, tpatf_arr.shape[0])
# Evaluation statistics for six pretrained models, keyed by model name
# (Tox*/Act* trained on descriptors "Des", fingerprints "FP", or topological
# features "Topo"). Each entry records balanced dataset counts plus held-out
# test metrics (Cohen's kappa, ROC AUC, F1, recall, accuracy, precision).
LocInfo_dict =[
{
    "ToxDes": {
        "dataset_size": 1662,
        "actives": 831,
        "inactives": 831,
        "cohen_k_test": 0.36,
        "roc_auc": 0.68,
        "f1_score": 0.679,
        "Recall": 0.68,
        "accuracy": 0.68,
        "Precision": 0.682
    },
    "ToxFP": {
        "dataset_size": 1662,
        "actives": 831,
        "inactives": 831,
        "cohen_k_test": 0.392,
        "roc_auc": 0.696,
        "f1_score": 0.696,
        "Recall": 0.696,
        "accuracy": 0.696,
        "Precision": 0.698
    },
    "ToxTopo": {
        "dataset_size": 1662,
        "actives": 831,
        "inactives": 831,
        "cohen_k_test": 0.368,
        "roc_auc": 0.684,
        "f1_score": 0.684,
        "Recall": 0.684,
        "accuracy": 0.684,
        "Precision": 0.684
    },
    "ActFP": {
        "dataset_size": 736,
        "actives": 368,
        "inactives": 368,
        "cohen_k_test": 0.392,
        "roc_auc": 0.696,
        "f1_score": 0.695,
        "Recall": 0.696,
        "accuracy": 0.696,
        "Precision": 0.698
    },
    "ActDes": {
        "dataset_size": 680,
        "actives": 340,
        "inactives": 340,
        "cohen_k_test": 0.216,
        "roc_auc": 0.608,
        "f1_score": 0.606,
        "Recall": 0.608,
        "accuracy": 0.608,
        "Precision": 0.609
    },
    "ActTopo": {
        "dataset_size": 680,
        "actives": 340,
        "inactives": 340,
        "cohen_k_test": 0.294,
        "roc_auc": 0.647,
        "f1_score": 0.647,
        "Recall": 0.647,
        "accuracy": 0.647,
        "Precision": 0.647
    }
}
]
# Fingerprint bit lengths: the standard width and the "long" (l-prefixed) width.
nbits = 1024
longbits = 16384
# dictionary: fingerprint name -> callable(Mol) producing that fingerprint.
fpFunc_dict = {}
# Morgan/circular fingerprints; ecfpN uses radius N/2, fcfpN adds
# pharmacophoric feature invariants (useFeatures=True).
fpFunc_dict['ecfp0'] = lambda m: AllChem.GetMorganFingerprintAsBitVect(m, 0, nBits=nbits)
fpFunc_dict['ecfp2'] = lambda m: AllChem.GetMorganFingerprintAsBitVect(m, 1, nBits=nbits)
fpFunc_dict['ecfp4'] = lambda m: AllChem.GetMorganFingerprintAsBitVect(m, 2, nBits=nbits)
fpFunc_dict['ecfp6'] = lambda m: AllChem.GetMorganFingerprintAsBitVect(m, 3, nBits=nbits)
fpFunc_dict['fcfp2'] = lambda m: AllChem.GetMorganFingerprintAsBitVect(m, 1, useFeatures=True, nBits=nbits)
fpFunc_dict['fcfp4'] = lambda m: AllChem.GetMorganFingerprintAsBitVect(m, 2, useFeatures=True, nBits=nbits)
fpFunc_dict['fcfp6'] = lambda m: AllChem.GetMorganFingerprintAsBitVect(m, 3, useFeatures=True, nBits=nbits)
# 16384-bit variants of the above.
fpFunc_dict['lecfp4'] = lambda m: AllChem.GetMorganFingerprintAsBitVect(m, 2, nBits=longbits)
fpFunc_dict['lecfp6'] = lambda m: AllChem.GetMorganFingerprintAsBitVect(m, 3, nBits=longbits)
fpFunc_dict['lfcfp4'] = lambda m: AllChem.GetMorganFingerprintAsBitVect(m, 2, useFeatures=True, nBits=longbits)
fpFunc_dict['lfcfp6'] = lambda m: AllChem.GetMorganFingerprintAsBitVect(m, 3, useFeatures=True, nBits=longbits)
# MACCS keys, hashed atom-pair / topological-torsion, and Avalon fingerprints.
fpFunc_dict['maccs'] = lambda m: MACCSkeys.GenMACCSKeys(m)
fpFunc_dict['hashap'] = lambda m: rdMolDescriptors.GetHashedAtomPairFingerprintAsBitVect(m, nBits=nbits)
fpFunc_dict['hashtt'] = lambda m: rdMolDescriptors.GetHashedTopologicalTorsionFingerprintAsBitVect(m, nBits=nbits)
fpFunc_dict['avalon'] = lambda m: fpAvalon.GetAvalonFP(m, nbits)
fpFunc_dict['laval'] = lambda m: fpAvalon.GetAvalonFP(m, longbits)
# RDKit path-based fingerprints with max path lengths 5-7.
fpFunc_dict['rdk5'] = lambda m: Chem.RDKFingerprint(m, maxPath=5, fpSize=nbits, nBitsPerHash=2)
fpFunc_dict['rdk6'] = lambda m: Chem.RDKFingerprint(m, maxPath=6, fpSize=nbits, nBitsPerHash=2)
fpFunc_dict['rdk7'] = lambda m: Chem.RDKFingerprint(m, maxPath=7, fpSize=nbits, nBitsPerHash=2)
# External TPATF features (MayaChemTools) and the RDKit descriptor vector.
fpFunc_dict['tpatf'] = lambda m: get_tpatf(m)
fpFunc_dict['rdkDes'] = lambda m: calc.CalcDescriptors(m)
# Fingerprint names whose bit vectors use the 16384-bit `longbits` width.
long_fps = {'laval', 'lecfp4', 'lecfp6', 'lfcfp4', 'lfcfp6'}
# The subset of fpFunc_dict entries this pipeline actually generates.
fps_to_generate = ['fcfp4', 'fcfp2', 'lecfp4', 'lfcfp4', 'rdkDes', 'tpatf', 'rdk5', 'hashtt', 'avalon', 'laval', 'rdk7', 'ecfp4', 'hashap', 'lecfp6', 'maccs']
# Model-file-name -> loaded model cache; presumably populated at runtime by
# code outside this chunk — TODO confirm the writer.
ModFileName_LoadedModel_dict = {}
|
# -*- coding: utf-8 -*-
"""
This code is auto generated from troposphere_mate.code_generator.__init__.py scripts.
"""
import sys
if sys.version_info.major >= 3 and sys.version_info.minor >= 5: # pragma: no cover
from typing import Union, List, Any
import troposphere.cloudwatch
from troposphere.cloudwatch import (
Configuration as _Configuration,
Metric as _Metric,
MetricDataQuery as _MetricDataQuery,
MetricDimension as _MetricDimension,
MetricStat as _MetricStat,
Range as _Range,
Tags as _Tags,
)
from troposphere import Template, AWSHelperFn
from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin
from troposphere_mate.core.sentiel import REQUIRED, NOTHING
class MetricDimension(troposphere.cloudwatch.MetricDimension, Mixin):
    """Auto-generated typed wrapper for ``troposphere.cloudwatch.MetricDimension``.

    Routes keyword arguments through ``preprocess_init_kwargs`` before
    delegating to the troposphere base class.
    """

    def __init__(self,
                 title=None,
                 Name=REQUIRED,  # type: Union[str, AWSHelperFn]
                 Value=REQUIRED,  # type: Union[str, AWSHelperFn]
                 **kwargs):
        processed_kwargs = preprocess_init_kwargs(
            title=title,
            Name=Name,
            Value=Value,
            **kwargs
        )
        super(MetricDimension, self).__init__(**processed_kwargs)
class Metric(troposphere.cloudwatch.Metric, Mixin):
    """Auto-generated typed wrapper for ``troposphere.cloudwatch.Metric``.

    Routes keyword arguments through ``preprocess_init_kwargs`` before
    delegating to the troposphere base class.
    """

    def __init__(self,
                 title=None,
                 Dimensions=NOTHING,  # type: List[_MetricDimension]
                 MetricName=NOTHING,  # type: Union[str, AWSHelperFn]
                 Namespace=NOTHING,  # type: Union[str, AWSHelperFn]
                 **kwargs):
        processed_kwargs = preprocess_init_kwargs(
            title=title,
            Dimensions=Dimensions,
            MetricName=MetricName,
            Namespace=Namespace,
            **kwargs
        )
        super(Metric, self).__init__(**processed_kwargs)
class MetricStat(troposphere.cloudwatch.MetricStat, Mixin):
    """Auto-generated typed wrapper for ``troposphere.cloudwatch.MetricStat``.

    Routes keyword arguments through ``preprocess_init_kwargs`` before
    delegating to the troposphere base class.
    """

    def __init__(self,
                 title=None,
                 Metric=REQUIRED,  # type: _Metric
                 Period=REQUIRED,  # type: int
                 Stat=REQUIRED,  # type: Union[str, AWSHelperFn]
                 Unit=NOTHING,  # type: Any
                 **kwargs):
        processed_kwargs = preprocess_init_kwargs(
            title=title,
            Metric=Metric,
            Period=Period,
            Stat=Stat,
            Unit=Unit,
            **kwargs
        )
        super(MetricStat, self).__init__(**processed_kwargs)
class MetricDataQuery(troposphere.cloudwatch.MetricDataQuery, Mixin):
    """Auto-generated typed wrapper for ``troposphere.cloudwatch.MetricDataQuery``.

    Routes keyword arguments through ``preprocess_init_kwargs`` before
    delegating to the troposphere base class.
    """

    def __init__(self,
                 title=None,
                 Id=REQUIRED,  # type: Union[str, AWSHelperFn]
                 Expression=NOTHING,  # type: Union[str, AWSHelperFn]
                 Label=NOTHING,  # type: Union[str, AWSHelperFn]
                 MetricStat=NOTHING,  # type: _MetricStat
                 Period=NOTHING,  # type: int
                 ReturnData=NOTHING,  # type: bool
                 **kwargs):
        processed_kwargs = preprocess_init_kwargs(
            title=title,
            Id=Id,
            Expression=Expression,
            Label=Label,
            MetricStat=MetricStat,
            Period=Period,
            ReturnData=ReturnData,
            **kwargs
        )
        super(MetricDataQuery, self).__init__(**processed_kwargs)
class Alarm(troposphere.cloudwatch.Alarm, Mixin):
    """Auto-generated typed wrapper for ``troposphere.cloudwatch.Alarm``.

    Unlike the property wrappers above, this is a resource: it accepts a
    ``template`` to attach to and a ``validation`` flag, then routes all
    keyword arguments through ``preprocess_init_kwargs`` before delegating
    to the troposphere base class.
    """

    def __init__(self,
                 title,  # type: str
                 template=None,  # type: Template
                 validation=True,  # type: bool
                 ComparisonOperator=REQUIRED,  # type: Union[str, AWSHelperFn]
                 EvaluationPeriods=REQUIRED,  # type: int
                 ActionsEnabled=NOTHING,  # type: bool
                 AlarmActions=NOTHING,  # type: List[Union[str, AWSHelperFn]]
                 AlarmDescription=NOTHING,  # type: Union[str, AWSHelperFn]
                 AlarmName=NOTHING,  # type: Union[str, AWSHelperFn]
                 DatapointsToAlarm=NOTHING,  # type: int
                 Dimensions=NOTHING,  # type: List[_MetricDimension]
                 EvaluateLowSampleCountPercentile=NOTHING,  # type: Union[str, AWSHelperFn]
                 ExtendedStatistic=NOTHING,  # type: Union[str, AWSHelperFn]
                 InsufficientDataActions=NOTHING,  # type: List[Union[str, AWSHelperFn]]
                 MetricName=NOTHING,  # type: Union[str, AWSHelperFn]
                 Metrics=NOTHING,  # type: List[_MetricDataQuery]
                 Namespace=NOTHING,  # type: Union[str, AWSHelperFn]
                 OKActions=NOTHING,  # type: List[Union[str, AWSHelperFn]]
                 Period=NOTHING,  # type: int
                 Statistic=NOTHING,  # type: Union[str, AWSHelperFn]
                 Threshold=NOTHING,  # type: float
                 ThresholdMetricId=NOTHING,  # type: Union[str, AWSHelperFn]
                 TreatMissingData=NOTHING,  # type: Any
                 Unit=NOTHING,  # type: Union[str, AWSHelperFn]
                 **kwargs):
        processed_kwargs = preprocess_init_kwargs(
            title=title,
            template=template,
            validation=validation,
            ComparisonOperator=ComparisonOperator,
            EvaluationPeriods=EvaluationPeriods,
            ActionsEnabled=ActionsEnabled,
            AlarmActions=AlarmActions,
            AlarmDescription=AlarmDescription,
            AlarmName=AlarmName,
            DatapointsToAlarm=DatapointsToAlarm,
            Dimensions=Dimensions,
            EvaluateLowSampleCountPercentile=EvaluateLowSampleCountPercentile,
            ExtendedStatistic=ExtendedStatistic,
            InsufficientDataActions=InsufficientDataActions,
            MetricName=MetricName,
            Metrics=Metrics,
            Namespace=Namespace,
            OKActions=OKActions,
            Period=Period,
            Statistic=Statistic,
            Threshold=Threshold,
            ThresholdMetricId=ThresholdMetricId,
            TreatMissingData=TreatMissingData,
            Unit=Unit,
            **kwargs
        )
        super(Alarm, self).__init__(**processed_kwargs)
class Dashboard(troposphere.cloudwatch.Dashboard, Mixin):
    """Auto-generated wrapper for ``troposphere.cloudwatch.Dashboard``; keyword
    arguments are normalized by ``preprocess_init_kwargs`` before delegating
    to the troposphere base class."""
    def __init__(self,
                 title, # type: str
                 template=None, # type: Template
                 validation=True, # type: bool
                 DashboardBody=REQUIRED, # type: Union[Union[str, AWSHelperFn], dict]
                 DashboardName=NOTHING, # type: Union[str, AWSHelperFn]
                 **kwargs):
        processed_kwargs = preprocess_init_kwargs(
            title=title,
            template=template,
            validation=validation,
            DashboardBody=DashboardBody,
            DashboardName=DashboardName,
            **kwargs
        )
        super(Dashboard, self).__init__(**processed_kwargs)
class Range(troposphere.cloudwatch.Range, Mixin):
    """Auto-generated wrapper for ``troposphere.cloudwatch.Range``; keyword
    arguments are normalized by ``preprocess_init_kwargs`` before delegating
    to the troposphere base class."""
    def __init__(self,
                 title=None,
                 EndTime=REQUIRED, # type: Union[str, AWSHelperFn]
                 StartTime=REQUIRED, # type: Union[str, AWSHelperFn]
                 **kwargs):
        processed_kwargs = preprocess_init_kwargs(
            title=title,
            EndTime=EndTime,
            StartTime=StartTime,
            **kwargs
        )
        super(Range, self).__init__(**processed_kwargs)
class Configuration(troposphere.cloudwatch.Configuration, Mixin):
    """Auto-generated wrapper for ``troposphere.cloudwatch.Configuration``;
    keyword arguments are normalized by ``preprocess_init_kwargs`` before
    delegating to the troposphere base class."""
    def __init__(self,
                 title=None,
                 ExcludedTimeRanges=NOTHING, # type: List[_Range]
                 MetricTimeZone=NOTHING, # type: Union[str, AWSHelperFn]
                 **kwargs):
        processed_kwargs = preprocess_init_kwargs(
            title=title,
            ExcludedTimeRanges=ExcludedTimeRanges,
            MetricTimeZone=MetricTimeZone,
            **kwargs
        )
        super(Configuration, self).__init__(**processed_kwargs)
class AnomalyDetector(troposphere.cloudwatch.AnomalyDetector, Mixin):
    """Auto-generated wrapper for ``troposphere.cloudwatch.AnomalyDetector``;
    keyword arguments are normalized by ``preprocess_init_kwargs`` before
    delegating to the troposphere base class."""
    def __init__(self,
                 title, # type: str
                 template=None, # type: Template
                 validation=True, # type: bool
                 MetricName=REQUIRED, # type: Union[str, AWSHelperFn]
                 Namespace=REQUIRED, # type: Union[str, AWSHelperFn]
                 Stat=REQUIRED, # type: Union[str, AWSHelperFn]
                 Configuration=NOTHING, # type: _Configuration
                 Dimensions=NOTHING, # type: List[_MetricDimension]
                 **kwargs):
        processed_kwargs = preprocess_init_kwargs(
            title=title,
            template=template,
            validation=validation,
            MetricName=MetricName,
            Namespace=Namespace,
            Stat=Stat,
            Configuration=Configuration,
            Dimensions=Dimensions,
            **kwargs
        )
        super(AnomalyDetector, self).__init__(**processed_kwargs)
class InsightRule(troposphere.cloudwatch.InsightRule, Mixin):
    """Auto-generated wrapper for ``troposphere.cloudwatch.InsightRule``;
    keyword arguments are normalized by ``preprocess_init_kwargs`` before
    delegating to the troposphere base class."""
    def __init__(self,
                 title, # type: str
                 template=None, # type: Template
                 validation=True, # type: bool
                 RuleBody=REQUIRED, # type: Union[str, AWSHelperFn]
                 RuleName=REQUIRED, # type: Union[str, AWSHelperFn]
                 RuleState=REQUIRED, # type: Union[str, AWSHelperFn]
                 Tags=NOTHING, # type: _Tags
                 **kwargs):
        processed_kwargs = preprocess_init_kwargs(
            title=title,
            template=template,
            validation=validation,
            RuleBody=RuleBody,
            RuleName=RuleName,
            RuleState=RuleState,
            Tags=Tags,
            **kwargs
        )
        super(InsightRule, self).__init__(**processed_kwargs)
class CompositeAlarm(troposphere.cloudwatch.CompositeAlarm, Mixin):
    """Auto-generated wrapper for ``troposphere.cloudwatch.CompositeAlarm``;
    keyword arguments are normalized by ``preprocess_init_kwargs`` before
    delegating to the troposphere base class."""
    def __init__(self,
                 title, # type: str
                 template=None, # type: Template
                 validation=True, # type: bool
                 AlarmName=REQUIRED, # type: Union[str, AWSHelperFn]
                 AlarmRule=REQUIRED, # type: Union[str, AWSHelperFn]
                 ActionsEnabled=NOTHING, # type: bool
                 AlarmActions=NOTHING, # type: List[Union[str, AWSHelperFn]]
                 AlarmDescription=NOTHING, # type: Union[str, AWSHelperFn]
                 InsufficientDataActions=NOTHING, # type: List[Union[str, AWSHelperFn]]
                 OKActions=NOTHING, # type: List[Union[str, AWSHelperFn]]
                 **kwargs):
        processed_kwargs = preprocess_init_kwargs(
            title=title,
            template=template,
            validation=validation,
            AlarmName=AlarmName,
            AlarmRule=AlarmRule,
            ActionsEnabled=ActionsEnabled,
            AlarmActions=AlarmActions,
            AlarmDescription=AlarmDescription,
            InsufficientDataActions=InsufficientDataActions,
            OKActions=OKActions,
            **kwargs
        )
        super(CompositeAlarm, self).__init__(**processed_kwargs)
|
from math import sqrt, ceil
# Each hop can move the circle's centre by at most 2*r, so the answer is the
# straight-line distance between the centres divided by 2*r, rounded up.
r, x, y, nx, ny = map(int, input().split())
dx, dy = nx - x, ny - y
distance = sqrt(dx * dx + dy * dy)
print(ceil(distance / (2 * r)))
|
import json
from json import JSONDecodeError
from urllib.parse import quote
import requests
class RestClient:
    """
    Use this class to communicate with otto pi navigator
    """
    def __init__(self, url=None):
        """:param url: base endpoint URL; defaults to ``http://localhost/``."""
        self.end_point_url = 'http://localhost/' if url is None else url
    def post(self, path, data):
        """POST *data* as JSON to ``end_point_url + path``.

        BUG FIX: the response object was previously discarded, so callers had
        no way to inspect the status code or body; it is now returned.

        :param path: URL path (percent-encoded before sending).
        :param data: JSON-serializable payload.
        :return: the ``requests.Response`` object.
        """
        print(self.end_point_url + path)
        r = requests.post(self.end_point_url + quote(path), json=data)
        print(r)
        return r
    def get(self, path):
        """GET ``end_point_url + path`` and return the parsed JSON body.

        :param path: URL path (percent-encoded before sending).
        :return: decoded JSON data, or ``None`` if the body is not valid JSON.
        """
        print(self.end_point_url + path)
        r = requests.get(self.end_point_url + quote(path))
        try:
            data = json.loads(r.text)
            return data
        except JSONDecodeError:
            return None
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 19 22:47:23 2015
@author: Joakim
Python version: 3.4.1
Reads a website, determines wheter a package is old or new.
If package is old, downloads a newer one.
Else, does nothing.
NOTE:
This is a raw version, it does not work in conjunction with other codes.
Yet.
"""
import urllib.request
import urllib.parse
import urllib.error
from lxml import html, etree
import requests
import datetime, time
import os
import shutil
import zipfile
"""Package source website"""
SourcePageAddress = 'http://data.jyvaskyla.fi/data.php'
"""Package dating information address on the webpage in XPath-coding:
NOTE: this is an absolute address, it has to be changed into a dynamic one later"""
PackageDateAddress_XPath = '/html/body/div/div[3]/div[2]/div[1]/div[1]/div[15]/div[5]/br'
#PackageDateAddress_XPath = '/html/body/div/div[3]/div[2]/div[1]/div[1]/div[14]/div[5]/br/text()'
"""Package http-donwload address"""
PackageAddress = 'http://data.jyvaskyla.fi/tiedostot/linkkidata.zip'
linkkidataNames = ['agency.txt','calendar.txt','calendar_dates.txt',
'contracts.txt','routes.txt','stop_times.txt',
'stops.txt','translations.txt', 'trips.txt']
#---------------------------------------------------------------------------
def reader(fileaddress):
    """Read a file and return its non-comment lines.

    :param fileaddress: path of the file to read.
    :return: list of lines (newline characters kept) whose first character
        is not ``#``.
    """
    # The old explicit ft.close() inside the with-block was redundant;
    # the context manager already closes the file.
    with open(fileaddress, 'r') as ft:
        return [line for line in ft if not line.startswith("#")]
def writer(fileaddress, contents, oneline=False):
    """Write *contents* to *fileaddress*, replacing any existing file.

    :param fileaddress: path of the file to (over)write.
    :param contents: an iterable of lines when ``oneline`` is False, otherwise
        a single string; a newline is appended to everything written.
    :param oneline: write ``contents`` as one single line instead of iterating.
    """
    # Mode 'w' truncates an existing file, so the old os.remove() pre-delete
    # was redundant; the context manager guarantees the handle is closed.
    with open(fileaddress, 'w') as tied:
        if oneline:
            tied.write(contents + "\n")
        else:
            for line in contents:
                tied.write(line + "\n")
def getCurrentPackageDate(fileaddress):
    """Return the stored dating of the linkkidata package.

    Reads the given file and returns its first line with the trailing
    newline stripped.
    """
    first_line = reader(fileaddress)[0]
    return first_line.splitlines()[0]
def checkIfUpdateAvailable(updateDateFileAddress):
    """Check the Jyväskylä open data website for a linkkidata package update.

    :param updateDateFileAddress: path of the file holding the locally stored
        package date (``d.m.Y`` format).
    :return: ``(True, web_date)`` if the site's package is newer than the
        stored date, ``(False, web_date)`` if not, and ``(-1, "1.1.2000")``
        on any error (network failure, changed page layout, bad date format).
    """
    try:
        lastUpdatedDate = getCurrentPackageDate(updateDateFileAddress)
        page = requests.get(SourcePageAddress)
        tree = html.fromstring(page.text)
        #Read the date from the webpage and clean it up
        dating = tree.xpath(PackageDateAddress_XPath)
        brcstr = etree.tostring(dating[0])
        rcstr = brcstr.decode("utf-8")
        cstr=rcstr.replace("\n","").replace("\t","").replace("<br/>","")
        print(cstr)
        #Form datetime objects for package update status comparison
        dLastUpdatedDate = datetime.datetime.\
        strptime(lastUpdatedDate,'%d.%m.%Y').date()
        dPackageDate = datetime.datetime.strptime(cstr,'%d.%m.%Y').date()
        #Check if the package requires updating
        if(time.mktime(dPackageDate.timetuple()) > \
        time.mktime(dLastUpdatedDate.timetuple())):
            return True,cstr
        else:
            return False,cstr
    except:
        # NOTE(review): the bare except deliberately swallows every failure and
        # returns the (-1, "1.1.2000") error sentinel; beware that -1 is truthy
        # for callers that test the first element with a plain `if`.
        return -1,"1.1.2000"
def downloadUpdatedPackage(PackageAddress =
                           'http://data.jyvaskyla.fi/tiedostot/linkkidata.zip'):
    """Downloads a zip package from the given address.

    Default is the linkkidata-package address.

    :return: 1 on success, -1 on any download/write failure.
    """
    try:
        # Context managers close both the HTTP response and the output file
        # (the old code leaked the urlopen handle), and the except clause is
        # narrowed: URLError is a subclass of OSError, which also covers
        # local filesystem failures.
        with urllib.request.urlopen(PackageAddress) as f:
            data = f.read()
        with open("linkkidata.zip", "wb") as code:
            code.write(data)
        return 1
    except OSError:
        return -1
def unzip(source_filename, dest_dir):
    """Unzips a source file into destination file"""
    #Source:
    # https://stackoverflow.com/questions/12886768/how-to-unzip-file-in-python-on-all-oses
    with zipfile.ZipFile(source_filename) as zf:
        for member in zf.infolist():
            # Path traversal defense copied from
            # http://hg.python.org/cpython/file/tip/Lib/http/server.py#l789
            # Build a sanitized target directory: drop drive letters, absolute
            # path heads and '.'/'..' segments from every parent component.
            words = member.filename.split('/')
            path = dest_dir
            for word in words[:-1]:
                drive, word = os.path.splitdrive(word)
                head, word = os.path.split(word)
                if word in (os.curdir, os.pardir, ''): continue
                path = os.path.join(path, word)
            # NOTE(review): zf.extract() re-joins the full member.filename onto
            # `path`, so members inside subdirectories end up doubly nested
            # (dest/a/a/b.txt).  Harmless for the flat linkkidata archive —
            # confirm before reusing this helper elsewhere.
            zf.extract(member, path)
def formFileTreeFromZipPackage():#(ZipPackageAddress = 'oletusosoite'):
    """Locate the current working directory and extract linkkidata.zip into
    the ./linkkidata data tree (creating or updating it).

    :return: 1 on success, -1 if the zip is missing or extraction fails.
    """
    workdir = os.getcwd().replace('\\','/')
    zip_path = workdir+"/linkkidata.zip"
    extract_path = workdir+"/linkkidata"
    #print(extract_path)
    if not os.path.exists(zip_path):
        # There is no zip-package, return error signal
        return -1
    # Extract the zip-package
    try:
        unzip(zip_path,extract_path)
    except:
        return -1
    return 1
def updateLinkkiData():
    """Handles the update of the linkkidata-package, if necessary.

    Returns 1 if update successful, 0 if no update was needed and -1 if
    update failed.
    """
    cwd = os.getcwd().replace('\\','/')
    updateDateFileAddress = cwd+"/lastUpdateDate.txt"
    if not os.path.exists(updateDateFileAddress):
        # First run: seed the last-update file with an ancient date so any
        # package found on the website counts as newer.
        writer(updateDateFileAddress,['1.1.1950'])
    requiresUpdate, webPackageDate = checkIfUpdateAvailable(updateDateFileAddress)
    if requiresUpdate == -1:
        # BUG FIX: checkIfUpdateAvailable signals errors with -1, which is
        # truthy — the old code fell into the update branch and downloaded
        # the package even when the availability check had failed.
        return -1
    if requiresUpdate:
        # BUG FIX: the old code called downloadUpdatedPackage() and
        # formFileTreeFromZipPackage() TWICE each (once inside a print and
        # once in the if-checks), downloading the package twice.  Call each
        # exactly once and reuse the result.
        download_ok = downloadUpdatedPackage()
        print("Download successful: "+str(download_ok))
        if download_ok == 1:
            tree_ok = formFileTreeFromZipPackage()
            print("File tree creation: "+str(tree_ok))
            if tree_ok == 1:
                print("Write current package date "+webPackageDate+" into file")
                writer(updateDateFileAddress,[webPackageDate])
                return 1
        # There has been an error
        return -1
    else:
        # Do not update the linkkidata package
        return 0
# Run the updater once at import time and report the outcome
# (1 = updated, 0 = already current, -1 = failure).
res = updateLinkkiData()
print(res)
#TODO: remove this when the package update file is done
print('Done')
|
import copy
from .utils import print_json
'''
Prints out the websites list with its data fixed.
'''
def task_three(websites):
    """Fix each website's ``secure`` flag to match its URL scheme, drop
    entries with invalid/missing keys, pretty-print and return the list.

    :param websites: list of dicts with ``url`` and ``secure`` keys
        (mutated in place).
    :return: the same list, cleaned up.
    """
    incorrect_elements = list()
    # BUG FIX: the original kept a manual counter that was incremented at the
    # END of the loop body — the `continue` in the except branch skipped the
    # increment, so every invalid element after the first was recorded with a
    # stale index.  enumerate() always yields the true position.
    for index, website in enumerate(websites):
        try:
            if 'https://' in website['url'] and website['secure'] == False:
                website['secure'] = True
            elif 'http://' in website['url'] and website['secure'] == True:
                website['secure'] = False
        except (KeyError, TypeError):
            # Missing 'url'/'secure' key, or a non-dict entry.
            incorrect_elements.append(index)
            print('Website has invalid key(s).')
    # Delete from the back so earlier indices stay valid while deleting.
    for i in reversed(incorrect_elements):
        del websites[i]
    print_json(websites)
    return websites
|
import auto_derby
from auto_derby import single_mode
class Plugin(auto_derby.Plugin):
    """Earn second name `努力の天才`"""
    def install(self) -> None:
        # Subclass whatever training class is currently configured, so
        # customizations installed by other plugins are preserved.
        class Training(auto_derby.config.single_mode_training_class):
            def score(self, ctx: single_mode.Context) -> float:
                ret = super().score(ctx)
                # Bias toward power-type trainings while their level is
                # still below 5 (+5 to the computed score).
                if self.type == self.TYPE_POWER and self.level < 5:
                    ret += 5
                return ret
        auto_derby.config.single_mode_training_class = Training
# Register this plugin under the module name so auto_derby can load it by name.
auto_derby.plugin.register(__name__, Plugin())
|
# Codegen metadata: this estimator is not a supervised learner and does not
# require a training frame.
supervised_learning = False
options = dict(requires_training_frame=False)
def class_extensions():
    # Container function whose body is harvested by the h2o code generator
    # and spliced into the generated estimator class (see `extensions` below).
    # NOTE(review): the default ``file=str`` binds the built-in type, not a
    # string; it looks unintentional but is preserved as-is — confirm against
    # the code generator before changing.
    @staticmethod
    def from_file(file=str):
        """
        Creates new Generic model by loading existing embedded model into library, e.g. from H2O MOJO.
        The imported model must be supported by H2O.
        :param file: A string containing path to the file to create the model from
        :return: H2OGenericEstimator instance representing the generic model
        :examples:
        >>> from h2o.estimators import H2OIsolationForestEstimator, H2OGenericEstimator
        >>> import tempfile
        >>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/testng/airlines_train.csv")
        >>> ifr = H2OIsolationForestEstimator(ntrees=1)
        >>> ifr.train(x=["Origin","Dest"], y="Distance", training_frame=airlines)
        >>> original_model_filename = tempfile.mkdtemp()
        >>> original_model_filename = ifr.download_mojo(original_model_filename)
        >>> model = H2OGenericEstimator.from_file(original_model_filename)
        >>> model.model_performance()
        """
        model = H2OGenericEstimator(path = file)
        model.train()
        return model
# Methods the code generator grafts onto the generated estimator class.
extensions = dict(
    __class__=class_extensions,
)
# Per-parameter doctest examples injected into the generated docstrings.
examples = dict(
    model_key="""
>>> from h2o.estimators import H2OGenericEstimator, H2OXGBoostEstimator
>>> import tempfile
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/testng/airlines_train.csv")
>>> y = "IsDepDelayed"
>>> x = ["fYear","fMonth","Origin","Dest","Distance"]
>>> xgb = H2OXGBoostEstimator(ntrees=1, nfolds=3)
>>> xgb.train(x=x, y=y, training_frame=airlines)
>>> original_model_filename = tempfile.mkdtemp()
>>> original_model_filename = xgb.download_mojo(original_model_filename)
>>> key = h2o.lazy_import(original_model_filename)
>>> fr = h2o.get_frame(key[0])
>>> model = H2OGenericEstimator(model_key=fr)
>>> model.train()
>>> model.auc()
""",
    path="""
>>> from h2o.estimators import H2OIsolationForestEstimator, H2OGenericEstimator
>>> import tempfile
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/testng/airlines_train.csv")
>>> ifr = H2OIsolationForestEstimator(ntrees=1)
>>> ifr.train(x=["Origin","Dest"], y="Distance", training_frame=airlines)
>>> generic_mojo_filename = tempfile.mkdtemp("zip","genericMojo")
>>> generic_mojo_filename = model.download_mojo(path=generic_mojo_filename)
>>> model = H2OGenericEstimator.from_file(generic_mojo_filename)
>>> model.model_performance()
"""
)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-15 20:30
from __future__ import unicode_literals
from django.db import migrations, models
import profiles.util
class Migration(migrations.Migration):
    """Auto-generated migration: add the nullable ``image_small`` ImageField
    to Profile, uploading via ``profile_image_upload_uri_small``."""
    dependencies = [
        ('profiles', '0023_remove_profile_has_profile_image'),
    ]
    operations = [
        migrations.AddField(
            model_name='profile',
            name='image_small',
            field=models.ImageField(null=True, upload_to=profiles.util.profile_image_upload_uri_small),
        ),
    ]
|
#def fonksiyon():
# a=5
# print(a)
#fonksiyon()
#a=10
#def fonksiyon():
#a=5
#print(a)
#fonksiyon()
#print(a)
a=5
def fonksiyon():
    # `global` makes the assignment below rebind the module-level `a`
    # instead of creating a function-local variable.
    global a
    a=2
    print(a)
fonksiyon()
print(a)  # prints 2: fonksiyon() reassigned the global
|
# coding: utf-8
"""
NetBox API
API to access NetBox # noqa: E501
OpenAPI spec version: 2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class VirtualMachineInterface(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> declared swagger type (used by to_dict()).
    swagger_types = {
        'id': 'int',
        'virtual_machine': 'NestedVirtualMachine',
        'name': 'str',
        'type': 'Type7',
        'enabled': 'bool',
        'mtu': 'int',
        'mac_address': 'str',
        'description': 'str',
        'mode': 'Mode',
        'untagged_vlan': 'NestedVLAN',
        'tagged_vlans': 'list[NestedVLAN]',
        'tags': 'list[str]'
    }
    # Maps python attribute name -> JSON key used on the wire.
    attribute_map = {
        'id': 'id',
        'virtual_machine': 'virtual_machine',
        'name': 'name',
        'type': 'type',
        'enabled': 'enabled',
        'mtu': 'mtu',
        'mac_address': 'mac_address',
        'description': 'description',
        'mode': 'mode',
        'untagged_vlan': 'untagged_vlan',
        'tagged_vlans': 'tagged_vlans',
        'tags': 'tags'
    }
    def __init__(self, id=None, virtual_machine=None, name=None, type=None, enabled=None, mtu=None, mac_address=None, description=None, mode=None, untagged_vlan=None, tagged_vlans=None, tags=None):  # noqa: E501
        """VirtualMachineInterface - a model defined in Swagger"""  # noqa: E501
        # Backing fields; all public access goes through the properties below.
        self._id = None
        self._virtual_machine = None
        self._name = None
        self._type = None
        self._enabled = None
        self._mtu = None
        self._mac_address = None
        self._description = None
        self._mode = None
        self._untagged_vlan = None
        self._tagged_vlans = None
        self._tags = None
        self.discriminator = None
        # Optional fields are assigned only when provided; virtual_machine and
        # name are assigned unconditionally (their setters reject None).
        if id is not None:
            self.id = id
        self.virtual_machine = virtual_machine
        self.name = name
        if type is not None:
            self.type = type
        if enabled is not None:
            self.enabled = enabled
        if mtu is not None:
            self.mtu = mtu
        if mac_address is not None:
            self.mac_address = mac_address
        if description is not None:
            self.description = description
        if mode is not None:
            self.mode = mode
        if untagged_vlan is not None:
            self.untagged_vlan = untagged_vlan
        if tagged_vlans is not None:
            self.tagged_vlans = tagged_vlans
        if tags is not None:
            self.tags = tags
    @property
    def id(self):
        """Gets the id of this VirtualMachineInterface.  # noqa: E501
        :return: The id of this VirtualMachineInterface.  # noqa: E501
        :rtype: int
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this VirtualMachineInterface.
        :param id: The id of this VirtualMachineInterface.  # noqa: E501
        :type: int
        """
        self._id = id
    @property
    def virtual_machine(self):
        """Gets the virtual_machine of this VirtualMachineInterface.  # noqa: E501
        :return: The virtual_machine of this VirtualMachineInterface.  # noqa: E501
        :rtype: NestedVirtualMachine
        """
        return self._virtual_machine
    @virtual_machine.setter
    def virtual_machine(self, virtual_machine):
        """Sets the virtual_machine of this VirtualMachineInterface.
        :param virtual_machine: The virtual_machine of this VirtualMachineInterface.  # noqa: E501
        :type: NestedVirtualMachine
        """
        # Required field in the API schema.
        if virtual_machine is None:
            raise ValueError("Invalid value for `virtual_machine`, must not be `None`")  # noqa: E501
        self._virtual_machine = virtual_machine
    @property
    def name(self):
        """Gets the name of this VirtualMachineInterface.  # noqa: E501
        :return: The name of this VirtualMachineInterface.  # noqa: E501
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this VirtualMachineInterface.
        :param name: The name of this VirtualMachineInterface.  # noqa: E501
        :type: str
        """
        # Required, with a 1..64 character length constraint.
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        if name is not None and len(name) > 64:
            raise ValueError("Invalid value for `name`, length must be less than or equal to `64`")  # noqa: E501
        if name is not None and len(name) < 1:
            raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`")  # noqa: E501
        self._name = name
    @property
    def type(self):
        """Gets the type of this VirtualMachineInterface.  # noqa: E501
        :return: The type of this VirtualMachineInterface.  # noqa: E501
        :rtype: Type7
        """
        return self._type
    @type.setter
    def type(self, type):
        """Sets the type of this VirtualMachineInterface.
        :param type: The type of this VirtualMachineInterface.  # noqa: E501
        :type: Type7
        """
        self._type = type
    @property
    def enabled(self):
        """Gets the enabled of this VirtualMachineInterface.  # noqa: E501
        :return: The enabled of this VirtualMachineInterface.  # noqa: E501
        :rtype: bool
        """
        return self._enabled
    @enabled.setter
    def enabled(self, enabled):
        """Sets the enabled of this VirtualMachineInterface.
        :param enabled: The enabled of this VirtualMachineInterface.  # noqa: E501
        :type: bool
        """
        self._enabled = enabled
    @property
    def mtu(self):
        """Gets the mtu of this VirtualMachineInterface.  # noqa: E501
        :return: The mtu of this VirtualMachineInterface.  # noqa: E501
        :rtype: int
        """
        return self._mtu
    @mtu.setter
    def mtu(self, mtu):
        """Sets the mtu of this VirtualMachineInterface.
        :param mtu: The mtu of this VirtualMachineInterface.  # noqa: E501
        :type: int
        """
        # Optional, but when set must lie in the 1..65536 range.
        if mtu is not None and mtu > 65536:  # noqa: E501
            raise ValueError("Invalid value for `mtu`, must be a value less than or equal to `65536`")  # noqa: E501
        if mtu is not None and mtu < 1:  # noqa: E501
            raise ValueError("Invalid value for `mtu`, must be a value greater than or equal to `1`")  # noqa: E501
        self._mtu = mtu
    @property
    def mac_address(self):
        """Gets the mac_address of this VirtualMachineInterface.  # noqa: E501
        :return: The mac_address of this VirtualMachineInterface.  # noqa: E501
        :rtype: str
        """
        return self._mac_address
    @mac_address.setter
    def mac_address(self, mac_address):
        """Sets the mac_address of this VirtualMachineInterface.
        :param mac_address: The mac_address of this VirtualMachineInterface.  # noqa: E501
        :type: str
        """
        self._mac_address = mac_address
    @property
    def description(self):
        """Gets the description of this VirtualMachineInterface.  # noqa: E501
        :return: The description of this VirtualMachineInterface.  # noqa: E501
        :rtype: str
        """
        return self._description
    @description.setter
    def description(self, description):
        """Sets the description of this VirtualMachineInterface.
        :param description: The description of this VirtualMachineInterface.  # noqa: E501
        :type: str
        """
        # Optional, capped at 200 characters.
        if description is not None and len(description) > 200:
            raise ValueError("Invalid value for `description`, length must be less than or equal to `200`")  # noqa: E501
        self._description = description
    @property
    def mode(self):
        """Gets the mode of this VirtualMachineInterface.  # noqa: E501
        :return: The mode of this VirtualMachineInterface.  # noqa: E501
        :rtype: Mode
        """
        return self._mode
    @mode.setter
    def mode(self, mode):
        """Sets the mode of this VirtualMachineInterface.
        :param mode: The mode of this VirtualMachineInterface.  # noqa: E501
        :type: Mode
        """
        self._mode = mode
    @property
    def untagged_vlan(self):
        """Gets the untagged_vlan of this VirtualMachineInterface.  # noqa: E501
        :return: The untagged_vlan of this VirtualMachineInterface.  # noqa: E501
        :rtype: NestedVLAN
        """
        return self._untagged_vlan
    @untagged_vlan.setter
    def untagged_vlan(self, untagged_vlan):
        """Sets the untagged_vlan of this VirtualMachineInterface.
        :param untagged_vlan: The untagged_vlan of this VirtualMachineInterface.  # noqa: E501
        :type: NestedVLAN
        """
        self._untagged_vlan = untagged_vlan
    @property
    def tagged_vlans(self):
        """Gets the tagged_vlans of this VirtualMachineInterface.  # noqa: E501
        :return: The tagged_vlans of this VirtualMachineInterface.  # noqa: E501
        :rtype: list[NestedVLAN]
        """
        return self._tagged_vlans
    @tagged_vlans.setter
    def tagged_vlans(self, tagged_vlans):
        """Sets the tagged_vlans of this VirtualMachineInterface.
        :param tagged_vlans: The tagged_vlans of this VirtualMachineInterface.  # noqa: E501
        :type: list[NestedVLAN]
        """
        self._tagged_vlans = tagged_vlans
    @property
    def tags(self):
        """Gets the tags of this VirtualMachineInterface.  # noqa: E501
        :return: The tags of this VirtualMachineInterface.  # noqa: E501
        :rtype: list[str]
        """
        return self._tags
    @tags.setter
    def tags(self, tags):
        """Sets the tags of this VirtualMachineInterface.
        :param tags: The tags of this VirtualMachineInterface.  # noqa: E501
        :type: list[str]
        """
        self._tags = tags
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested swagger models and containers of them.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(VirtualMachineInterface, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, VirtualMachineInterface):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
from language.utils.log import get_logger
logger = get_logger()
class Evaluator:
    """Exact-match (EM) evaluator for an extractive QA model."""
    def __init__(self, evaluator_cfg, val_dataset):
        """
        :param evaluator_cfg: dict, evaluator config.
        :param val_dataset: torch dataset.
        """
        logger.info(f'Setup evaluator...')
        self._cfg = evaluator_cfg
        # Whether the dataset contains unanswerable ("impossible") questions.
        self._impossible = evaluator_cfg.impossible
        self._setup_data(val_dataset)
    def _setup_data(self, val_dataset):
        """Build the validation DataLoader from the device config."""
        batch_size = self._cfg.device.batch_size
        num_worker = self._cfg.device.num_worker
        logger.info(f'batch_size: {batch_size}')
        logger.info(f'num_workers: {num_worker}')
        # Floor division undercounts the final partial batch (drop_last=False);
        # this value is only the tqdm progress-bar total, so that is harmless.
        self._total_steps = len(val_dataset) // batch_size
        self._val_iter = DataLoader(val_dataset, shuffle=False,
                                    batch_size=batch_size, num_workers=num_worker,
                                    drop_last=False)
    def evaluate(self, model):
        """Run inference over the validation set and return {'EM': score}."""
        model.eval()
        logger.info(f'Evaluating...')
        total_amount = 0
        correct = 0
        for data in tqdm(self._val_iter, total=self._total_steps):
            self._to_device(data)
            with torch.no_grad():
                no_answer, start_pred, end_pred = model.forward_infer(data)
            start = data['start']
            end = data['end']
            for i in range(start.shape[0]):
                total_amount += 1
                if self._impossible and no_answer[i]:
                    # assumes gold start == 0 marks an unanswerable example —
                    # TODO confirm against the dataset encoding.
                    if start[i, 0] == 0:
                        correct += 1
                else:
                    # Correct iff the predicted span matches ANY gold span.
                    if any([s == start_pred[i] and e == end_pred[i] for s, e in zip(start[i], end[i])]):
                        correct += 1
        # BUG FIX: guard against an empty validation set, which previously
        # raised ZeroDivisionError here.
        em = correct / total_amount if total_amount else 0.0
        return {'EM': em}
    def _to_device(self, data):
        """Move every tensor in a batch dict to the configured device, in place."""
        device = self._cfg.device.name
        for key, value in data.items():
            if isinstance(value, torch.Tensor):
                data[key] = value.to(device)
|
from datetime import date
from dateutil.relativedelta import relativedelta
from unittest import TestCase
from todone.parser.format import (
ApplyFunctionFormat,
DateFormat,
PassthroughFormat,
)
class TestPassthroughFormat(TestCase):
    """PassthroughFormat.format must return its input unchanged."""
    def test_values_are_left_untouched(self):
        args = ['a', 'b', 'C']
        self.assertEqual(PassthroughFormat().format(args), args)
    def test_empty_list_returns_empty_list(self):
        self.assertEqual(PassthroughFormat().format([]), [])
class TestApplyFunctionFormat(TestCase):
    """ApplyFunctionFormat must delegate formatting to the supplied function."""
    def test_format_function_is_applied_to_value(self):
        calls = []
        def recording_format(value):
            # Record every invocation, behave as identity.
            calls.append(value)
            return value
        formatter = ApplyFunctionFormat(format_function=recording_format)
        args = ['arg1', 'arg2']
        formatter.format(args)
        self.assertEqual(calls, [args, ])
class TestDateFormat(TestCase):
    """DateFormat must shift today's date by the matched offset/interval."""
    def _assert_shift(self, amount, interval, **delta_kwargs):
        # Shared check: format([match]) == today + relativedelta(...).
        expected = date.today() + relativedelta(**delta_kwargs)
        output = DateFormat().format([MockDateMatch(amount, interval), ])
        self.assertEqual(output, expected)
    def test_no_date_offset_returns_max_date(self):
        output = DateFormat().format([MockDateMatch(), ])
        self.assertEqual(output, date(9999, 12, 31))
    def test_day_offset_shifts_date_by_correct_amount(self):
        self._assert_shift(5, 'd', days=5)
    def test_week_offset_shifts_date_by_correct_amount(self):
        self._assert_shift(5, 'w', weeks=5)
    def test_month_offset_shifts_date_by_correct_amount(self):
        self._assert_shift(5, 'm', months=5)
    def test_year_offset_shifts_date_by_correct_amount(self):
        self._assert_shift(5, 'y', years=5)
class MockDateMatch():
    """Stand-in for the regex match object produced by due-date parsing.

    With both offset and interval set it mimics a match of ``due+<n><unit>``;
    otherwise it mimics a bare ``due`` match.
    """
    def __init__(self, offset=None, interval=None):
        self.offset = offset
        self.interval = interval
    def _has_offset(self):
        # Truthiness check mirrors the original: 0 / '' count as "no offset".
        return bool(self.offset and self.interval)
    def groups(self):
        if not self._has_offset():
            return ('due', )
        return ('due', '+', '{}'.format(self.offset), '{}'.format(self.interval))
    def group(self, index):
        if index == 0:
            if self._has_offset():
                return 'due+{}{}'.format(self.offset, self.interval)
            return 'due'
        if index in (1, 'name'):
            return 'due'
        if not self._has_offset():
            raise IndexError
        if index in (2, 'sign'):
            return '+'
        if index in (3, 'offset'):
            return '{}'.format(self.offset)
        if index in (4, 'interval'):
            return '{}'.format(self.interval)
        raise IndexError
|
import os
def get_last_modified(directory):
    """Return the mtime of *directory* if it is a regular file, else None."""
    if not os.path.isfile(directory):
        return None
    return os.path.getmtime(directory)
# NOTE: does not check for circular dependencies — a cycle will recurse forever.
def _recurse_dependency_list(project_dir, dependency_dict, files_to_check):
    """Fill in "last_modified" for every file reachable from files_to_check.

    Recurses through each entry's "dependencies" list; timestamps already
    present (and non-None) are not re-read.
    """
    if not files_to_check:
        return dependency_dict
    for fname in files_to_check:
        entry = dependency_dict[fname]
        if entry.get("last_modified") is None:
            entry["last_modified"] = get_last_modified(project_dir + fname)
        dependency_dict = \
            _recurse_dependency_list(project_dir, dependency_dict, entry["dependencies"])
    return dependency_dict
# resolve dependency change times
# if changes in dependencies, run change function
# return completed dictionary
# paths are in relation to the parent_dir
def change_check(project_dir, dependency_dict, files_to_check, change_func=None, required_items=None):
    """Resolve dependency timestamps, detect changed dependencies, and invoke
    change_func(required_items, file, changed_deps) for each affected file.

    Paths are relative to project_dir.  Returns the completed dictionary with a
    "changes" list recorded per file.
    """
    dependency_dict = _recurse_dependency_list(project_dir, dependency_dict, files_to_check)
    for fname in files_to_check:
        last_modified = dependency_dict[fname]["last_modified"]
        changed_deps = []
        for dep in dependency_dict[fname]["dependencies"]:
            dep_modified = dependency_dict[dep]["last_modified"]
            # Unknown timestamps count as changed; the 0.1s slack absorbs
            # filesystem mtime granularity.
            if last_modified is None or dep_modified is None or dep_modified - last_modified > 0.1:
                changed_deps.append(dep)
        if changed_deps and change_func is not None:
            change_func(required_items, fname, changed_deps)
        dependency_dict[fname]["changes"] = changed_deps
    return dependency_dict
|
# coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Focal loss for imbalanced labels.
C.F.: https://arxiv.org/abs/1708.02002
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
def focal_loss_from_logits(logits, class_labels, alpha, gamma, normalizer=1.0):
  """Compute focal loss for given `logits` and ground truth labels.
  Focal loss = (1 - p_t)^r * Cross_Entropy * alpha / normalizer
  where p_t is the prediction probability for each class (0 AND 1), alpha is
  the weighting factor, gamma (r) is the modulator power, and the normalizer
  scales the loss uniformly.
  For positive class, the modulator is:
  (1 - p_t)^r = (1 - sigmoid(X))^r = = exp(- r * X - r * log(1 + exp(-X)));
  for negative class:
  (1 - p_t)^r = exp(-r * log(1 + exp(-X)))
  Thus the general form is:
  (1 - p_t)^r = exp(-r * X * Y - r * log(1 + exp(-X))),
  where -X is the negative logits and Y is the numerical GT class labels (0/1).
  Args:
    logits: A float tensor of same size as `class_labels`.
    class_labels: A int tensor of the same size as logits for binary labels.
    alpha: A float scalar scaling factor for the positive class; for the
      negative class the factor is (1- alpha).
    gamma: A float scalar modulating factor.
    normalizer: A float scalar normalizing factor. normalizer should be > 0.
  Returns:
    focal loss: a tensor of per example focal loss, of the same shape as logits.
    cross_entropy loss: a tensor of same shape as logits that can be used for
      log_pplex.
  Raises:
    ValueError: if `normalizer` is not strictly positive.
  """
  # BUG FIX: validate the scalar argument up front instead of after all the
  # tensor work, and make the message agree with the actual check (the old
  # text said '>= 0.0' while the condition rejected values <= 0.0).
  if normalizer <= 0.0:
    raise ValueError('Normalizer in focal_loss must be > 0.0')
  tf.debugging.assert_equal(tf.shape(class_labels), tf.shape(logits))
  labels = tf.cast(class_labels, tf.float32)
  logits = tf.cast(logits, tf.float32)
  cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,
                                                          logits=logits)
  neg_logits = -1.0 * logits
  # Modulator (1 - p_t)^gamma in the numerically stable log-space form
  # derived in the docstring.
  modulator = tf.exp(gamma * labels * neg_logits -
                     gamma * tf.math.log1p(tf.exp(neg_logits)))
  loss = modulator * cross_entropy
  # Weight positives by alpha and negatives by (1 - alpha).
  alpha_weighted_loss = tf.where(tf.equal(labels, 1.0), alpha * loss,
                                 (1.0 - alpha) * loss)
  return alpha_weighted_loss / normalizer, cross_entropy
|
import boto3
# Client for the Amazon EBS direct APIs (block-level snapshot access).
# NOTE(review): region/credentials come from the default boto3 resolution
# chain (env vars, shared config, instance profile) -- confirm for deployment.
aws_ebs_client = boto3.client('ebs')
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 16 17:21:49 2021
@author: suhar
"""
# Regression is generally about prediction;
# x is the independent variable, y the dependent one.
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
# Load the monthly sales dataset: 'Aylar' (months) is the feature,
# 'Satislar' (sales) the target.
data = pd.read_csv('satislar.csv')
aylar = data[['Aylar']]
satislar = data[['Satislar']]

# Hold out a third of the rows; fixed seed for reproducibility.
xtr, xte, ytr, yte = train_test_split(aylar, satislar, test_size=0.33,
                                      random_state=0)

# Standardize X and y with SEPARATE scalers fit on the TRAINING split only.
# (The original reused one scaler and called fit_transform on the test
# split, which leaks test statistics and makes inverse_transform depend on
# whichever split was fit last.)
sc = StandardScaler()
Xtr = sc.fit_transform(xtr)
Xte = sc.transform(xte)
sc_y = StandardScaler()
Ytr = sc_y.fit_transform(ytr)
Yte = sc_y.transform(yte)

lr = LinearRegression()
lr.fit(Xtr, Ytr)
preds = lr.predict(Xte)
rp = sc_y.inverse_transform(preds)  # back to the original sales scale

# Sort training points by index so the scatter reads left-to-right.
xtr = xtr.sort_index()
ytr = ytr.sort_index()
plt.scatter(xtr, ytr)
plt.scatter(xte, rp)
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
"""This demo connects to a magic light and has it do a colorwheel."""
from rainbowio import colorwheel
import adafruit_ble
import _bleio
from adafruit_ble.advertising.standard import ProvideServicesAdvertisement
from adafruit_ble_magic_light import MagicLightService
# CircuitPython <6 uses its own ConnectionError type. So, use it if available.
# Otherwise, the built-in ConnectionError is used.
# Prefer _bleio's ConnectionError when the port provides one (CircuitPython
# <6); otherwise fall back to the built-in ConnectionError.
connection_error = ConnectionError
if hasattr(_bleio, "ConnectionError"):
    connection_error = _bleio.ConnectionError
def find_connection():
    """Return (connection, MagicLightService instance) for the first already
    established connection offering the service, or (None, None)."""
    for conn in radio.connections:
        if MagicLightService in conn:
            return conn, conn[MagicLightService]
    return None, None
# Start advertising before messing with the display so that we can connect immediately.
radio = adafruit_ble.BLERadio()
active_connection, pixels = find_connection()
current_notification = None
app_icon_file = None
while True:
    if not active_connection:
        print("Scanning for Magic Light")
        # Scan until an advertisement carrying MagicLightService shows up.
        for scan in radio.start_scan(ProvideServicesAdvertisement):
            if MagicLightService in scan.services:
                active_connection = radio.connect(scan)
                try:
                    pixels = active_connection[MagicLightService]
                except connection_error:
                    # Peer dropped between connect and service discovery;
                    # keep scanning.
                    print("disconnected")
                    continue
                break
        radio.stop_scan()
    # Cycle the light through the colorwheel until the peer disconnects,
    # then fall back to scanning.
    i = 0
    while active_connection.connected:
        pixels[0] = colorwheel(i % 256)
        i += 1
    active_connection = None
|
import math

PI = math.pi
E = math.e

# Input signal. Renamed from `input`, which shadowed the builtin of the
# same name (the script never used the builtin, so behavior is unchanged).
samples = [1, 2, 3, 4]
n_samples = len(samples)

# Factor the length as n_samples = L * M with L as close to sqrt(n) as
# possible; L stays 1 for prime lengths (plain DFT).
L = 1
M = n_samples
root = math.floor(math.sqrt(n_samples))
for i in range(root, 0, -1):
    if n_samples % i == 0:
        L = i
        M = int(n_samples / i)
        break

# Decimate: row l holds samples[l], samples[l + L], samples[l + 2L], ...
input_2d = []
output_2d = []
for i in range(L):
    input_2d.append(samples[i::L])
    output_2d.append([0 for _ in range(M)])

# Cooley-Tukey style mixed-radix DFT: output_2d[p][q] equals bin X[p*M + q]
# of the length-n DFT X[k] = sum_n x[n] * e^(-2*pi*i*n*k / n_samples).
for p in range(L):
    for q in range(M):
        pq_result = 0
        for l in range(L):
            factor_1 = E ** -(2j * PI * l * q / n_samples)  # twiddle factor
            inner_output = 0
            for m in range(M):
                w_m = E ** -(2j * PI * m * q / M)
                inner_output += (input_2d[l][m] * w_m)
            factor_2 = E ** -(2j * PI * l * p / L)
            pq_result += (factor_1 * inner_output * factor_2)
        output_2d[p][q] = pq_result

# Flatten row-major so output[k] is the k-th DFT bin.
output = []
for row in output_2d:
    output.extend(row)
print(output)
# coding: utf8
import asyncio
import logging
import time
from copy import deepcopy
from typing import List
import pywikibot
from pywikibot import output
from api import entryprocessor
from api.config import BotjagwarConfig
from api.model.word import Entry
from api.output import Output
from api.servicemanager import DictionaryServiceManager
from redis_wikicache import RedisPage as Page, RedisSite as Site
from .functions import translate_form_of_templates
from .functions import translate_using_convergent_definition
from .functions.pronunciation import translate_pronunciation
from .functions.references import translate_references
from .types import \
UntranslatedDefinition, \
TranslatedDefinition
# Module-level logger for this translation module.
log = logging.getLogger(__name__)
# Base URL of the dictionary lookup service (resolved once at import time).
URL_HEAD = DictionaryServiceManager().get_url_head()
# Translation strategies, tried in order for every definition line.
translation_methods = [
    translate_using_convergent_definition,
    # translate_using_bridge_language,
    # translate_using_postgrest_json_dictionary,
    translate_form_of_templates
]
# Lemmas already processed in this run; guards against re-processing and
# unbounded recursion in Translation.create_lemma_if_not_exists.
already_visited = []
class TranslatedPagePushError(Exception):
    """Raised when a translated page cannot be pushed to the target wiki."""
    pass
class Translation:
    """Translates foreign-language Wiktionary pages and publishes the
    result on the Malagasy Wiktionary."""

    # Language code of the wiki being written to.
    working_wiki_language = 'mg'

    def __init__(self):
        """
        Translates pages into Malagasy
        """
        super(self.__class__, self).__init__()
        self.output = Output()
        self.loop = asyncio.get_event_loop()
        self.config = BotjagwarConfig()

    def _save_translation_from_page(self, infos: List[Entry]):
        """
        Update database and translation methods
        """
        for info in infos:
            self.output.db(info)
            self.output.add_translation_method(info)

    @staticmethod
    def add_wiktionary_credit(
            entries: List[Entry],
            wiki_page: [pywikibot.Page, Page]) -> List[Entry]:
        """Append a {{wikibolana}} attribution template (source wiki +
        page title) to every entry's reference list."""
        reference = "{{wikibolana|" + wiki_page.site.lang + \
            '|' + wiki_page.title() + '}}'
        out_entries = []
        for entry in entries:
            if hasattr(entry, 'reference'):
                if isinstance(entry.reference, list):
                    entry.reference.append(reference)
                else:
                    # Scalar reference: replace with a one-element list.
                    entry.reference = [reference]
            else:
                entry.reference = [reference]
            out_entries.append(entry)
        return out_entries

    def generate_summary(self, entries: List[Entry]):
        """Build the edit summary: 'Dikanteny:' (translation) followed by
        the sorted, de-duplicated entry languages."""
        summary = 'Dikanteny: '
        summary += ', '.join(
            sorted(list(set([f'{entry.language.lower()}' for entry in entries]))))
        return summary

    def check_if_page_exists(self, lemma):
        # Live (offline=False) existence check on the target wiki.
        page = Page(
            Site(
                self.working_wiki_language,
                'wiktionary'),
            lemma,
            offline=False)
        return page.exists()

    @staticmethod
    def aggregate_entry_data(
            entries_translated: List[Entry],
            entries_already_existing: List[Entry]) -> List[Entry]:
        """Overlay freshly-translated entries on entries already present on
        the target page when language and part of speech match."""
        aggregated_entries = []
        for translated in entries_translated:
            for existing in entries_already_existing:
                # same spelling and language and part of speech
                if existing.language == translated.language and \
                        existing.part_of_speech == translated.part_of_speech:
                    aggregated_entries.append(existing.overlay(translated))
                # if translated not in aggregated_entries:
                #     aggregated_entries.append(translated)
        return aggregated_entries

    def publish_to_wiktionary(self, page_title: str, entries: List[Entry]):
        """
        Push translated data and if possible avoid any information loss
        on target wiki if information is not filled in
        """
        site = Site(self.working_wiki_language, 'wiktionary')
        target_page = Page(site, page_title, offline=False)
        # log.debug(self.output.wikipages(entries))
        if target_page.namespace().id != 0:
            # Refuse to write outside the main namespace.
            raise TranslatedPagePushError(
                f'Attempted to push translated page to {target_page.namespace().custom_name} '
                f'namespace (ns:{target_page.namespace().id}). '
                f'Can only push to ns:0 (main namespace)')
        elif target_page.isRedirectPage():
            # NOTE(review): the leading `pass` is dead code; redirect pages
            # are simply overwritten with the rendered entries.
            pass
            target_page.put(
                self.output.wikipages(entries),
                self.generate_summary(entries))
        else:
            # Get entries to aggregate
            if target_page.exists():
                wiktionary_processor_class = entryprocessor.WiktionaryProcessorFactory.create(
                    self.working_wiki_language)
                wiktionary_processor = wiktionary_processor_class()
                wiktionary_processor.set_text(target_page.get())
                wiktionary_processor.set_title(page_title)
                already_present_entries = wiktionary_processor.getall()
                # entries = self.aggregate_entry_data(entries, already_present_entries)
            content = self.output.wikipages(entries)
            # Push aggregated content (the \03{...} markers are pywikibot
            # terminal color codes).
            output(
                '**** \03{yellow}' +
                target_page.title() +
                '\03{default} ****')
            output(
                '\03{white}' +
                self.generate_summary(entries) +
                '\03{default}')
            output('\03{green}' + content + '\03{default}')
            output('\03{yellow}--------------\03{default}')
            if self.config.get('ninja_mode', 'translator') == '1':
                # "Ninja" mode: craft a human-looking Malagasy summary
                # ('nanitsy' = fixed, 'nanitatra' = expanded,
                # 'Pejy voaforona...' = page created with excerpt).
                if target_page.exists():
                    summary = 'nanitsy'
                    if not target_page.isRedirectPage():
                        old_content = target_page.get()
                        if len(content) > len(old_content) * 1.25:
                            summary = 'nanitatra'
                else:
                    if len(content) > 140:
                        summary = "Pejy voaforona amin'ny « " + \
                            content[:137] + '... »'
                    else:
                        summary = "Pejy voaforona amin'ny « " + content + ' »'
            else:
                summary = self.generate_summary(entries)
            target_page.put(content, summary)
        if self.config.get('ninja_mode', 'translator') == '1':
            # Throttle edits in ninja mode so the bot looks less bot-like.
            time.sleep(12)

    def create_lemma_if_not_exists(self, wiktionary_processor, definitions, entry):
        """If the translated definition points at a lemma, recursively
        translate that lemma's page too (at most once per run, via the
        module-level `already_visited` list)."""
        if hasattr(definitions, 'part_of_speech'):
            if definitions.part_of_speech is not None:
                entry.part_of_speech = definitions.part_of_speech
        if hasattr(definitions, 'lemma') and \
                definitions.lemma is not None and \
                definitions.lemma not in already_visited:
            already_visited.append(definitions.lemma)
            if not self.check_if_page_exists(definitions.lemma):
                log.debug(f'lemma {definitions.lemma} does not exist. Processing...')
                page = Page(Site(wiktionary_processor.language, 'wiktionary'), definitions.lemma)
                if page.exists():
                    self.process_wiktionary_wiki_page(page)

    def translate_wiktionary_page(
            self,
            wiktionary_processor: entryprocessor.WiktionaryProcessor) -> List[Entry]:
        """
        Parse Wiktionary page data and translate any content/section that can be translated
        """
        entries = wiktionary_processor.getall(
            fetch_additional_data=True,
            translate_definitions_to_malagasy=True,
            human_readable_form_of_definition=True
        )
        out_entries = []
        for entry in entries:
            # log.debug(entry)
            translated_definition = []
            translated_from_definition = []
            out_translation_methods = {}
            for definition_line in entry.definitions:
                refined_definition_lines = wiktionary_processor.refine_definition(
                    definition_line)
                for refined_definition_line in refined_definition_lines:
                    # Try every configured translation method on each
                    # refined definition line.
                    for t_method in translation_methods:
                        if entry.part_of_speech is None:
                            continue
                        definitions = t_method(
                            entry.part_of_speech,
                            refined_definition_line,
                            wiktionary_processor.language,
                            self.working_wiki_language,
                            language=entry.language
                        )
                        if isinstance(definitions, UntranslatedDefinition):
                            continue
                        elif isinstance(definitions, TranslatedDefinition):
                            self.create_lemma_if_not_exists(wiktionary_processor, definitions, entry)
                            for d in definitions.split(','):
                                translated_definition.append(d.strip())
                                # Record which method(s) produced each
                                # translated definition.
                                if d in out_translation_methods:
                                    out_translation_methods[d].append(
                                        t_method.__name__)
                                else:
                                    out_translation_methods[d] = [
                                        t_method.__name__]
                            translated_definition += [k.strip()
                                                      for k in definitions.split(',')]
                    translated_from_definition.append(refined_definition_line)
            entry_definitions = sorted(list(set(translated_definition)))
            out_entry = deepcopy(entry)
            out_entry.translated_from_definition = ', '.join(
                translated_from_definition)
            out_entry.definitions = entry_definitions
            out_entry.translated_from_language = wiktionary_processor.language
            out_entry.translation_methods = out_translation_methods
            # Carry reference/further-reading and pronunciation sections
            # over, translated, when the source entry has them.
            for reference_name in ['reference', 'further_reading']:
                if hasattr(entry, reference_name):
                    setattr(out_entry, reference_name, translate_references(getattr(entry, reference_name)))
            if hasattr(entry, 'pronunciation'):
                out_entry.pronunciation = translate_pronunciation(entry.pronunciation)
            # Keep only entries with at least one translated definition.
            if entry_definitions:
                out_entries.append(out_entry)
        out_entries.sort()
        return out_entries

    def process_wiktionary_wiki_page(self, wiki_page: [Page, pywikibot.Page]):
        """Entry point for one wiki page: parse, translate, publish.

        Returns the number of published entries, 0 when there was nothing
        to publish, -1 on error, None for non-content namespaces.
        """
        if not wiki_page.namespace().content:
            return
        language = wiki_page.site.lang
        wiktionary_processor_class = entryprocessor.WiktionaryProcessorFactory.create(
            language)
        wiktionary_processor = wiktionary_processor_class()
        if not wiki_page.isRedirectPage():
            wiktionary_processor.set_text(wiki_page.get())
            wiktionary_processor.set_title(wiki_page.title())
        else:
            # Follow the redirect and process its target instead.
            return self.process_wiktionary_wiki_page(
                wiki_page.getRedirectTarget())
        try:
            out_entries = self.translate_wiktionary_page(wiktionary_processor)
            out_entries = Translation.add_wiktionary_credit(
                out_entries, wiki_page)
            ret = self.output.wikipages(out_entries)
            if ret != '':
                log.debug('out_entries>' + str(out_entries))
                self.publish_to_wiktionary(wiki_page.title(), out_entries)
                self._save_translation_from_page(out_entries)
                return len(out_entries)
            else:
                return 0
        except Exception as exc:
            # Never let one bad page abort a batch run; log and signal -1.
            log.exception(exc)
            return -1
|
from ... import Printable
from . import FieldType
class FieldValue(Printable):
    """One observed value of a field, with its frequency statistics and a
    link to the next field type in the chain."""

    # Class-level defaults. The original declared these as annotations with
    # literal "types" (e.g. ``field_value: None``), which only populates
    # __annotations__ and never assigns the attributes, so reading
    # ``self.field_value`` on a fresh instance raised AttributeError.
    field_value = None   # the raw observed value
    ratio = 0            # share of records carrying this value
    count = 0            # number of occurrences observed
    next_field: FieldType  # assigned externally; no sensible default

    def get_field_value(self):
        """Return the raw stored value for this field."""
        return self.field_value
|
"""
Script that perform the first-level analysis of a dataset of the FIAC
Last updated by B.Thirion
Author : Lise Favre, Bertrand Thirion, 2008-2009
"""
import os
from configobj import ConfigObj
from os.path import join
import glob
import GLMTools
import Contrast
# -----------------------------------------------------------
# --------- Set the paths -----------------------------------
#-----------------------------------------------------------
# Root of the Localizer database on disk.
DBPath = "/volatile/thirion/Localizer"
Subjects = ["s12069"]#["s12277", "s12069","s12300","s12401","s12431","s12508","s12532","s12635","s12636","s12826","s12898","s12913","s12919","s12920"]#
Acquisitions = ["acquisition"]
Sessions = ["loc1"]
# Sub-directory names inside each subject's folder.
fmri = "fMRI/"
t1mri = "anatomy"
glmDir = "glm"
contrastDir = "Contrast"
minfDir = "Minf"
# ---------------------------------------------------------
# -------- General Information ----------------------------
# ---------------------------------------------------------
TR = 2.4        # repetition time, in seconds
nbFrames = 128  # number of scans per session
Conditions = [ 'damier_H', 'damier_V', 'clicDaudio', 'clicGaudio',
    'clicDvideo', 'clicGvideo', 'calculaudio', 'calculvideo', 'phrasevideo',
    'phraseaudio' ]
# Output sub-directory names keyed by human-readable result type.
paths = {}
paths["Z map"] = "z_map"
paths["Student-t tests"] = "T_map"
paths["Fisher tests"] = "F_map"
paths["Residual variance"] = "ResMS"
paths["contrast definition"] = "con"
paths["HTML results"] = "html"
# ---------------------------------------------------------
# ------ First level analysis parameters ---------------------
# ---------------------------------------------------------
#---------- Masking parameters
# Lower/upper intensity thresholds (fractions) for the brain mask.
infTh = 0.2
supTh = 0.9
#---------- Design Matrix
# Possible choices for hrfType : "Canonical", \
# "Canonical With Derivative" or "FIR Model"
hrfType = "Canonical With Derivative"
# Possible choices for drift : "Blank", "Cosine", "Polynomial"
drift = "Cosine"
# If drift is "Polynomial"
poly_order = 3
# If drift is "Cosine"
cos_FreqCut = 128
# If hrfType is "FIR Model"
FIR_order = 1
FIR_length = 1
# If the following in not none it will be considered to be the drift
drift_matrix = None
# Bundle of design-matrix parameters passed to GLMTools.DesignMatrix.
DmtxParam= {}
DmtxParam["hrfType"] = hrfType
DmtxParam["drift"] = drift
DmtxParam["poly_order"] = poly_order
DmtxParam["cos_FreqCut"] = cos_FreqCut
DmtxParam["FIR_order"] = FIR_order
DmtxParam["FIR_length"] = FIR_length
DmtxParam["drift_matrix"] = drift_matrix
#-------------- GLM options
# Possible choices : "Kalman_AR1", "Kalman", "Ordinary Least Squares"
fit_algo = "Kalman_AR1"
#-------------- Contrast Options
# Possible choices : "Contrast Name" or "Contrast Number"
save_mode = "Contrast Name"
# ------------------------------------------------------------------
# Launching Pipeline on all subjects, all acquisitions, all sessions
# -------------------------------------------------------------------
# fixme : normally all that part should be data-independent,
# i.e. a standard user should not have to look at it
# which means that the paths are completely set at that point
# Treat sequentially all subjects & acquisitions
# NOTE(review): this script is Python 2 (print statements, `raise E, msg`,
# and `.values()[0]` indexing).
for s in Subjects:
    print "Subject : %s" % s
    SubjectPath = os.sep.join((DBPath, s))
    for a in Acquisitions:
        # step 0. set all the paths
        # all the paths that are not session dependent
        miscPath = os.sep.join((SubjectPath, fmri, a, minfDir))
        paradigmFile = os.sep.join((miscPath, "paradigm.csv"))
        miscFile = os.sep.join((miscPath, "misc_info.con"))
        maskFile = os.sep.join((SubjectPath, fmri, a, minfDir, "mask.nii"))
        paths["Contrasts_path"] = os.sep.join((SubjectPath, fmri, a,
                                               glmDir, contrastDir))
        #step 1. Get the fMRI data
        fmriFiles = {}
        for sess in Sessions:
            fmriPath = os.sep.join((SubjectPath, fmri, a, sess))
            fmriFiles[sess] = glob.glob(join(fmriPath,'S*.nii'))
        # step 2. get the paradigm definition and create misc info file
        if not os.path.isfile(paradigmFile):
            raise ValueError,"paradigm file %s not found" %paradigmFile
        misc = ConfigObj(miscFile)
        misc['sessions'] = Sessions
        misc['tasks'] = Conditions
        misc['mask_url'] = maskFile
        misc.write()
        # step 3. Create one design matrix for each session
        for sess in Sessions:
            # Creating Design Matrix
            designPath = os.sep.join((SubjectPath, fmri, a, glmDir, sess))
            if not os.path.exists(designPath):
                os.makedirs(designPath)
            designFile = os.sep.join((designPath, "design_mat.csv"))
            GLMTools.DesignMatrix(nbFrames, paradigmFile, miscFile,
                                  TR, designFile, sess, DmtxParam)
        # step 4. Compute the Mask
        # fixme : it should be possible to provide a pre-computed mask
        print "Computing the Mask"
        # Mask is computed from the first file of the first session
        # (py2-only: dict.values() is a list here).
        GLMTools.ComputeMask(fmriFiles.values()[0][0], maskFile, infTh, supTh)
        # step 5. Creating Contrast File
        print "Creating Contrasts"
        contrast = Contrast.ContrastList(miscFile)
        d = contrast.dic
        # Compound contrasts built from the per-condition regressors.
        d["audio"] = d["clicDaudio"] + d["clicGaudio"] +\
            d["calculaudio"] + d["phraseaudio"]
        d["video"] = d["clicDvideo"] + d["clicGvideo"] + \
            d["calculvideo"] + d["phrasevideo"]
        d["left"] = d["clicGaudio"] + d["clicGvideo"]
        d["right"] = d["clicDaudio"] + d["clicDvideo"]
        d["computation"] = d["calculaudio"] +d["calculvideo"]
        d["sentences"] = d["phraseaudio"] + d["phrasevideo"]
        # Differential contrasts.
        d["H-V"] = d["damier_H"] - d["damier_V"]
        d["V-H"] =d["damier_V"] - d["damier_H"]
        d["left-right"] = d["left"] - d["right"]
        d["right-left"] = d["right"] - d["left"]
        d["audio-video"] = d["audio"] - d["video"]
        d["video-audio"] = d["video"] - d["audio"]
        d["computation-sentences"] = d["computation"] - d["sentences"]
        d["reading-visual"] = d["sentences"]*2 - d["damier_H"] - d["damier_V"]
        contrastFile = os.sep.join((SubjectPath, fmri, a, minfDir,
                                    "contrast.con"))
        contrast.save_dic(contrastFile)
        # step 6. Fit the glm for each session
        glms = {}
        for sess in Sessions:
            print "Fitting GLM for session : %s" % sess
            glmPath = os.sep.join((SubjectPath, fmri, a, glmDir, sess))
            GlmDumpFile = os.sep.join((glmPath, "vba.npz"))
            configFile = os.sep.join((glmPath, "vba_config.con"))
            designPath = os.sep.join((SubjectPath, fmri, a, glmDir, sess))
            designFile = os.sep.join((designPath, "design_mat.csv"))
            if os.path.exists(designFile):
                GLMTools.GLMFit(fmriFiles[sess], designFile, GlmDumpFile,
                                configFile, fit_algo, maskFile)
                glms[sess] = {}
                glms[sess]["GlmDumpFile"] = GlmDumpFile
                glms[sess]["ConfigFilePath"] = configFile
        #step 7. Compute Contrasts
        print "Computing contrasts"
        if not os.path.exists(paths["Contrasts_path"]):
            os.makedirs(paths["Contrasts_path"])
        GLMTools.ComputeContrasts(contrastFile, miscFile, glms,\
                                  save_mode, paths = paths)
|
import gc
import time
from contextlib import contextmanager
from unittest import mock
import objgraph
from dagit.subscription_server import DagsterSubscriptionServer
from dagster import execute_pipeline, pipeline, solid
from dagster.core.test_utils import environ, instance_for_test
from dagster.core.workspace.context import WorkspaceProcessContext
from dagster.core.workspace.load_target import WorkspaceFileTarget
from dagster.utils import file_relative_path
from dagster_graphql.schema import create_schema
from graphql_ws.constants import GQL_CONNECTION_INIT, GQL_CONNECTION_TERMINATE, GQL_START, GQL_STOP
from graphql_ws.gevent import GeventConnectionContext
EVENT_LOG_SUBSCRIPTION = """
subscription PipelineRunLogsSubscription($runId: ID!) {
pipelineRunLogs(runId: $runId) {
__typename
}
}
"""
COMPUTE_LOG_SUBSCRIPTION = """
subscription ComputeLogsSubscription(
$runId: ID!
$stepKey: String!
$ioType: ComputeIOType!
) {
computeLogs(runId: $runId, stepKey: $stepKey, ioType: $ioType) {
__typename
}
}
"""
@contextmanager
def create_subscription_context(instance):
    """Yield a GeventConnectionContext backed by a mocked websocket and a
    read-only workspace process context for `instance`."""
    fake_socket = mock.Mock()
    workspace_yaml = file_relative_path(__file__, "./workspace.yaml")
    load_target = WorkspaceFileTarget(paths=[workspace_yaml])
    with WorkspaceProcessContext(
        instance=instance,
        workspace_load_target=load_target,
        version="",
        read_only=True,
    ) as process_context:
        yield GeventConnectionContext(fake_socket, process_context)
def send_subscription_message(server, context, op, payload=None):
    """Forward one graphql-ws protocol frame to the server (operation id
    is fixed at 1 for these tests)."""
    frame = {"id": 1, "type": op, "payload": payload or {}}
    server.on_message(context, frame)
def start_subscription(server, context, query=None, variables=None):
    """Initialize the graphql-ws connection and start one subscription.

    An empty payload is sent when no query is supplied (used to exercise
    the generic, schema-less path).
    """
    if query:
        start_payload = {"query": query, "variables": variables or {}}
    else:
        start_payload = {}
    for op, payload in ((GQL_CONNECTION_INIT, None), (GQL_START, start_payload)):
        send_subscription_message(server, context, op, payload)
def end_subscription(server, context):
    """Stop the running subscription, then terminate the connection."""
    for op in (GQL_STOP, GQL_CONNECTION_TERMINATE):
        send_subscription_message(server, context, op)
@solid
def example_solid():
    # Trivial solid: returns a constant so the pipeline produces events.
    return 1
@pipeline
def example_pipeline():
    # Single-solid pipeline used as a fixture by the subscription tests.
    example_solid()
def test_generic_subscriptions():
    """A bare (schema-less) subscription registers exactly one observer,
    which is garbage-collected after teardown (no leaked references)."""
    server = DagsterSubscriptionServer(schema=None)
    with instance_for_test() as instance:
        with create_subscription_context(instance) as context:
            start_subscription(server, context)
            gc.collect()
            assert len(objgraph.by_type("SubscriptionObserver")) == 1
            end_subscription(server, context)
            gc.collect()
            assert len(objgraph.by_type("SubscriptionObserver")) == 0
def test_event_log_subscription():
    """Event-log subscription creates one observer + one observable, and
    both are garbage-collected once the subscription ends."""
    schema = create_schema()
    server = DagsterSubscriptionServer(schema=schema)
    with instance_for_test() as instance:
        # A finished run to subscribe against.
        run = execute_pipeline(example_pipeline, instance=instance)
        assert run.success
        assert run.run_id
        with create_subscription_context(instance) as context:
            start_subscription(server, context, EVENT_LOG_SUBSCRIPTION, {"runId": run.run_id})
            gc.collect()
            assert len(objgraph.by_type("SubscriptionObserver")) == 1
            assert len(objgraph.by_type("PipelineRunObservableSubscribe")) == 1
            end_subscription(server, context)
            gc.collect()
            assert len(objgraph.by_type("SubscriptionObserver")) == 0
            assert len(objgraph.by_type("PipelineRunObservableSubscribe")) == 0
def test_event_log_subscription_chunked():
    """Same leak check as above, but with chunked event loading enabled,
    which uses a background loading thread."""
    schema = create_schema()
    server = DagsterSubscriptionServer(schema=schema)
    with instance_for_test() as instance, environ({"DAGIT_EVENT_LOAD_CHUNK_SIZE": "2"}):
        run = execute_pipeline(example_pipeline, instance=instance)
        assert run.success
        assert run.run_id
        with create_subscription_context(instance) as context:
            start_subscription(server, context, EVENT_LOG_SUBSCRIPTION, {"runId": run.run_id})
            gc.collect()
            assert len(objgraph.by_type("SubscriptionObserver")) == 1
            assert len(objgraph.by_type("PipelineRunObservableSubscribe")) == 1
            end_subscription(server, context)
            gc.collect()
            # give time for bg loading thread to stop
            start = time.time()
            while time.time() - start < 15:
                if len(objgraph.by_type("SubscriptionObserver")) == 0:
                    break
                time.sleep(0.01)
            assert len(objgraph.by_type("SubscriptionObserver")) == 0
            assert len(objgraph.by_type("PipelineRunObservableSubscribe")) == 0
@mock.patch(
    "dagster.core.storage.local_compute_log_manager.LocalComputeLogManager.is_watch_completed"
)
def test_compute_log_subscription(mock_watch_completed):
    """Compute-log subscription creates one observer + one log subscription,
    both garbage-collected after teardown.  is_watch_completed is forced to
    False so the subscription stays live until explicitly ended."""
    mock_watch_completed.return_value = False
    schema = create_schema()
    server = DagsterSubscriptionServer(schema=schema)
    with instance_for_test() as instance:
        run = execute_pipeline(example_pipeline, instance=instance)
        assert run.success
        assert run.run_id
        with create_subscription_context(instance) as context:
            start_subscription(
                server,
                context,
                COMPUTE_LOG_SUBSCRIPTION,
                {
                    "runId": run.run_id,
                    "stepKey": "example_solid",
                    "ioType": "STDERR",
                },
            )
            gc.collect()
            assert len(objgraph.by_type("SubscriptionObserver")) == 1
            assert len(objgraph.by_type("ComputeLogSubscription")) == 1
            end_subscription(server, context)
            gc.collect()
            assert len(objgraph.by_type("SubscriptionObserver")) == 0
            assert len(objgraph.by_type("ComputeLogSubscription")) == 0
|
import pygame
import pygame.freetype
class Keys:
    """State of the keyboard controls relevant to driving."""

    def __init__(self):
        """Stores the state of a keyboard"""
        # Direction keys and gear-shift keys all start unpressed.
        for flag in ("up", "left", "down", "right", "shift_up", "shift_down"):
            setattr(self, flag, False)
class SteeringWheel:
    """State of a steering-wheel controller."""

    def __init__(self):
        """Stores the state of a steering wheel"""
        # Analogue axes start neutral; shift buttons start released.
        self.steer, self.throttle, self.brake = 0, 0, 0
        self.shift_up = self.shift_down = False
class Interface:
    def __init__(self, using_steering_wheel):
        """Interface for communicating with keyboards and steering wheels"""
        pygame.init()
        self.screen = pygame.display.set_mode((640, 480))
        self.font = pygame.freetype.SysFont('Ubuntu', 30)
        self.using_steering_wheel = using_steering_wheel
        if self.using_steering_wheel:
            # Only the first detected wheel is used.
            self.steering_wheel = self.__init_steering_wheel()[0]
            self.steering_wheel_state = SteeringWheel()
        else:
            self.pressed = Keys()

    def __init_steering_wheel(self):
        """Checks for steering wheels and returns an initialized list"""
        steering_wheels = []
        for wheel_id in range(pygame.joystick.get_count()):
            steering_wheels.append(pygame.joystick.Joystick(wheel_id))
            steering_wheels[wheel_id].init()
        return steering_wheels

    def check_key(self, event_type, event_key):
        """Check a single key for a single event type"""
        # NOTE(review): this drains the whole pygame event queue, discarding
        # any other pending events, and may return True before the queue is
        # fully consumed -- confirm that is acceptable for callers.
        for event in pygame.event.get():
            if event.type == event_type:
                if event.key == event_key:
                    return True
        return False

    def get_key_state(self):
        """Get states of all important keys"""
        # Update the persistent Keys object from the event queue:
        # KEYDOWN sets a flag, KEYUP clears it.
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_UP:
                    self.pressed.up = True
                if event.key == pygame.K_DOWN:
                    self.pressed.down = True
                if event.key == pygame.K_LEFT:
                    self.pressed.left = True
                if event.key == pygame.K_RIGHT:
                    self.pressed.right = True
                if event.key == pygame.K_z:
                    self.pressed.shift_down = True
                if event.key == pygame.K_x:
                    self.pressed.shift_up = True
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_UP:
                    self.pressed.up = False
                if event.key == pygame.K_DOWN:
                    self.pressed.down = False
                if event.key == pygame.K_LEFT:
                    self.pressed.left = False
                if event.key == pygame.K_RIGHT:
                    self.pressed.right = False
                if event.key == pygame.K_z:
                    self.pressed.shift_down = False
                if event.key == pygame.K_x:
                    self.pressed.shift_up = False
        return self.pressed

    def get_steering_wheel_state(self):
        """Get the current state of the steering wheel"""
        pygame.event.pump()
        # Axis/button numbers below match one specific wheel layout --
        # presumably the author's hardware; verify for other devices.
        self.steering_wheel_state.steer = self.steering_wheel.get_axis(0)
        self.steering_wheel_state.throttle = self.steering_wheel.get_axis(2)
        self.steering_wheel_state.brake = self.steering_wheel.get_axis(3)
        self.steering_wheel_state.shift_up = \
            bool(self.steering_wheel.get_button(4))
        self.steering_wheel_state.shift_down = \
            bool(self.steering_wheel.get_button(5))
        return self.steering_wheel_state

    def display_act(self, act):
        """Display the current action on the interface"""
        accel = "ACCEL: " + str(act.accel)
        brake = "BRAKE: " + str(act.brake)
        gear = "GEAR: " + str(act.gear)
        steer = "STEER: " + str(act.steer)
        self.screen.fill((0, 0, 0))
        # font.render returns (surface, bounding_rect); the string variables
        # are rebound to the rendered surfaces here.
        accel, rect_accel = self.font.render(accel, (255, 255, 255))
        brake, rect_brake = self.font.render(brake, (255, 255, 255))
        gear, rect_gear = self.font.render(gear, (255, 255, 255))
        steer, rect_steer = self.font.render(steer, (255, 255, 255))
        self.screen.blit(accel, (10, 10))
        self.screen.blit(brake, (10, rect_brake[1] + 20))
        self.screen.blit(gear, (10, 2 * rect_gear[1] + 30))
        self.screen.blit(steer, (10, 3 * rect_steer[1] + 40))
        pygame.display.flip()
|
"""
Example code for using Elpis/Kaldi from Python
Start Elpis Docker container, share a volume containing your dataset, e.g.:
docker run --rm -it -p 5001:5001/tcp \
-v ~/sandbox/datasets:/datasets \
-v ~/sandbox/state:/state \
-v ~/sandbox/elpis:/elpis \
--entrypoint /bin/zsh \
coedl/elpis:latest
Change dataset dir values etc below to suit your data.
Run the data preparation scripts and do training by calling this script from the /elpis dir.
python elpis/examples/cli/kaldi/train.py
"""
from elpis.engines.common.objects.interface import Interface
from pathlib import Path
# --- Configuration: adapt these to your data ------------------------------
DATASET_DIR = '/datasets/abui/transcribed'
DATASET_NAME = 'ds'
IMPORTER_METHOD = 'tier_name'
IMPORTER_VALUE = 'Phrase'
L2S_PATH = '/datasets/abui/letter_to_sound.txt'
PRON_DICT_NAME = 'pd'
MODEL_NAME = 'mx'
TX_NAME = 'tx'
INFER_FILE_PATH = '/datasets/abui/untranscribed/audio.wav'

# Step 0
# ======
# Create a Kaldi interface directory (where all the associated files/objects
# will be stored).
elpis = Interface(path=Path('/state'), use_existing=True)

# Step 1
# ======
# Select Engine
from elpis.engines import ENGINES
engine = ENGINES['kaldi']
elpis.set_engine(engine)

# Step 2
# ======
# Setup a dataset to to train data on.
# Reuse dataset if it exists
if DATASET_NAME not in elpis.list_datasets():
    print("Making new dataset")
    ds = elpis.new_dataset(DATASET_NAME)
    ds.add_directory(DATASET_DIR, extensions=['eaf', 'wav'])
    ds.auto_select_importer()  # Selects Elan because of eaf file.
    ds.importer.set_setting(IMPORTER_METHOD, IMPORTER_VALUE)
    ds.process()
else:
    print("Use existing dataset")
    ds = elpis.get_dataset(DATASET_NAME)

# Step 3
# ======
# Build pronunciation dictionary
# Reuse pronunciation dictionary if it exists
if PRON_DICT_NAME not in elpis.list_pron_dicts():
    print("Making new pron dict")
    pd = elpis.new_pron_dict(PRON_DICT_NAME)
    pd.link(ds)
    pd.set_l2s_path(L2S_PATH)
    pd.generate_lexicon()
else:
    print("Use existing pron dict")
    pd = elpis.get_pron_dict(PRON_DICT_NAME)

# Step 4
# ======
# Link dataset and pd to a new model, then train the model.
# Load model if it exists
if MODEL_NAME not in elpis.list_models():
    print("Making new model")
    m = elpis.new_model(MODEL_NAME)
    m.link_dataset(ds)
    m.link_pron_dict(pd)
    m.build_structure()
    m.train()  # may take a while
else:
    print("Use existing model")
    m = elpis.get_model(MODEL_NAME)

# Step 5
# ======
# Make a transcription interface and transcribe audio.
# Pick a transcription name not already taken: tx, tx0, tx1, ...
# (The original loop never incremented its counter, so it kept appending
# '0' to the previous candidate: 'tx0', 'tx00', 'tx000', ...)
base_tx_name = TX_NAME
i = 0
while TX_NAME in elpis.list_transcriptions():
    TX_NAME = base_tx_name + str(i)
    i += 1
t = elpis.new_transcription(TX_NAME)
t.link(m)
with open(INFER_FILE_PATH, 'rb') as faudio:
    t.prepare_audio(faudio)
t.transcribe()
print(t.text())
|
from typing import TypeVar, Type, BinaryIO
from PIL import Image as PImage
from sprite_unpack.types import Color, Box
# Bound TypeVar so classmethod constructors (from_io) are typed as
# returning the calling subclass.
ImageType = TypeVar("ImageType", bound="Image")
class Image:
    """Thin wrapper around a PIL image with the pixel-level helpers used by
    the sprite unpacker."""

    # The wrapped PIL image (annotation fixed: `PImage` is the module,
    # `PImage.Image` is the class).
    image: PImage.Image

    def __init__(self, image: PImage.Image) -> None:
        # Normalize to RGBA so every pixel is a 4-tuple.
        self.image = image.convert("RGBA")
        self.pix_data = self.image.load()

    @property
    def width(self):
        return self.image.width

    @property
    def height(self):
        return self.image.height

    @classmethod
    def from_io(cls: Type[ImageType], file: BinaryIO) -> ImageType:
        """Load an image from an open binary stream."""
        image = PImage.open(file)
        return cls(image)

    def color(self, x: int, y: int) -> Color:
        """Return the RGBA color of the pixel at (x, y)."""
        return self.pix_data[x, y]

    @staticmethod
    def color_to_hex(color: Color) -> str:
        """Format an RGBA tuple as '#rrggbbaa'."""
        return "#{0:02x}{1:02x}{2:02x}{3:02x}".format(*color)

    def background_color(self) -> Color:
        """The top-left pixel is taken to be the background color."""
        return self.color(0, 0)

    def copy(self):
        """Return a new Image wrapping a copy of the underlying image."""
        return self.__class__(self.image.copy())

    def make_transparent(self, color: Color):
        """Replace every pixel equal to `color` with transparent black."""
        for x in range(self.width):
            for y in range(self.height):
                if self.pix_data[x, y] == color:
                    self.pix_data[x, y] = (0, 0, 0, 0)

    @staticmethod
    def _normalize_box(box: Box):
        """Return (x1, y1, x2, y2) with corners ordered min -> max.

        Factored out of subimage() and mark(), which previously duplicated
        this corner-swapping logic inline.
        """
        ((x1, y1), (x2, y2)) = box
        if x1 > x2:
            (x1, x2) = (x2, x1)
        if y1 > y2:
            (y1, y2) = (y2, y1)
        return x1, y1, x2, y2

    def subimage(self, box: Box) -> "Image":
        """Crop the (corner-inclusive) box into a new Image."""
        x1, y1, x2, y2 = self._normalize_box(box)
        # PIL crop boxes are exclusive on the right/bottom edge, hence +1.
        return self.__class__(self.image.crop((x1, y1, x2 + 1, y2 + 1)))

    def mark(self, box: Box, color: Color):
        """Fill the (corner-inclusive) box with a solid color, in place."""
        x1, y1, x2, y2 = self._normalize_box(box)
        for x in range(x1, x2 + 1):
            for y in range(y1, y2 + 1):
                self.pix_data[x, y] = color

    def write(self, file: BinaryIO) -> None:
        """Serialize the image to `file` as PNG."""
        self.image.save(file, format="PNG")
|
# import cryptography
import argparse
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives import hashes
def rsa_gen_key(keysize: int):
    """Generate a fresh RSA private key (standard public exponent 65537)."""
    return rsa.generate_private_key(public_exponent=65537, key_size=keysize)
def rsa_serialize_keys(private):
    """Return (private_pem, public_pem) bytes for the given private key.

    The private key is serialized unencrypted in traditional OpenSSL PEM
    format; the public key as SubjectPublicKeyInfo PEM.
    """
    pem = serialization.Encoding.PEM
    priv_serial = private.private_bytes(
        encoding=pem,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption(),
    )
    pub_serial = private.public_key().public_bytes(
        encoding=pem,
        format=serialization.PublicFormat.SubjectPublicKeyInfo,
    )
    return (priv_serial, pub_serial)
def rsa_enc(input_stream, pub_key):
    """Encrypt bytes with RSA-OAEP (MGF1 + SHA-256, no label)."""
    oaep = padding.OAEP(
        mgf=padding.MGF1(algorithm=hashes.SHA256()),
        algorithm=hashes.SHA256(),
        label=None,
    )
    return pub_key.encrypt(input_stream, oaep)
def rsa_dec(input_stream, priv_key):
    """Decrypt RSA-OAEP ciphertext (MGF1 + SHA-256, no label)."""
    oaep = padding.OAEP(
        mgf=padding.MGF1(algorithm=hashes.SHA256()),
        algorithm=hashes.SHA256(),
        label=None,
    )
    return priv_key.decrypt(input_stream, oaep)
def main():
    """Demo: generate an RSA key pair, write the private key and an
    encrypted message to disk."""
    text = b'Hallo falls das geht geht es gut!'
    private_key = rsa_gen_key(2048)
    # Context managers guarantee the files are closed even if a write
    # fails (the original opened both handles up front and only closed
    # them on the success path).
    with open("./key.txt", "wb") as output_file_key:
        output_file_key.write(rsa_serialize_keys(private_key)[0])
    with open("./text.enc", "wb") as encrypted_textfile:
        encrypted_textfile.write(rsa_enc(text, private_key.public_key()))


if __name__ == '__main__':
    main()
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
import os
import datetime
# Application bootstrap: create the Flask app and wire up its extensions.
app = Flask(__name__)
# Disable SQLAlchemy's event system for model changes (saves memory).
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
login_manager = LoginManager()
login_manager.init_app(app)
# Remaining settings come from the config object named in APP_SETTINGS;
# raises KeyError if that environment variable is unset.
app.config.from_object(os.environ['APP_SETTINGS'])
bcryptObj = Bcrypt(app)
# NOTE(review): localSystem is only initialised here, never assigned in
# this block — presumably set elsewhere at runtime; confirm before removing.
localSystem = None
db = SQLAlchemy(app)
# Blueprints are imported only after `app` and `db` exist — presumably to
# avoid circular imports with the view modules; verify before reordering.
from project.home.views import home_blueprint
from project.user.views import user_blueprint
from project.college.views import college_blueprint
from project.admin.views import admin_blueprint
from project.dataset.views import dataset_blueprint
app.register_blueprint(home_blueprint)
app.register_blueprint(user_blueprint)
app.register_blueprint(college_blueprint)
app.register_blueprint(admin_blueprint)
app.register_blueprint(dataset_blueprint)
from project.models import User
# Endpoint unauthenticated users are redirected to by Flask-Login.
login_manager.login_view = "user.login"
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login user loader: map a session user id to a User row.

    Returns None (anonymous) when the id is malformed or the lookup fails.
    """
    try:
        user = User.query.filter(User.id == int(user_id)).first()
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort None fallback but only
        # for ordinary exceptions.
        user = None
    return user
#
# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from functools import wraps
from contextlib import contextmanager
from aria import extension as aria_extension
from .context_adapter import CloudifyContextAdapter
@aria_extension.process_executor
class CloudifyExecutorExtension(object):
    """ARIA process-executor extension that runs Cloudify-based plugin
    operations with a Cloudify-compatible ``ctx`` object.
    """

    def decorate(self):
        """Return a decorator that wraps an operation function, choosing
        between a Cloudify-adapted context and the plain ARIA context.
        """
        def decorator(function):
            @wraps(function)
            def wrapper(ctx, **operation_inputs):
                # We assume that any Cloudify-based plugin would use the plugins-common, thus two
                # different paths are created
                is_cloudify_dependent = ctx.task.plugin and any(
                    'cloudify_plugins_common' in w for w in ctx.task.plugin.wheels)
                if is_cloudify_dependent:
                    # Imported lazily: the cloudify package is only needed
                    # (and presumably only installed) for dependent plugins.
                    from cloudify import context
                    from cloudify.exceptions import (NonRecoverableError, RecoverableError)
                    with ctx.model.instrument(*ctx.INSTRUMENTATION_FIELDS):
                        # We need to create a new class dynamically, since CloudifyContextAdapter
                        # doesn't exist at runtime
                        ctx_adapter = type('_CloudifyContextAdapter',
                                           (CloudifyContextAdapter, context.CloudifyContext),
                                           {}, )(ctx)
                        exception = None
                        with _push_cfy_ctx(ctx_adapter, operation_inputs):
                            try:
                                function(ctx=ctx_adapter, **operation_inputs)
                            except NonRecoverableError as e:
                                # Permanent failure: abort the task.
                                ctx.task.abort(str(e))
                            except RecoverableError as e:
                                # Transient failure: retry, honoring the
                                # error's requested retry interval.
                                ctx.task.retry(str(e), retry_interval=e.retry_after)
                            except BaseException as e:
                                # Keep exception and raise it outside of "with", because
                                # contextmanager does not allow raising exceptions
                                exception = e
                        if exception is not None:
                            raise exception
                else:
                    function(ctx=ctx, **operation_inputs)
            return wrapper
        return decorator
@contextmanager
def _push_cfy_ctx(ctx, params):
    """Push *ctx*/*params* onto Cloudify's current-context state for the
    duration of the block, restoring the previous state afterwards.

    Supports both the new (>4.0) ``push`` API and the old (<4.0)
    ``get/set`` API.
    """
    from cloudify import state
    try:
        # Support for Cloudify > 4.0
        with state.current_ctx.push(ctx, params) as current_ctx:
            yield current_ctx
    except AttributeError:
        # Support for Cloudify < 4.0
        # The old API has no push(); emulate it by saving the current
        # values, setting ours, and restoring in a finally block.
        try:
            original_ctx = state.current_ctx.get_ctx()
        except RuntimeError:
            # No context set yet.
            original_ctx = None
        try:
            original_params = state.current_ctx.get_parameters()
        except RuntimeError:
            original_params = None
        state.current_ctx.set(ctx, params)
        try:
            yield state.current_ctx.get_ctx()
        finally:
            state.current_ctx.set(original_ctx, original_params)
|
#!/usr/bin/env python3
"""
Generate different flavors of input assembly for testing.
"""
import os.path
import itertools
from common import set_basename, gcc, disasm, grep, strip_binary, find_address
# Register this script's name with the shared test helpers.
set_basename(os.path.basename(__file__))

# Exercise every combination of disassembler (GDB vs objdump),
# position-independence and symbol stripping.
for gdb, pic, strip in itertools.product([False, True],
                                         [False, True],  # Do we need to test PIE too?
                                         [False, True]):
    # Print config
    disasm_type = 'GDB' if gdb else 'objdump'
    pic_type = 'position-INdependent' if pic else 'position-dependent'
    stripped = 'stripped' if strip else 'UNstripped'
    print(f'Checking {disasm_type} {pic_type} {stripped}')
    # Generate object code; freestanding link so only jumptable.c's code
    # ends up in a.out.
    flags = ['jumptable.c', '-o', 'a.out',
             '-Wl,--defsym,_start=0', '-nostdlib', '-nostartfiles', '-O2']
    # DLL or executable?
    if pic:
        flags += ['-fPIC', '-shared']
    gcc(flags)
    # Strip
    caller = 'bar'
    # Resolve the symbol's address range BEFORE stripping removes it.
    start, finish = find_address('a.out', caller)
    if strip:
        strip_binary('a.out')
        caller = None  # symbol no longer available; disasm by address only
    # Generate disasm
    out = disasm('a.out', not gdb, caller, start, finish)
    # Print snippets
    jumps = grep(out, r'\bjmp')
    print('''\
table jumps:
{}
'''.format('\n '.join(jumps)))
|
from zope.interface import implementer
from zope.component.interfaces import IObjectEvent
class IPreRequestProcessingEvent(IObjectEvent):
    """Notify before the request is processed and before the
    transaction begins.
    """
@implementer(IPreRequestProcessingEvent)
class PreRequestProcessingEvent(object):
    """Event payload carrying the context and request for
    pre-processing subscribers.
    """

    def __init__(self, context, request):
        self.request = request
        self.object = context
class IPostRequestProcessingEvent(IObjectEvent):
    """Notify after request processing and after the transaction commit.
    """
@implementer(IPostRequestProcessingEvent)
class PostRequestProcessingEvent(object):
    """Event payload carrying the context and request for
    post-processing subscribers.
    """

    def __init__(self, context, request):
        self.request = request
        self.object = context
class IApplicationStartupEvent(IObjectEvent):
    """Notify exactly once, at application startup.
    """
@implementer(IApplicationStartupEvent)
class ApplicationStartupEvent(object):
    """Startup event whose ``object`` is the application settings."""

    def __init__(self, settings):
        self.object = settings
|
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# Engine/session bootstrap for the sql_alembic schema.
# NOTE(review): credentials are hard-coded in the URL — move them to
# configuration or environment variables before deploying.
# Alternative driver URLs tried previously:
# SQLALCHEMY_DATABASE_URL = "mysql+mysqlconnector://root:123456@192.168.1.24:3306/sql_alembic?charset=utf8"
# SQLALCHEMY_DATABASE_URL = "mysql+mysqlclient://root:123456@192.168.1.24:3306/sql_alembic?charset=utf8"
SQLALCHEMY_DATABASE_URL = "mysql+mysqldb://root:123456@192.168.1.24:3306/sql_alembic?charset=utf8"
engine = create_engine(
    SQLALCHEMY_DATABASE_URL
)
# Session factory; commit/flush are left under explicit caller control.
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
# Declarative base class for the ORM models.
Base = declarative_base()
|
import random
# For the lab, complete modexp(), RSA_enc(), and RSA_dec().
# HW 2 will allow you to submit the fully completed code from the lab,
# as well as egcd(), mulinv(), and keygen(), for extra credit on the
# assignment.
# You must work independently since this assignment will be part of HW 2.
# test constants
test_p = 23            # small prime p
test_q = 47            # small prime q; n = 23 * 47 = 1081
test_e = 35            # public exponent, coprime with phi = 22 * 46 = 1012
test_d = 347           # private exponent: 35 * 347 = 12145 ≡ 1 (mod 1012)
message = "Hello world!"
def calc_n(p, q):
    """Return the RSA modulus n = p * q."""
    # do not modify!
    return p * q
def calc_phi(p, q):
    """Return Euler's totient phi(n) = (p - 1)(q - 1) for distinct primes p, q."""
    # do not modify!
    return (p - 1) * (q - 1)
def modexp(b, e, m):
    """Return ``b ** e % m`` via binary (square-and-multiply) exponentiation.

    Runs in O(log e) multiplications instead of e of them.
    """
    if m == 1:
        return 0
    acc = 1
    base = b % m
    exp = e
    while exp > 0:
        exp, bit = exp >> 1, exp & 1
        if bit:
            acc = acc * base % m
        base = base * base % m
    return acc
def RSA_enc(plaintext, key):
    """Encrypt *plaintext* character by character.

    key: sequence (n, e).
    Returns a list of integers c_i = ord(ch_i) ** e mod n.
    """
    n, e = key
    # Three-argument pow() does true modular exponentiation; the original
    # (ord(char) ** e) % n first built an astronomically large integer,
    # which is unusable for realistic exponents.
    cipher = [pow(ord(char), e, n) for char in plaintext]
    return cipher
def RSA_dec(ciphertext, key):
    """Decrypt *ciphertext* (list of integers) back to a string.

    key: sequence (n, d) — the private key matching the encryption key.
    """
    n, e = key
    # Same fix as RSA_enc: use modular pow() instead of (char ** e) % n,
    # which built a huge intermediate integer before reducing.
    plaintext = [chr(pow(char, e, n)) for char in ciphertext]
    return ''.join(plaintext)
def test():
    """Round-trip *message* through the fixed test key pair, printing
    the keys, the ciphertext and the recovered plaintext.
    """
    # do not modify!
    n = calc_n(test_p, test_q)
    private = [n, test_d]
    public = [n, test_e]
    print("Public key:",public)
    print("Private key:",private)
    print("Original message:",message)
    ciphertext = RSA_enc(message,public)
    print("Encrypted message:",ciphertext)
    plaintext = RSA_dec(ciphertext,private)
    print("Decrypted message:",plaintext)
# === Below this comment is the portions of this assignment that contribute to HW 2 ===
def egcd(b, n):
    """Extended Euclidean algorithm.

    Returns a triple (g, x, y) such that b*x + n*y = g = gcd(b, n).
    """
    x_prev, x_curr = 1, 0
    y_prev, y_curr = 0, 1
    while n:
        q = b // n
        b, n = n, b - q * n
        x_prev, x_curr = x_curr, x_prev - q * x_curr
        y_prev, y_curr = y_curr, y_prev - q * y_curr
    return b, x_prev, y_prev
def mulinv(e, n):
    """Return the multiplicative inverse of e modulo n.

    Returns None when e is not invertible (i.e. gcd(e, n) != 1) —
    callers must handle that case.
    """
    g, x, _ = egcd(e, n)
    return x % n if g == 1 else None
def checkprime(n, size):
    """Probabilistically test *n* for primality.

    Runs *size* rounds of a Fermat test followed by trial division up to
    sqrt(n) in a 6k±1 stepping pattern.  NOTE: returns False for 2 and 3
    themselves (both fail the initial divisibility screen) — callers only
    pass larger candidates.
    """
    # do not modify!
    # determine if a number is prime
    if n % 2 == 0 or n % 3 == 0: return False
    i = 0
    # fermat primality test, complexity ~(log n)^4
    while i < size:
        if modexp(random.randint(1, n - 1), n - 1, n) != 1: return False
        i += 1
    # division primality test
    i = 5
    # Trial divisors step 5, 7, 11, 13, ... (the 6k±1 pattern).
    while i * i <= n:
        if n % i == 0: return False
        i += 2
        if n % i == 0: return False
        i += 4
    return True
def primegen(size):
    """Generate a prime with *size* decimal digits.

    Picks a random 6k+1 starting point in the digit range, then scans
    upward (p) and downward (q) simultaneously over 6k±1 candidates
    until checkprime() accepts one.  May return None if no prime is
    found in range (not expected in practice for size > 1).
    """
    # do not modify!
    # generates a <size> digit prime
    if(size == 1): return random.choice([2, 3, 5, 7])
    lower = 10 ** (size - 1)
    upper = 10 ** size - 1
    p = random.randint(lower, upper)
    # Snap p to the nearest 6k+1 so candidates stay on the 6k±1 grid.
    p -= (p % 6)
    p += 1
    if p < lower: p += 6
    elif p > upper: p -= 6
    q = p - 2
    while p < upper or q > lower:
        if p < upper:
            if checkprime(p, size): return p
            p += 4
        if q > lower:
            if checkprime(q, size): return q
            q -= 4
        if p < upper:
            if checkprime(p, size): return p
            p += 2
        if q > lower:
            if checkprime(q, size): return q
            q -= 2
def keygen(size):
    """Generate a random RSA public/private key pair.

    size: the digits in the rsa modulus, approximately; must be even, > 2.
    Returns a tuple of tuples ((n, e), (n, d)).
    """
    assert(size % 2 == 0 and size > 2) # keep this line!
    p = primegen(size)
    q = primegen(size)
    while p == q: # make sure p != q
        q = primegen(size)
    n = calc_n(p, q)
    phi = calc_phi(p, q)
    # Bug fix: a random e may share a factor with phi, in which case
    # mulinv() returns None and the key pair is unusable.  Retry until
    # gcd(e, phi) == 1.
    e = random.randrange(1, phi)
    while egcd(e, phi)[0] != 1:
        e = random.randrange(1, phi)
    d = mulinv(e, phi)
    return ((n, e), (n, d))
def customkeytest(text, size):
    """Round-trip *text* through a freshly generated key pair of the
    given size, printing the keys and each transformation step.
    """
    public, private = keygen(size)
    print("Public key:", public)
    print("Private key:", private)
    print("Original message:", text)
    encrypted = RSA_enc(text, public)
    print("Encrypted message:", encrypted)
    decrypted = RSA_dec(encrypted, private)
    print("Decrypted message:", decrypted)
if __name__ == "__main__":
    test()
    # Fixed: `print "..."` is Python 2 statement syntax and was a
    # SyntaxError in this Python 3 file.
    print("------------------------")
    customkeytest(message, 4)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.